Merge branch 'master' of https://github.com/harmony-one/harmony into crash_fix

pull/1417/head
Author: flicker-harmony
Commit: a026d3d6a3
76 changed files (change count per file in parentheses):
1. .hmy/wallet.ini (22)
2. api/client/service/server_test.go (7)
3. api/proto/node/node.go (32)
4. api/proto/node/node_test.go (2)
5. api/service/explorer/service.go (19)
6. api/service/explorer/storage_test.go (4)
7. api/service/explorer/structs.go (4)
8. api/service/explorer/structs_test.go (2)
9. api/service/networkinfo/service.go (2)
10. api/service/syncing/syncing.go (41)
11. cmd/client/wallet/generated_wallet.ini.go (22)
12. cmd/client/wallet/main.go (14)
13. cmd/harmony/main.go (51)
14. consensus/consensus.go (99)
15. consensus/consensus_service.go (184)
16. consensus/consensus_v2.go (6)
17. consensus/engine/consensus_engine.go (2)
18. core/block_validator.go (66)
19. core/blockchain.go (244)
20. core/chain_makers.go (6)
21. core/core_test.go (14)
22. core/evm.go (10)
23. core/genesis.go (2)
24. core/rawdb/accessors_chain.go (110)
25. core/rawdb/accessors_indexes_test.go (2)
26. core/rawdb/schema.go (59)
27. core/state/statedb.go (3)
28. core/state_processor.go (87)
29. core/state_transition.go (1)
30. core/tx_pool_test.go (2)
31. core/types.go (4)
32. core/types/block.go (98)
33. core/types/crosslink.go (83)
34. core/types/cx_receipt.go (180)
35. core/types/derive_sha.go (44)
36. core/types/gen_tx_json.go (44)
37. core/types/receipt.go (11)
38. core/types/transaction.go (75)
39. core/types/transaction_signing.go (11)
40. core/vm/evm.go (13)
41. drand/drand_test.go (2)
42. hmyclient/hmyclient.go (2)
43. internal/chain/engine.go (187)
44. internal/chain/reward.go (105)
45. internal/chain/sig.go (41)
46. internal/common/address.go (2)
47. internal/common/address_test.go (11)
48. internal/configs/node/config.go (2)
49. internal/configs/sharding/fixedschedule.go (4)
50. internal/configs/sharding/localnet.go (8)
51. internal/configs/sharding/mainnet.go (6)
52. internal/configs/sharding/pangaea.go (6)
53. internal/configs/sharding/shardingconfig.go (3)
54. internal/configs/sharding/testnet.go (6)
55. internal/genesis/localnodes.go (12)
56. internal/hmyapi/blockchain.go (6)
57. internal/shardchain/shardchains.go (30)
58. internal/utils/gomock_reflect_579506979/prog.go (64)
59. node/node.go (84)
60. node/node_cross_shard.go (278)
61. node/node_error.go (10)
62. node/node_explorer.go (9)
63. node/node_genesis.go (4)
64. node/node_handler.go (191)
65. node/node_newblock.go (88)
66. node/node_syncing.go (154)
67. node/node_test.go (145)
68. node/worker/worker.go (61)
69. node/worker/worker_test.go (13)
70. scripts/node.sh (8)
71. test/chain/main.go (7)
72. test/configs/local-resharding.txt (1)
73. test/configs/local.txt (3)
74. test/debug.sh (2)
75. test/deploy.sh (27)
76. test/kill_node.sh (8)

@ -50,24 +50,22 @@ rpc = l1.b.hmny.io:14555
rpc = s1.b.hmny.io:14555
[pangaea]
-bootnode = /ip4/100.26.90.187/tcp/9867/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
+bootnode = /ip4/54.86.126.90/tcp/9867/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
-bootnode = /ip4/54.213.43.194/tcp/9867/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
+bootnode = /ip4/52.40.84.2/tcp/9867/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
-bootnode = /ip4/13.113.101.219/tcp/9867/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX
-bootnode = /ip4/99.81.170.167/tcp/9867/p2p/QmRVbTpEYup8dSaURZfF6ByrMTSKa4UyUzJhSjahFzRqNj
shards = 4
[pangaea.shard0.rpc]
-rpc = l0.p.hmny.io:14555
+rpc = l0.pga.hmny.io:14555
-rpc = s0.p.hmny.io:14555
+rpc = s0.pga.hmny.io:14555
[pangaea.shard1.rpc]
-rpc = l1.p.hmny.io:14555
+rpc = l1.pga.hmny.io:14555
-rpc = s1.p.hmny.io:14555
+rpc = s1.pga.hmny.io:14555
[pangaea.shard2.rpc]
-rpc = l2.p.hmny.io:14555
+rpc = l2.pga.hmny.io:14555
-rpc = s2.p.hmny.io:14555
+rpc = s2.pga.hmny.io:14555
[pangaea.shard3.rpc]
-rpc = l3.p.hmny.io:14555
+rpc = l3.pga.hmny.io:14555
-rpc = s3.p.hmny.io:14555
+rpc = s3.pga.hmny.io:14555

@ -6,6 +6,8 @@ import (
"strings"
"testing"
"github.com/harmony-one/harmony/internal/chain"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
client "github.com/harmony-one/harmony/api/client/service/proto"
@ -15,7 +17,6 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/consensus"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/vm"
)
@ -62,7 +63,7 @@ func TestFetchAccountState(test *testing.T) {
genesis := gspec.MustCommit(database)
_ = genesis
-chain, _ := core.NewBlockChain(database, nil, gspec.Config, consensus.NewFaker(), vm.Config{}, nil)
+chain, _ := core.NewBlockChain(database, nil, gspec.Config, chain.Engine, vm.Config{}, nil)
hash := common.Hash{}
hash.SetBytes([]byte("hello"))
@ -99,7 +100,7 @@ func TestGetStakingContractInfo(test *testing.T) {
genesis := gspec.MustCommit(database)
_ = genesis
-chain, _ := core.NewBlockChain(database, nil, gspec.Config, consensus.NewFaker(), vm.Config{}, nil)
+chain, _ := core.NewBlockChain(database, nil, gspec.Config, chain.Engine, vm.Config{}, nil)
hash := common.Hash{}
hash.SetBytes([]byte("hello"))

@ -27,7 +27,6 @@ const (
PING // node send ip/pki to register with leader
PONG // node broadcast pubK
ShardState
// TODO: add more types
)
// BlockchainSyncMessage is a struct for blockchain sync message.
@ -96,6 +95,9 @@ type BlockMessageType int
// Block sync message subtype
const (
Sync BlockMessageType = iota
Header // used for crosslink from beacon chain to shard chain
Receipt // cross-shard transaction receipts
)
// SerializeBlockchainSyncMessage serializes BlockchainSyncMessage.
@ -157,6 +159,17 @@ func ConstructBlocksSyncMessage(blocks []*types.Block) []byte {
return byteBuffer.Bytes()
}
// ConstructCrossLinkHeadersMessage constructs cross link header message to send to beacon chain
func ConstructCrossLinkHeadersMessage(headers []*types.Header) []byte {
byteBuffer := bytes.NewBuffer([]byte{byte(proto.Node)})
byteBuffer.WriteByte(byte(Block))
byteBuffer.WriteByte(byte(Header))
headersData, _ := rlp.EncodeToBytes(headers)
byteBuffer.Write(headersData)
return byteBuffer.Bytes()
}
// ConstructEpochShardStateMessage contructs epoch shard state message
func ConstructEpochShardStateMessage(epochShardState types.EpochShardState) []byte {
byteBuffer := bytes.NewBuffer([]byte{byte(proto.Node)})
@ -186,3 +199,20 @@ func DeserializeEpochShardStateFromMessage(payload []byte) (*types.EpochShardSta
return epochShardState, nil
}
// ConstructCXReceiptsProof constructs cross shard receipts and merkle proof
func ConstructCXReceiptsProof(cxs types.CXReceipts, mkp *types.CXMerkleProof) []byte {
msg := &types.CXReceiptsProof{Receipts: cxs, MerkleProof: mkp}
byteBuffer := bytes.NewBuffer([]byte{byte(proto.Node)})
byteBuffer.WriteByte(byte(Block))
byteBuffer.WriteByte(byte(Receipt))
by, err := rlp.EncodeToBytes(msg)
if err != nil {
utils.Logger().Error().Err(err).Msg("[ConstructCXReceiptsProof] Encode CXReceiptsProof Error")
return []byte{}
}
byteBuffer.Write(by)
return byteBuffer.Bytes()
}
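For orientation, a minimal decode-side sketch of the message built by ConstructCXReceiptsProof: it assumes the receiver strips the same three prefix bytes written above (proto.Node category, Block type, Receipt subtype) and RLP-decodes the rest. The helper name decodeCXReceiptsProofMsg is illustrative and not part of this diff; actual routing in the node message handler may differ.

// decodeCXReceiptsProofMsg mirrors ConstructCXReceiptsProof above:
// drop the three prefix bytes and RLP-decode the remaining payload.
func decodeCXReceiptsProofMsg(msg []byte) (*types.CXReceiptsProof, error) {
	if len(msg) < 3 {
		return nil, errors.New("message too short for node/block/receipt prefix")
	}
	cxp := new(types.CXReceiptsProof)
	if err := rlp.DecodeBytes(msg[3:], cxp); err != nil {
		return nil, err
	}
	return cxp, nil
}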

@ -88,7 +88,7 @@ func TestConstructBlocksSyncMessage(t *testing.T) {
t.Fatalf("statedb.Database().TrieDB().Commit() failed: %s", err)
}
-block1 := types.NewBlock(head, nil, nil)
+block1 := types.NewBlock(head, nil, nil, nil, nil)
blocks := []*types.Block{
block1,

@ -296,7 +296,26 @@ func (s *Service) GetCommittee(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(400)
return
}
// fetch current epoch if epoch is 0
db := s.storage.GetDB()
if epoch == 0 {
bytes, err := db.Get([]byte(BlockHeightKey))
blockHeight, err := strconv.Atoi(string(bytes))
if err != nil {
utils.Logger().Warn().Err(err).Msg("cannot decode block height from DB")
w.WriteHeader(500)
return
}
key := GetBlockKey(blockHeight)
data, err := db.Get([]byte(key))
block := new(types.Block)
if rlp.DecodeBytes(data, block) != nil {
utils.Logger().Warn().Err(err).Msg("cannot get block from db")
w.WriteHeader(500)
return
}
epoch = block.Epoch().Uint64()
}
bytes, err := db.Get([]byte(GetCommitteeKey(uint32(shardID), epoch)))
if err != nil {
utils.Logger().Warn().Err(err).Msg("cannot read committee")

@ -54,7 +54,7 @@ func TestDump(t *testing.T) {
tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), 0, big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
txs := []*types.Transaction{tx1, tx2, tx3}
-block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil)
+block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, nil)
ins := GetStorageInstance("1.1.1.1", "3333", true)
ins.Dump(block, uint64(1))
db := ins.GetDB()
@ -112,7 +112,7 @@ func TestUpdateAddressStorage(t *testing.T) {
tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), 0, big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
txs := []*types.Transaction{tx1, tx2, tx3}
-block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil)
+block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, nil)
ins := GetStorageInstance("1.1.1.1", "3333", true)
ins.Dump(block, uint64(1))
db := ins.GetDB()

@ -64,6 +64,7 @@ type Block struct {
NextBlock RefBlock `json:"nextBlock"`
TXs []*Transaction `json:"txs"`
Signers []string `json:"signers"`
Epoch uint64 `json:"epoch"`
ExtraData string `json:"extra_data"`
}
@ -100,6 +101,8 @@ func NewBlock(block *types.Block, height int) *Block {
}
}
}
} else {
utils.Logger().Warn().Err(err).Msgf("bad state block %d", block.NumberU64())
}
return &Block{
Height: strconv.Itoa(height),
@ -109,6 +112,7 @@ func NewBlock(block *types.Block, height int) *Block {
MerkleRoot: block.Root().Hex(),
Bytes: strconv.Itoa(int(block.Size())),
Signers: signers,
Epoch: block.Epoch().Uint64(),
ExtraData: string(block.Extra()),
}
}

@ -18,7 +18,7 @@ func TestGetTransaction(t *testing.T) {
tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), 0, big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
txs := []*types.Transaction{tx1, tx2, tx3}
-block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil)
+block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, nil)
tx := GetTransaction(tx1, block)
assert.Equal(t, tx.ID, tx1.Hash().Hex(), "should be equal tx1.Hash()")

@ -132,6 +132,7 @@ func (s *Service) Init() error {
utils.Logger().Info().Str("Rendezvous", string(s.Rendezvous)).Msg("Announcing ourselves...")
s.discovery = libp2pdis.NewRoutingDiscovery(s.dht)
libp2pdis.Advertise(ctx, s.discovery, string(s.Rendezvous))
libp2pdis.Advertise(ctx, s.discovery, string(p2p.GroupIDBeaconClient))
utils.Logger().Info().Msg("Successfully announced!")
return nil
@ -157,6 +158,7 @@ func (s *Service) DoService() {
return
case <-tick.C:
libp2pdis.Advertise(ctx, s.discovery, string(s.Rendezvous))
libp2pdis.Advertise(ctx, s.discovery, string(p2p.GroupIDBeaconClient))
utils.Logger().Info().Str("Rendezvous", string(s.Rendezvous)).Msg("Successfully announced!")
default:
var err error

@ -31,6 +31,7 @@ const (
SyncingPortDifference = 3000
inSyncThreshold = 0 // when peerBlockHeight - myBlockHeight <= inSyncThreshold, it's ready to join consensus
BatchSize uint32 = 1000 //maximum size for one query of block hashes
SyncLoopFrequency = 1 // unit in second
)
// SyncPeerConfig is peer config to sync. // SyncPeerConfig is peer config to sync.
@ -532,9 +533,19 @@ func (ss *StateSync) getBlockFromLastMileBlocksByParentHash(parentHash common.Ha
func (ss *StateSync) updateBlockAndStatus(block *types.Block, bc *core.BlockChain, worker *worker.Worker) bool {
utils.Logger().Info().Str("blockHex", bc.CurrentBlock().Hash().Hex()).Msg("[SYNC] Current Block")
// Verify block signatures
if block.NumberU64() > 1 {
err := core.VerifyBlockLastCommitSigs(bc, block)
if err != nil {
utils.Logger().Error().Err(err).Msgf("[SYNC] failed verifying signatures for new block %d", block.NumberU64())
return false
}
}
_, err := bc.InsertChain([]*types.Block{block})
if err != nil {
-utils.Logger().Error().Err(err).Msg("[SYNC] Error adding new block to blockchain")
+utils.Logger().Error().Err(err).Msgf("[SYNC] Error adding new block to blockchain %d %d", block.NumberU64(), block.ShardID())
utils.Logger().Debug().Interface("block", bc.CurrentBlock()).Msg("[SYNC] Rolling back current block!")
bc.Rollback([]common.Hash{bc.CurrentBlock().Hash()})
@ -722,20 +733,24 @@ func (ss *StateSync) SyncLoop(bc *core.BlockChain, worker *worker.Worker, willJo
if !isBeacon {
ss.RegisterNodeInfo()
}
+ticker := time.NewTicker(SyncLoopFrequency * time.Second)
for {
-otherHeight := ss.getMaxPeerHeight()
-currentHeight := bc.CurrentBlock().NumberU64()
-if currentHeight >= otherHeight {
-utils.Logger().Info().Msg("[SYNC] Node is now IN SYNC!")
-break
-}
-startHash := bc.CurrentBlock().Hash()
-size := uint32(otherHeight - currentHeight)
-if size > BatchSize {
-size = BatchSize
-}
-ss.ProcessStateSync(startHash[:], size, bc, worker)
-ss.purgeOldBlocksFromCache()
+select {
+case <-ticker.C:
+otherHeight := ss.getMaxPeerHeight()
+currentHeight := bc.CurrentBlock().NumberU64()
+if currentHeight >= otherHeight {
+utils.Logger().Info().Msgf("[SYNC] Node is now IN SYNC! (ShardID: %d)", bc.ShardID())
+break
+}
+startHash := bc.CurrentBlock().Hash()
+size := uint32(otherHeight - currentHeight)
+if size > BatchSize {
+size = BatchSize
+}
+ss.ProcessStateSync(startHash[:], size, bc, worker)
+ss.purgeOldBlocksFromCache()
+}
}
ss.purgeAllBlocksFromCache()
}
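As a side note, the SyncLoopFrequency ticker above follows the standard Go polling pattern. A self-contained sketch of that pattern (generic Go, not Harmony code; the inSync and syncOnce callbacks are placeholders):

// pollUntilSynced runs syncOnce once per tick until inSync reports true.
func pollUntilSynced(interval time.Duration, inSync func() bool, syncOnce func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for range ticker.C {
		if inSync() {
			return
		}
		syncOnce()
	}
}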

@ -53,26 +53,24 @@ rpc = l1.b.hmny.io:14555
rpc = s1.b.hmny.io:14555
[pangaea]
-bootnode = /ip4/100.26.90.187/tcp/9867/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
+bootnode = /ip4/54.86.126.90/tcp/9867/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
-bootnode = /ip4/54.213.43.194/tcp/9867/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
+bootnode = /ip4/52.40.84.2/tcp/9867/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
-bootnode = /ip4/13.113.101.219/tcp/9867/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX
-bootnode = /ip4/99.81.170.167/tcp/9867/p2p/QmRVbTpEYup8dSaURZfF6ByrMTSKa4UyUzJhSjahFzRqNj
shards = 4
[pangaea.shard0.rpc]
-rpc = l0.p.hmny.io:14555
+rpc = l0.pga.hmny.io:14555
-rpc = s0.p.hmny.io:14555
+rpc = s0.pga.hmny.io:14555
[pangaea.shard1.rpc]
-rpc = l1.p.hmny.io:14555
+rpc = l1.pga.hmny.io:14555
-rpc = s1.p.hmny.io:14555
+rpc = s1.pga.hmny.io:14555
[pangaea.shard2.rpc]
-rpc = l2.p.hmny.io:14555
+rpc = l2.pga.hmny.io:14555
-rpc = s2.p.hmny.io:14555
+rpc = s2.pga.hmny.io:14555
[pangaea.shard3.rpc]
-rpc = l3.p.hmny.io:14555
+rpc = l3.pga.hmny.io:14555
-rpc = s3.p.hmny.io:14555
+rpc = s3.pga.hmny.io:14555
`
)

@ -92,6 +92,7 @@ var (
transferReceiverPtr = transferCommand.String("to", "", "Specify the receiver account")
transferAmountPtr = transferCommand.Float64("amount", 0, "Specify the amount to transfer")
transferShardIDPtr = transferCommand.Int("shardID", 0, "Specify the shard ID for the transfer")
transferToShardIDPtr = transferCommand.Int("toShardID", 0, "Specify the destination shard ID for the transfer")
transferInputDataPtr = transferCommand.String("inputData", "", "Base64-encoded input data to embed in the transaction")
transferSenderPassPtr = transferCommand.String("pass", "", "Passphrase of the sender's private key")
@ -160,6 +161,7 @@ func main() {
fmt.Println(" --to - The receiver account's address")
fmt.Println(" --amount - The amount of token to transfer")
fmt.Println(" --shardID - The shard Id for the transfer")
fmt.Println(" --toShardID - The destination shard Id for the transfer")
fmt.Println(" --inputData - Base64-encoded input data to embed in the transaction")
fmt.Println(" --pass - Passphrase of sender's private key")
fmt.Println(" 8. export - Export account key to a new file")
@ -652,6 +654,7 @@ func processTransferCommand() {
receiver := *transferReceiverPtr
amount := *transferAmountPtr
shardID := *transferShardIDPtr
toShardID := *transferToShardIDPtr
base64InputData := *transferInputDataPtr
senderPass := *transferSenderPassPtr
@ -662,7 +665,7 @@ func processTransferCommand() {
return
}
-if shardID == -1 {
+if shardID == -1 || toShardID == -1 {
fmt.Println("Please specify the shard ID for the transfer (e.g. --shardID=0)")
return
}
@ -708,8 +711,11 @@ func processTransferCommand() {
return
}
-tx := types.NewTransaction(
-state.nonce, receiverAddress, uint32(shardID), amountBigInt,
+fromShard := uint32(shardID)
+toShard := uint32(toShardID)
+var tx *types.Transaction
+tx = types.NewCrossShardTransaction(
+state.nonce, &receiverAddress, fromShard, toShard, amountBigInt,
gas, nil, inputData)
account, err := ks.Find(accounts.Account{Address: senderAddress})
@ -726,7 +732,7 @@ func processTransferCommand() {
fmt.Printf("Unlock account succeeded! '%v'\n", senderPass)
-tx, err = ks.SignTx(account, tx, nil)
+tx, err = ks.SignTx(account, tx, big.NewInt(1))
if err != nil {
fmt.Printf("SignTx Error: %v\n", err)
return

@ -9,12 +9,15 @@ import (
"os"
"path"
"runtime"
"strconv"
"time"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/api/service/syncing"
"github.com/harmony-one/harmony/consensus"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/internal/blsgen"
@ -166,7 +169,13 @@ func initSetup() {
}
func passphraseForBls() {
if *isExplorer {
return
}
// If FN node running, they should either specify blsPrivateKey or the file with passphrase
if *isExplorer {
return
}
if *blsKeyFile == "" || *blsPass == "" {
fmt.Println("Internal nodes need to have pass to decrypt blskey")
os.Exit(101)
@ -294,10 +303,25 @@ func setupConsensusAndNode(nodeConfig *nodeconfig.ConfigType) *node.Node {
chainDBFactory := &shardchain.LDBFactory{RootDir: nodeConfig.DBDir}
currentNode := node.New(nodeConfig.Host, currentConsensus, chainDBFactory, *isArchival)
-if *dnsZone != "" {
-currentNode.SetDNSZone(*dnsZone)
-} else if *dnsFlag {
-currentNode.SetDNSZone("t.hmny.io")
+switch {
+case *networkType == nodeconfig.Localnet:
+epochConfig := core.ShardingSchedule.InstanceForEpoch(ethCommon.Big0)
+selfPort, err := strconv.ParseUint(*port, 10, 16)
+if err != nil {
+utils.Logger().Fatal().
+Err(err).
+Str("self_port_string", *port).
+Msg("cannot convert self port string into port number")
+}
+currentNode.SyncingPeerProvider = node.NewLocalSyncingPeerProvider(
+6000, uint16(selfPort), epochConfig.NumShards(), uint32(epochConfig.NumNodesPerShard()))
+case *dnsZone != "":
+currentNode.SyncingPeerProvider = node.NewDNSSyncingPeerProvider(*dnsZone, syncing.GetSyncingPort(*port))
+case *dnsFlag:
+currentNode.SyncingPeerProvider = node.NewDNSSyncingPeerProvider("t.hmny.io", syncing.GetSyncingPort(*port))
+default:
+currentNode.SyncingPeerProvider = node.NewLegacySyncingPeerProvider(currentNode)
}
// TODO: add staking support
// currentNode.StakingAccount = myAccount
@ -312,6 +336,8 @@ func setupConsensusAndNode(nodeConfig *nodeconfig.ConfigType) *node.Node {
currentNode.NodeConfig.SetPushgatewayPort(nodeConfig.PushgatewayPort)
currentNode.NodeConfig.SetMetricsFlag(nodeConfig.MetricsFlag)
currentNode.NodeConfig.SetBeaconGroupID(p2p.NewGroupIDByShardID(0))
if *isExplorer {
currentNode.NodeConfig.SetRole(nodeconfig.ExplorerNode)
currentNode.NodeConfig.SetShardGroupID(p2p.NewGroupIDByShardID(p2p.ShardID(*shardID)))
@ -419,9 +445,9 @@ func main() {
nodeConfig := createGlobalConfig()
currentNode := setupConsensusAndNode(nodeConfig)
-//if consensus.ShardID != 0 {
-// go currentNode.SupportBeaconSyncing()
-//}
+if currentNode.Blockchain().ShardID() != 0 {
+go currentNode.SupportBeaconSyncing()
+}
startMsg := "==== New Harmony Node ===="
if *isExplorer {
@ -450,5 +476,16 @@ func main() {
}
}
currentNode.RunServices()
// Run additional node collectors
// Collect node metrics if metrics flag is set
if currentNode.NodeConfig.GetMetricsFlag() {
go currentNode.CollectMetrics()
}
// Commit committtee if node role is explorer
if currentNode.NodeConfig.Role() == nodeconfig.ExplorerNode {
go currentNode.CommitCommittee()
}
currentNode.StartServer()
}

@ -10,15 +10,11 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/common/denominations"
consensus_engine "github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/contracts/structs"
-"github.com/harmony-one/harmony/core/state"
+"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
common2 "github.com/harmony-one/harmony/internal/common"
"github.com/harmony-one/harmony/internal/ctxerror"
"github.com/harmony-one/harmony/internal/genesis"
"github.com/harmony-one/harmony/internal/memprofiling"
@ -31,9 +27,6 @@ const (
vdfAndSeedSize = 548 // size of VDF/Proof and Seed
)
// BlockReward is the block reward, to be split evenly among block signers.
var BlockReward = new(big.Int).Mul(big.NewInt(24), big.NewInt(denominations.One))
// Consensus is the main struct with all states and data related to consensus process.
type Consensus struct {
// PbftLog stores the pbft messages and blocks during PBFT process
@ -307,96 +300,6 @@ func New(host p2p.Host, ShardID uint32, leader p2p.Peer, blsPriKey *bls.SecretKe
return &consensus, nil
}
// accumulateRewards credits the coinbase of the given block with the mining
// reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded.
// Returns node block reward or error.
func accumulateRewards(
bc consensus_engine.ChainReader, state *state.DB, header *types.Header, nodeAddress common.Address,
) (*big.Int, error) {
blockNum := header.Number.Uint64()
if blockNum == 0 {
// Epoch block has no parent to reward.
return nil, nil
}
// TODO ek – retrieving by parent number (blockNum - 1) doesn't work,
// while it is okay with hash. Sounds like DB inconsistency.
// Figure out why.
parentHeader := bc.GetHeaderByHash(header.ParentHash)
if parentHeader == nil {
return nil, ctxerror.New("cannot find parent block header in DB",
"parentHash", header.ParentHash)
}
if parentHeader.Number.Cmp(common.Big0) == 0 {
// Parent is an epoch block,
// which is not signed in the usual manner therefore rewards nothing.
return nil, nil
}
parentShardState, err := bc.ReadShardState(parentHeader.Epoch)
if err != nil {
return nil, ctxerror.New("cannot read shard state",
"epoch", parentHeader.Epoch,
).WithCause(err)
}
parentCommittee := parentShardState.FindCommitteeByID(parentHeader.ShardID)
if parentCommittee == nil {
return nil, ctxerror.New("cannot find shard in the shard state",
"parentBlockNumber", parentHeader.Number,
"shardID", parentHeader.ShardID,
)
}
var committerKeys []*bls.PublicKey
for _, member := range parentCommittee.NodeList {
committerKey := new(bls.PublicKey)
err := member.BlsPublicKey.ToLibBLSPublicKey(committerKey)
if err != nil {
return nil, ctxerror.New("cannot convert BLS public key",
"blsPublicKey", member.BlsPublicKey).WithCause(err)
}
committerKeys = append(committerKeys, committerKey)
}
mask, err := bls_cosi.NewMask(committerKeys, nil)
if err != nil {
return nil, ctxerror.New("cannot create group sig mask").WithCause(err)
}
if err := mask.SetMask(header.LastCommitBitmap); err != nil {
return nil, ctxerror.New("cannot set group sig mask bits").WithCause(err)
}
totalAmount := big.NewInt(0)
var accounts []common.Address
signers := []string{}
for idx, member := range parentCommittee.NodeList {
if signed, err := mask.IndexEnabled(idx); err != nil {
return nil, ctxerror.New("cannot check for committer bit",
"committerIndex", idx,
).WithCause(err)
} else if signed {
accounts = append(accounts, member.EcdsaAddress)
}
}
numAccounts := big.NewInt(int64(len(accounts)))
last := new(big.Int)
nodeReward := big.NewInt(0)
for i, account := range accounts {
cur := new(big.Int)
cur.Mul(BlockReward, big.NewInt(int64(i+1))).Div(cur, numAccounts)
diff := new(big.Int).Sub(cur, last)
signers = append(signers, common2.MustAddressToBech32(account))
if account == nodeAddress {
nodeReward = diff
}
state.AddBalance(account, diff)
totalAmount = new(big.Int).Add(totalAmount, diff)
last = cur
}
header.Logger(utils.Logger()).Debug().
Str("NumAccounts", numAccounts.String()).
Str("TotalAmount", totalAmount.String()).
Strs("Signers", signers).
Msg("[Block Reward] Successfully paid out block reward")
return nodeReward, nil
}
// GenesisStakeInfoFinder is a stake info finder implementation using only // GenesisStakeInfoFinder is a stake info finder implementation using only
// genesis accounts. // genesis accounts.
// When used for block reward, it rewards only foundational nodes. // When used for block reward, it rewards only foundational nodes.

@ -1,28 +1,26 @@
package consensus
import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math/big"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/crypto/hash"
"github.com/harmony-one/harmony/internal/chain"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
protobuf "github.com/golang/protobuf/proto"
"github.com/harmony-one/bls/ffi/go/bls"
libp2p_peer "github.com/libp2p/go-libp2p-peer"
"github.com/rs/zerolog"
"golang.org/x/crypto/sha3"
msg_pb "github.com/harmony-one/harmony/api/proto/message"
consensus_engine "github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/ctxerror"
@ -59,48 +57,6 @@ func (consensus *Consensus) GetNextRnd() ([vdFAndProofSize]byte, [32]byte, error
return vdfBytes, seed, nil
}
// SealHash returns the hash of a block prior to it being sealed.
func (consensus *Consensus) SealHash(header *types.Header) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256()
// TODO: update with new fields
if err := rlp.Encode(hasher, []interface{}{
header.ParentHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra,
}); err != nil {
utils.Logger().Warn().Err(err).Msg("rlp.Encode failed")
}
hasher.Sum(hash[:0])
return hash
}
// Seal is to seal final block.
func (consensus *Consensus) Seal(chain consensus_engine.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
// TODO: implement final block sealing
return nil
}
// Author returns the author of the block header.
func (consensus *Consensus) Author(header *types.Header) (common.Address, error) {
// TODO: implement this
return common.Address{}, nil
}
// Prepare is to prepare ...
// TODO(RJ): fix it.
func (consensus *Consensus) Prepare(chain consensus_engine.ChainReader, header *types.Header) error {
// TODO: implement prepare method
return nil
}
// Populates the common basic fields for all consensus message.
func (consensus *Consensus) populateMessageFields(request *msg_pb.ConsensusRequest) {
request.ViewId = consensus.viewID
@ -198,107 +154,6 @@ func NewFaker() *Consensus {
return &Consensus{}
}
// VerifyHeader checks whether a header conforms to the consensus rules of the bft engine.
func (consensus *Consensus) VerifyHeader(chain consensus_engine.ChainReader, header *types.Header, seal bool) error {
parentHeader := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
if parentHeader == nil {
return consensus_engine.ErrUnknownAncestor
}
if seal {
if err := consensus.VerifySeal(chain, header); err != nil {
return err
}
}
return nil
}
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications.
func (consensus *Consensus) VerifyHeaders(chain consensus_engine.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
abort, results := make(chan struct{}), make(chan error, len(headers))
for i := 0; i < len(headers); i++ {
results <- nil
}
return abort, results
}
// retrievePublicKeysFromLastBlock finds the public keys of last block's committee
func retrievePublicKeysFromLastBlock(bc consensus_engine.ChainReader, header *types.Header) ([]*bls.PublicKey, error) {
parentHeader := bc.GetHeaderByHash(header.ParentHash)
if parentHeader == nil {
return nil, ctxerror.New("cannot find parent block header in DB",
"parentHash", header.ParentHash)
}
parentShardState, err := bc.ReadShardState(parentHeader.Epoch)
if err != nil {
return nil, ctxerror.New("cannot read shard state",
"epoch", parentHeader.Epoch,
).WithCause(err)
}
parentCommittee := parentShardState.FindCommitteeByID(parentHeader.ShardID)
if parentCommittee == nil {
return nil, ctxerror.New("cannot find shard in the shard state",
"parentBlockNumber", parentHeader.Number,
"shardID", parentHeader.ShardID,
)
}
var committerKeys []*bls.PublicKey
for _, member := range parentCommittee.NodeList {
committerKey := new(bls.PublicKey)
err := member.BlsPublicKey.ToLibBLSPublicKey(committerKey)
if err != nil {
return nil, ctxerror.New("cannot convert BLS public key",
"blsPublicKey", member.BlsPublicKey).WithCause(err)
}
committerKeys = append(committerKeys, committerKey)
}
return committerKeys, nil
}
// VerifySeal implements consensus.Engine, checking whether the given block satisfies
// the PoS difficulty requirements, i.e. >= 2f+1 valid signatures from the committee
func (consensus *Consensus) VerifySeal(chain consensus_engine.ChainReader, header *types.Header) error {
if chain.CurrentHeader().Number.Uint64() <= uint64(1) {
return nil
}
publicKeys, err := retrievePublicKeysFromLastBlock(chain, header)
if err != nil {
return ctxerror.New("[VerifySeal] Cannot retrieve publickeys from last block").WithCause(err)
}
payload := append(header.LastCommitSignature[:], header.LastCommitBitmap...)
aggSig, mask, err := readSignatureBitmapByPublicKeys(payload, publicKeys)
if err != nil {
return ctxerror.New("[VerifySeal] Unable to deserialize the LastCommitSignature and LastCommitBitmap in Block Header").WithCause(err)
}
if count := utils.CountOneBits(mask.Bitmap); count < consensus.PreviousQuorum() {
return ctxerror.New("[VerifySeal] Not enough signature in LastCommitSignature from Block Header", "need", consensus.Quorum(), "got", count)
}
blockNumHash := make([]byte, 8)
binary.LittleEndian.PutUint64(blockNumHash, header.Number.Uint64()-1)
lastCommitPayload := append(blockNumHash, header.ParentHash[:]...)
if !aggSig.VerifyHash(mask.AggregatePublic, lastCommitPayload) {
return ctxerror.New("[VerifySeal] Unable to verify aggregated signature from last block", "lastBlockNum", header.Number.Uint64()-1, "lastBlockHash", header.ParentHash)
}
return nil
}
// Finalize implements consensus.Engine, accumulating the block and uncle rewards,
// setting the final state and assembling the block.
func (consensus *Consensus) Finalize(chain consensus_engine.ChainReader, header *types.Header, state *state.DB, txs []*types.Transaction, receipts []*types.Receipt) (*types.Block, error) {
// Accumulate any block and uncle rewards and commit the final state root
// Header seems complete, assemble into a block and return
blockReward, err := accumulateRewards(chain, state, header, consensus.SelfAddress)
if err != nil {
return nil, ctxerror.New("cannot pay block reward").WithCause(err)
}
consensus.lastBlockReward = blockReward
header.Root = state.IntermediateRoot(false)
return types.NewBlock(header, txs, receipts), nil
}
// Sign on the hash of the message
func (consensus *Consensus) signMessage(message []byte) []byte {
hash := hash.Keccak256(message)
@ -548,38 +403,7 @@ func (consensus *Consensus) ReadSignatureBitmapPayload(recvPayload []byte, offse
return nil, nil, errors.New("payload not have enough length")
}
sigAndBitmapPayload := recvPayload[offset:]
-return readSignatureBitmapByPublicKeys(sigAndBitmapPayload, consensus.PublicKeys)
+return chain.ReadSignatureBitmapByPublicKeys(sigAndBitmapPayload, consensus.PublicKeys)
}
// readSignatureBitmapByPublicKeys read the payload of signature and bitmap based on public keys
func readSignatureBitmapByPublicKeys(recvPayload []byte, publicKeys []*bls.PublicKey) (*bls.Sign, *bls_cosi.Mask, error) {
if len(recvPayload) < 96 {
return nil, nil, errors.New("payload not have enough length")
}
payload := append(recvPayload[:0:0], recvPayload...)
//#### Read payload data
// 96 byte of multi-sig
offset := 0
multiSig := payload[offset : offset+96]
offset += 96
// bitmap
bitmap := payload[offset:]
//#### END Read payload data
aggSig := bls.Sign{}
err := aggSig.Deserialize(multiSig)
if err != nil {
return nil, nil, errors.New("unable to deserialize multi-signature from payload")
}
mask, err := bls_cosi.NewMask(publicKeys, nil)
if err != nil {
utils.Logger().Warn().Err(err).Msg("onNewView unable to setup mask for prepared message")
return nil, nil, errors.New("unable to setup mask from payload")
}
if err := mask.SetMask(bitmap); err != nil {
utils.Logger().Warn().Err(err).Msg("mask.SetMask failed")
}
return &aggSig, mask, nil
} }
func (consensus *Consensus) reportMetrics(block types.Block) {

@ -15,6 +15,7 @@ import (
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
vrf_bls "github.com/harmony-one/harmony/crypto/vrf/bls"
"github.com/harmony-one/harmony/internal/chain"
"github.com/harmony-one/harmony/internal/ctxerror"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p"
@ -200,7 +201,7 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
return
}
if consensus.mode.Mode() == Normal {
-if err = consensus.VerifyHeader(consensus.ChainReader, &headerObj, true); err != nil {
+if err = chain.Engine.VerifyHeader(consensus.ChainReader, &headerObj, true); err != nil {
consensus.getLogger().Warn().
Err(err).
Str("inChain", consensus.ChainReader.CurrentHeader().Number.String()).
@ -507,7 +508,7 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
return
}
if consensus.mode.Mode() == Normal {
-if err := consensus.VerifyHeader(consensus.ChainReader, blockObj.Header(), true); err != nil {
+if err := chain.Engine.VerifyHeader(consensus.ChainReader, blockObj.Header(), true); err != nil {
consensus.getLogger().Warn().
Err(err).
Str("inChain", consensus.ChainReader.CurrentHeader().Number.String()).
@ -574,6 +575,7 @@ func (consensus *Consensus) onPrepared(msg *msg_pb.Message) {
}
// Construct and send the commit message
// TODO: should only sign on block hash
blockNumBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(blockNumBytes, consensus.blockNum)
commitPayload := append(blockNumBytes, consensus.blockHash[:]...)

@ -67,7 +67,7 @@ type Engine interface {
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
Finalize(chain ChainReader, header *types.Header, state *state.DB, txs []*types.Transaction,
-receipts []*types.Receipt) (*types.Block, error)
+receipts []*types.Receipt, outcxs []*types.CXReceipt, incxs []*types.CXReceiptsProof) (*types.Block, error)
// Seal generates a new sealing request for the given input block and pushes
// the result into the given channel.

@ -17,8 +17,13 @@
package core
import (
"encoding/binary"
"fmt"
"github.com/harmony-one/bls/ffi/go/bls"
bls2 "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/ctxerror"
"github.com/ethereum/go-ethereum/params"
consensus_engine "github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/core/state"
@ -74,7 +79,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
// transition, such as amount of used gas, the receipt roots and the state root
// itself. ValidateState returns a database batch if the validation was a success
// otherwise nil and an error is returned.
-func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *state.DB, receipts types.Receipts, usedGas uint64) error {
+func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *state.DB, receipts types.Receipts, cxReceipts types.CXReceipts, usedGas uint64) error {
header := block.Header()
if block.GasUsed() != usedGas {
return fmt.Errorf("invalid gas used (remote: %d local: %d)", block.GasUsed(), usedGas)
@ -90,6 +95,12 @@ func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *stat
if receiptSha != header.ReceiptHash {
return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)
}
cxsSha := types.DeriveMultipleShardsSha(cxReceipts)
if cxsSha != header.OutgoingReceiptHash {
return fmt.Errorf("invalid cross shard receipt root hash (remote: %x local: %x)", header.OutgoingReceiptHash, cxsSha)
}
// Validate the state root against the received state root and throw
// an error if they don't match.
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
@ -98,6 +109,59 @@ func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *stat
return nil
}
// VerifyBlockLastCommitSigs verifies the last commit sigs of the block
func VerifyBlockLastCommitSigs(bc *BlockChain, block *types.Block) error {
header := block.Header()
parentBlock := bc.GetBlockByNumber(block.NumberU64() - 1)
if parentBlock == nil {
return ctxerror.New("[VerifyNewBlock] Failed to get parent block", "shardID", header.ShardID, "blockNum", header.Number)
}
parentHeader := parentBlock.Header()
shardState, err := bc.ReadShardState(parentHeader.Epoch)
committee := shardState.FindCommitteeByID(parentHeader.ShardID)
if err != nil || committee == nil {
return ctxerror.New("[VerifyNewBlock] Failed to read shard state for cross link header", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
}
var committerKeys []*bls.PublicKey
parseKeysSuccess := true
for _, member := range committee.NodeList {
committerKey := new(bls.PublicKey)
err = member.BlsPublicKey.ToLibBLSPublicKey(committerKey)
if err != nil {
parseKeysSuccess = false
break
}
committerKeys = append(committerKeys, committerKey)
}
if !parseKeysSuccess {
return ctxerror.New("[VerifyNewBlock] cannot convert BLS public key", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
}
mask, err := bls2.NewMask(committerKeys, nil)
if err != nil {
return ctxerror.New("[VerifyNewBlock] cannot create group sig mask", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
}
if err := mask.SetMask(header.LastCommitBitmap); err != nil {
return ctxerror.New("[VerifyNewBlock] cannot set group sig mask bits", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
}
aggSig := bls.Sign{}
err = aggSig.Deserialize(header.LastCommitSignature[:])
if err != nil {
return ctxerror.New("[VerifyNewBlock] unable to deserialize multi-signature from payload").WithCause(err)
}
blockNumBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(blockNumBytes, header.Number.Uint64()-1)
commitPayload := append(blockNumBytes, header.ParentHash[:]...)
if !aggSig.VerifyHash(mask.AggregatePublic, commitPayload) {
return ctxerror.New("[VerifyNewBlock] Failed to verify the signature for last commit sig", "shardID", header.ShardID, "blockNum", header.Number)
}
return nil
}
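For reference, the payload that the aggregated last-commit signature is verified against is just the 8-byte little-endian encoding of (blockNum - 1) followed by the parent block hash. A small sketch under that assumption (the helper name is illustrative and not part of this diff; binary and common are the standard-library and go-ethereum packages already imported above):

// lastCommitPayload reproduces the byte layout checked by VerifyBlockLastCommitSigs:
// 8-byte little-endian (blockNum - 1) followed by the parent block hash.
func lastCommitPayload(blockNum uint64, parentHash common.Hash) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, blockNum-1)
	return append(buf, parentHash[:]...)
}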
// CalcGasLimit computes the gas limit of the next block after parent. It aims
// to keep the baseline gas above the provided floor, and increase it towards the
// ceil if the blocks are full. If the ceil is exceeded, it will always decrease

@ -217,13 +217,13 @@ func (bc *BlockChain) ValidateNewBlock(block *types.Block) error {
}
// Process block using the parent state as reference point.
-receipts, _, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
+receipts, cxReceipts, _, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
if err != nil {
bc.reportBlock(block, receipts, err)
return err
}
-err = bc.Validator().ValidateState(block, bc.CurrentBlock(), state, receipts, usedGas)
+err = bc.Validator().ValidateState(block, bc.CurrentBlock(), state, receipts, cxReceipts, usedGas)
if err != nil {
bc.reportBlock(block, receipts, err)
return err
@ -412,8 +412,9 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
}
// ShardID returns the shard Id of the blockchain.
// TODO: use a better solution before resharding shuffle nodes to different shards
func (bc *BlockChain) ShardID() uint32 {
-return uint32(bc.chainConfig.ChainID.Int64())
+return bc.CurrentBlock().ShardID()
}
// GasLimit returns the gas limit of the current HEAD block.
@ -990,7 +991,7 @@ func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (e
}
// WriteBlockWithState writes the block and all associated state to the database.
-func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.DB) (status WriteStatus, err error) {
+func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, cxReceipts []*types.CXReceipt, state *state.DB) (status WriteStatus, err error) {
bc.wg.Add(1)
defer bc.wg.Done()
@ -1063,10 +1064,24 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
batch := bc.db.NewBatch()
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
epoch := block.Header().Epoch
shardingConfig := ShardingSchedule.InstanceForEpoch(epoch)
shardNum := int(shardingConfig.NumShards())
for i := 0; i < shardNum; i++ {
if i == int(block.ShardID()) {
continue
}
shardReceipts := GetToShardReceipts(cxReceipts, uint32(i))
rawdb.WriteCXReceipts(batch, uint32(i), block.NumberU64(), block.Hash(), shardReceipts, false)
}
// Mark incomingReceipts in the block as spent
bc.WriteCXReceiptsProofSpent(block.IncomingReceipts())
// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
-// TODO: figure out reorg issue
+// TODO: Remove reorg code, it's not used in our code
reorg := true
if reorg {
// Reorganise the chain if the parent is not the head block
@ -1104,7 +1119,6 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
n, events, logs, err := bc.insertChain(chain)
bc.PostChainEvents(events, logs)
// TODO ek – make this a post-chain event
if err == nil {
for idx, block := range chain {
header := block.Header()
@ -1118,10 +1132,29 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
err = bc.WriteShardStateBytes(epoch, header.ShardState)
if err != nil {
header.Logger(utils.Logger()).Warn().Err(err).Msg("cannot store shard state")
return n, err
}
}
if len(header.CrossLinks) > 0 {
crossLinks := &types.CrossLinks{}
err = rlp.DecodeBytes(header.CrossLinks, crossLinks)
if err != nil {
header.Logger(utils.Logger()).Warn().Err(err).Msg("[insertChain] cannot parse cross links")
return n, err
}
if !crossLinks.IsSorted() {
header.Logger(utils.Logger()).Warn().Err(err).Msg("[insertChain] cross links are not sorted")
return n, errors.New("proposed cross links are not sorted")
}
for _, crossLink := range *crossLinks {
bc.WriteCrossLinks(types.CrossLinks{crossLink}, false)
bc.DeleteCrossLinks(types.CrossLinks{crossLink}, true)
bc.WriteShardLastCrossLink(crossLink.ShardID(), crossLink)
} }
}
}
}
return n, err
}
@ -1272,13 +1305,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
return i, events, coalescedLogs, err
}
// Process block using the parent state as reference point.
-receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
+receipts, cxReceipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
if err != nil {
bc.reportBlock(block, receipts, err)
return i, events, coalescedLogs, err
}
// Validate the state using the default validator
-err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
+err = bc.Validator().ValidateState(block, parent, state, receipts, cxReceipts, usedGas)
if err != nil {
bc.reportBlock(block, receipts, err)
return i, events, coalescedLogs, err
@ -1286,7 +1319,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
proctime := time.Since(bstart)
// Write the block to the chain and get the status.
-status, err := bc.WriteBlockWithState(block, receipts, state)
+status, err := bc.WriteBlockWithState(block, receipts, cxReceipts, state)
if err != nil {
return i, events, coalescedLogs, err
}
@ -1972,6 +2005,56 @@ func (bc *BlockChain) WriteEpochVdfBlockNum(epoch *big.Int, blockNum *big.Int) e
return nil return nil
} }
// WriteCrossLinks saves the hashes of crosslinks keyed by the shardID and blockNum combination
// temp=true writes a just-received cross link that has not yet been committed to the blockchain by consensus
func (bc *BlockChain) WriteCrossLinks(cls []types.CrossLink, temp bool) error {
var err error
for i := 0; i < len(cls); i++ {
cl := cls[i]
err = rawdb.WriteCrossLinkShardBlock(bc.db, cl.ShardID(), cl.BlockNum().Uint64(), cl.Serialize(), temp)
}
return err
}
// DeleteCrossLinks removes the hashes of crosslinks keyed by the shardID and blockNum combination
// temp=true removes a just-received cross link that has not yet been committed to the blockchain by consensus
func (bc *BlockChain) DeleteCrossLinks(cls []types.CrossLink, temp bool) error {
var err error
for i := 0; i < len(cls); i++ {
cl := cls[i]
err = rawdb.DeleteCrossLinkShardBlock(bc.db, cl.ShardID(), cl.BlockNum().Uint64(), temp)
}
return err
}
// ReadCrossLink retrieves a crosslink given shardID and blockNum.
// temp=true retrieves a just-received cross link that has not yet been committed to the blockchain by consensus
func (bc *BlockChain) ReadCrossLink(shardID uint32, blockNum uint64, temp bool) (*types.CrossLink, error) {
bytes, err := rawdb.ReadCrossLinkShardBlock(bc.db, shardID, blockNum, temp)
if err != nil {
return nil, err
}
crossLink, err := types.DeserializeCrossLink(bytes)
return crossLink, err
}
// WriteShardLastCrossLink saves the last crosslink of a shard
func (bc *BlockChain) WriteShardLastCrossLink(shardID uint32, cl types.CrossLink) error {
return rawdb.WriteShardLastCrossLink(bc.db, cl.ShardID(), cl.Serialize())
}
// ReadShardLastCrossLink retrieves the last crosslink of a shard.
func (bc *BlockChain) ReadShardLastCrossLink(shardID uint32) (*types.CrossLink, error) {
bytes, err := rawdb.ReadShardLastCrossLink(bc.db, shardID)
if err != nil {
return nil, err
}
crossLink, err := types.DeserializeCrossLink(bytes)
return crossLink, err
}
// IsSameLeaderAsPreviousBlock retrieves a block from the database by number, caching it // IsSameLeaderAsPreviousBlock retrieves a block from the database by number, caching it
func (bc *BlockChain) IsSameLeaderAsPreviousBlock(block *types.Block) bool { func (bc *BlockChain) IsSameLeaderAsPreviousBlock(block *types.Block) bool {
if IsEpochBlock(block) { if IsEpochBlock(block) {
@ -1993,3 +2076,146 @@ func (bc *BlockChain) ChainDB() ethdb.Database {
func (bc *BlockChain) GetVMConfig() *vm.Config { func (bc *BlockChain) GetVMConfig() *vm.Config {
return &bc.vmConfig return &bc.vmConfig
} }
// GetToShardReceipts filters the cross shard receipts with given destination shardID
func GetToShardReceipts(cxReceipts types.CXReceipts, shardID uint32) types.CXReceipts {
cxs := types.CXReceipts{}
for i := range cxReceipts {
cx := cxReceipts[i]
if cx.ToShardID == shardID {
cxs = append(cxs, cx)
}
}
return cxs
}
// ReadCXReceipts retrieves the cross shard transaction receipts of a given shard
// temp=true retrieves just-received receipts that have not yet been committed to the blockchain by consensus
func (bc *BlockChain) ReadCXReceipts(shardID uint32, blockNum uint64, blockHash common.Hash, temp bool) (types.CXReceipts, error) {
cxs, err := rawdb.ReadCXReceipts(bc.db, shardID, blockNum, blockHash, temp)
if err != nil || len(cxs) == 0 {
return nil, err
}
return cxs, nil
}
// WriteCXReceipts saves the cross shard transaction receipts of a given shard
// temp=true stores just-received receipts that have not yet been committed to the blockchain by consensus
func (bc *BlockChain) WriteCXReceipts(shardID uint32, blockNum uint64, blockHash common.Hash, receipts types.CXReceipts, temp bool) error {
return rawdb.WriteCXReceipts(bc.db, shardID, blockNum, blockHash, receipts, temp)
}
// CXMerkleProof calculates the cross shard transaction merkle proof of a given destination shard
func (bc *BlockChain) CXMerkleProof(shardID uint32, block *types.Block) (*types.CXMerkleProof, error) {
proof := &types.CXMerkleProof{BlockNum: block.Number(), BlockHash: block.Hash(), ShardID: block.ShardID(), CXReceiptHash: block.Header().OutgoingReceiptHash, CXShardHashes: []common.Hash{}, ShardIDs: []uint32{}}
cxs, err := rawdb.ReadCXReceipts(bc.db, shardID, block.NumberU64(), block.Hash(), false)
if err != nil || cxs == nil {
return nil, err
}
epoch := block.Header().Epoch
shardingConfig := ShardingSchedule.InstanceForEpoch(epoch)
shardNum := int(shardingConfig.NumShards())
for i := 0; i < shardNum; i++ {
receipts, err := bc.ReadCXReceipts(uint32(i), block.NumberU64(), block.Hash(), false)
if err != nil || len(receipts) == 0 {
continue
} else {
hash := types.DeriveSha(receipts)
proof.CXShardHashes = append(proof.CXShardHashes, hash)
proof.ShardIDs = append(proof.ShardIDs, uint32(i))
}
}
if len(proof.ShardIDs) == 0 {
return nil, nil
}
return proof, nil
}
// LatestCXReceiptsCheckpoint returns the latest checkpoint
func (bc *BlockChain) LatestCXReceiptsCheckpoint(shardID uint32) uint64 {
blockNum, _ := rawdb.ReadCXReceiptsProofUnspentCheckpoint(bc.db, shardID)
return blockNum
}
// NextCXReceiptsCheckpoint returns the next checkpoint blockNum
func (bc *BlockChain) NextCXReceiptsCheckpoint(currentNum uint64, shardID uint32) uint64 {
lastCheckpoint, _ := rawdb.ReadCXReceiptsProofUnspentCheckpoint(bc.db, shardID)
newCheckpoint := lastCheckpoint
// the new checkpoint will not exceed currentNum+1
for num := lastCheckpoint; num <= currentNum+1; num++ {
by, _ := rawdb.ReadCXReceiptsProofSpent(bc.db, shardID, num)
if by == rawdb.NAByte {
// TODO: check if there is IncomingReceiptsHash in crosslink header
// if the rootHash is non-empty, it means incomingReceipts are not delivered
// otherwise, it means there is no cross-shard transactions for this block
newCheckpoint = num
continue
}
if by == rawdb.SpentByte {
newCheckpoint = num
continue
}
// the first unspent blockHash found, break the loop
newCheckpoint = num
break
}
return newCheckpoint
}
// cleanCXReceiptsCheckpoints updates the checkpoint and cleans spent receipts up to the checkpoint
func (bc *BlockChain) cleanCXReceiptsCheckpoints(shardID uint32, currentNum uint64) {
lastCheckpoint, err := rawdb.ReadCXReceiptsProofUnspentCheckpoint(bc.db, shardID)
if err != nil {
utils.Logger().Warn().Msg("[cleanCXReceiptsCheckpoints] Cannot get lastCheckpoint")
}
newCheckpoint := bc.NextCXReceiptsCheckpoint(currentNum, shardID)
if lastCheckpoint == newCheckpoint {
return
}
utils.Logger().Debug().Uint64("lastCheckpoint", lastCheckpoint).Uint64("newCheckpoint", newCheckpoint).Msg("[CleanCXReceiptsCheckpoints]")
for num := lastCheckpoint; num < newCheckpoint; num++ {
rawdb.DeleteCXReceiptsProofSpent(bc.db, shardID, num)
}
}
// WriteCXReceiptsProofSpent marks each CXReceiptsProof in the given list as spent in the database
func (bc *BlockChain) WriteCXReceiptsProofSpent(cxps []*types.CXReceiptsProof) {
for _, cxp := range cxps {
rawdb.WriteCXReceiptsProofSpent(bc.db, cxp)
}
}
// IsSpent checks whether a CXReceiptsProof has already been spent
func (bc *BlockChain) IsSpent(cxp *types.CXReceiptsProof) bool {
shardID := cxp.MerkleProof.ShardID
blockNum := cxp.MerkleProof.BlockNum.Uint64()
by, _ := rawdb.ReadCXReceiptsProofSpent(bc.db, shardID, blockNum)
if by == rawdb.SpentByte || cxp.MerkleProof.BlockNum.Uint64() < bc.LatestCXReceiptsCheckpoint(cxp.MerkleProof.ShardID) {
return true
}
return false
}
// CleanCXReceiptsCheckpointsByBlock cleans checkpoints based on incomingReceipts of the given block
func (bc *BlockChain) CleanCXReceiptsCheckpointsByBlock(block *types.Block) {
m := make(map[uint32]uint64)
for _, cxp := range block.IncomingReceipts() {
shardID := cxp.MerkleProof.ShardID
blockNum := cxp.MerkleProof.BlockNum.Uint64()
if _, ok := m[shardID]; !ok {
m[shardID] = blockNum
} else if m[shardID] < blockNum {
m[shardID] = blockNum
}
}
for k, v := range m {
utils.Logger().Debug().Uint32("shardID", k).Uint64("blockNum", v).Msg("[CleanCXReceiptsCheckpoints] Cleaning CXReceiptsProof up to")
bc.cleanCXReceiptsCheckpoints(k, v)
}
}
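For illustration, a minimal standalone sketch of the checkpoint walk performed by NextCXReceiptsCheckpoint above, with an in-memory map standing in for rawdb and locally defined marker bytes; the names here are stand-ins, not the repository's own:

package main

import "fmt"

const (
	spentByte byte = iota // receipts for this block were already spent
	unspentByte           // receipts recorded but not yet spent
	naByte                // no record for this block number
)

// nextCheckpoint advances from lastCheckpoint past spent and absent entries,
// stopping at the first unspent block (and never exceeding currentNum+1).
func nextCheckpoint(spentOf map[uint64]byte, lastCheckpoint, currentNum uint64) uint64 {
	newCheckpoint := lastCheckpoint
	for num := lastCheckpoint; num <= currentNum+1; num++ {
		by, ok := spentOf[num]
		if !ok {
			by = naByte
		}
		newCheckpoint = num
		if by == naByte || by == spentByte {
			continue // nothing to keep for this block, keep advancing
		}
		break // first unspent block found, the checkpoint stops here
	}
	return newCheckpoint
}

func main() {
	spent := map[uint64]byte{10: spentByte, 11: spentByte, 12: unspentByte}
	fmt.Println(nextCheckpoint(spent, 10, 20)) // prints 12
}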

@ -96,7 +96,8 @@ func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) {
b.SetCoinbase(common.Address{}) b.SetCoinbase(common.Address{})
} }
b.statedb.Prepare(tx.Hash(), common.Hash{}, len(b.txs)) b.statedb.Prepare(tx.Hash(), common.Hash{}, len(b.txs))
receipt, _, err := ApplyTransaction(b.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vm.Config{}) // TODO (chao): may need to add cxReceipt for BlockGen
receipt, _, _, err := ApplyTransaction(b.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vm.Config{})
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -185,7 +186,8 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
} }
if b.engine != nil { if b.engine != nil {
// Finalize and seal the block // Finalize and seal the block
block, err := b.engine.Finalize(chainreader, b.header, statedb, b.txs, b.receipts) // TODO (chao): add cxReceipt in the last input
block, err := b.engine.Finalize(chainreader, b.header, statedb, b.txs, b.receipts, nil, nil)
if err != nil { if err != nil {
panic(err) panic(err)
} }

@ -9,13 +9,13 @@ import (
) )
func TestIsEpochBlock(t *testing.T) { func TestIsEpochBlock(t *testing.T) {
block1 := types.NewBlock(&types.Header{Number: big.NewInt(10)}, nil, nil) block1 := types.NewBlock(&types.Header{Number: big.NewInt(10)}, nil, nil, nil, nil)
block2 := types.NewBlock(&types.Header{Number: big.NewInt(0)}, nil, nil) block2 := types.NewBlock(&types.Header{Number: big.NewInt(0)}, nil, nil, nil, nil)
block3 := types.NewBlock(&types.Header{Number: big.NewInt(344064)}, nil, nil) block3 := types.NewBlock(&types.Header{Number: big.NewInt(344064)}, nil, nil, nil, nil)
block4 := types.NewBlock(&types.Header{Number: big.NewInt(77)}, nil, nil) block4 := types.NewBlock(&types.Header{Number: big.NewInt(77)}, nil, nil, nil, nil)
block5 := types.NewBlock(&types.Header{Number: big.NewInt(78)}, nil, nil) block5 := types.NewBlock(&types.Header{Number: big.NewInt(78)}, nil, nil, nil, nil)
block6 := types.NewBlock(&types.Header{Number: big.NewInt(188)}, nil, nil) block6 := types.NewBlock(&types.Header{Number: big.NewInt(188)}, nil, nil, nil, nil)
block7 := types.NewBlock(&types.Header{Number: big.NewInt(189)}, nil, nil) block7 := types.NewBlock(&types.Header{Number: big.NewInt(189)}, nil, nil, nil, nil)
tests := []struct { tests := []struct {
schedule shardingconfig.Schedule schedule shardingconfig.Schedule
block *types.Block block *types.Block

@ -91,7 +91,11 @@ func CanTransfer(db vm.StateDB, addr common.Address, amount *big.Int) bool {
} }
// Transfer subtracts amount from sender and adds amount to recipient using the given Db // Transfer subtracts amount from sender and adds amount to recipient using the given Db
func Transfer(db vm.StateDB, sender, recipient common.Address, amount *big.Int) { func Transfer(db vm.StateDB, sender, recipient common.Address, amount *big.Int, txType types.TransactionType) {
db.SubBalance(sender, amount) if txType == types.SameShardTx || txType == types.SubtractionOnly {
db.AddBalance(recipient, amount) db.SubBalance(sender, amount)
}
if txType == types.SameShardTx || txType == types.AdditionOnly {
db.AddBalance(recipient, amount)
}
} }
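For illustration, a self-contained sketch of the split Transfer semantics above, with a toy balance map in place of vm.StateDB and local stand-ins for the transaction types: the source shard only debits the sender (SubtractionOnly), while the destination shard only credits the recipient (AdditionOnly).

package main

import (
	"fmt"
	"math/big"
)

type txType int

const (
	sameShardTx txType = iota
	subtractionOnly
	additionOnly
)

// transfer mirrors the split Transfer above on a plain balance map.
func transfer(balances map[string]*big.Int, sender, recipient string, amount *big.Int, t txType) {
	if t == sameShardTx || t == subtractionOnly {
		balances[sender] = new(big.Int).Sub(balances[sender], amount)
	}
	if t == sameShardTx || t == additionOnly {
		if balances[recipient] == nil {
			balances[recipient] = new(big.Int)
		}
		balances[recipient] = new(big.Int).Add(balances[recipient], amount)
	}
}

func main() {
	b := map[string]*big.Int{"alice": big.NewInt(100)}
	transfer(b, "alice", "bob", big.NewInt(40), subtractionOnly) // source shard: debit only
	transfer(b, "alice", "bob", big.NewInt(40), additionOnly)    // destination shard: credit only
	fmt.Println(b["alice"], b["bob"]) // 60 40
}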

@ -261,7 +261,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
statedb.Commit(false) statedb.Commit(false)
statedb.Database().TrieDB().Commit(root, true) statedb.Database().TrieDB().Commit(root, true)
return types.NewBlock(head, nil, nil) return types.NewBlock(head, nil, nil, nil, nil)
} }
// Commit writes the block and state of a genesis specification to the database. // Commit writes the block and state of a genesis specification to the database.

@ -29,6 +29,13 @@ import (
"github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/internal/utils"
) )
// Indicate whether the receipts corresponding to a blockHash are spent or not
const (
SpentByte byte = iota
UnspentByte
NAByte // no spent/unspent record exists
)
// ReadCanonicalHash retrieves the hash assigned to a canonical block number. // ReadCanonicalHash retrieves the hash assigned to a canonical block number.
func ReadCanonicalHash(db DatabaseReader, number uint64) common.Hash { func ReadCanonicalHash(db DatabaseReader, number uint64) common.Hash {
data, _ := db.Get(headerHashKey(number)) data, _ := db.Get(headerHashKey(number))
@ -332,7 +339,7 @@ func ReadBlock(db DatabaseReader, hash common.Hash, number uint64) *types.Block
if body == nil { if body == nil {
return nil return nil
} }
return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles) return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles, body.IncomingReceipts)
} }
// WriteBlock serializes a block into the database, header and body separately. // WriteBlock serializes a block into the database, header and body separately.
@ -464,7 +471,6 @@ func WriteLastCommits(
if err = db.Put(lastCommitsKey, data); err != nil { if err = db.Put(lastCommitsKey, data); err != nil {
return ctxerror.New("cannot write last commits").WithCause(err) return ctxerror.New("cannot write last commits").WithCause(err)
} }
utils.GetLogger().Info("wrote last commits", "numShards", len(data))
return nil return nil
} }
@ -502,3 +508,103 @@ func ReadEpochVdfBlockNum(db DatabaseReader, epoch *big.Int) ([]byte, error) {
func WriteEpochVdfBlockNum(db DatabaseWriter, epoch *big.Int, data []byte) error { func WriteEpochVdfBlockNum(db DatabaseWriter, epoch *big.Int, data []byte) error {
return db.Put(epochVdfBlockNumberKey(epoch), data) return db.Put(epochVdfBlockNumberKey(epoch), data)
} }
// ReadCrossLinkShardBlock retrieves the serialized crosslink given shardID and blockNum
func ReadCrossLinkShardBlock(db DatabaseReader, shardID uint32, blockNum uint64, temp bool) ([]byte, error) {
return db.Get(crosslinkKey(shardID, blockNum, temp))
}
// WriteCrossLinkShardBlock stores the serialized crosslink given shardID and blockNum
func WriteCrossLinkShardBlock(db DatabaseWriter, shardID uint32, blockNum uint64, data []byte, temp bool) error {
return db.Put(crosslinkKey(shardID, blockNum, temp), data)
}
// DeleteCrossLinkShardBlock deletes the crosslink given shardID and blockNum
func DeleteCrossLinkShardBlock(db DatabaseDeleter, shardID uint32, blockNum uint64, temp bool) error {
return db.Delete(crosslinkKey(shardID, blockNum, temp))
}
// ReadShardLastCrossLink reads the last cross link of a shard
func ReadShardLastCrossLink(db DatabaseReader, shardID uint32) ([]byte, error) {
return db.Get(shardLastCrosslinkKey(shardID))
}
// WriteShardLastCrossLink stores the last cross link of a shard
func WriteShardLastCrossLink(db DatabaseWriter, shardID uint32, data []byte) error {
return db.Put(shardLastCrosslinkKey(shardID), data)
}
// ReadCXReceipts retrieves all the cross shard transaction receipts for a given destination shardID, block number and blockHash
func ReadCXReceipts(db DatabaseReader, shardID uint32, number uint64, hash common.Hash, temp bool) (types.CXReceipts, error) {
data, err := db.Get(cxReceiptKey(shardID, number, hash, temp))
if len(data) == 0 || err != nil {
utils.Logger().Info().Err(err).Uint64("number", number).Int("dataLen", len(data)).Msg("ReadCXReceipts")
return nil, err
}
cxReceipts := types.CXReceipts{}
if err := rlp.DecodeBytes(data, &cxReceipts); err != nil {
utils.Logger().Error().Err(err).Str("hash", hash.Hex()).Msg("Invalid cross-shard tx receipt array RLP")
return nil, err
}
return cxReceipts, nil
}
// WriteCXReceipts stores all the transaction receipts given destination shardID, blockNumber and blockHash
func WriteCXReceipts(db DatabaseWriter, shardID uint32, number uint64, hash common.Hash, receipts types.CXReceipts, temp bool) error {
bytes, err := rlp.EncodeToBytes(receipts)
if err != nil {
utils.Logger().Error().Msg("[WriteCXReceipts] Failed to encode cross shard tx receipts")
}
// Store the receipt slice
if err := db.Put(cxReceiptKey(shardID, number, hash, temp), bytes); err != nil {
utils.Logger().Error().Msg("[WriteCXReceipts] Failed to store cxreceipts")
}
return err
}
// DeleteCXReceipts removes all receipt data associated with a block hash.
func DeleteCXReceipts(db DatabaseDeleter, shardID uint32, number uint64, hash common.Hash, temp bool) {
if err := db.Delete(cxReceiptKey(shardID, number, hash, temp)); err != nil {
utils.Logger().Error().Msg("Failed to delete cross shard tx receipts")
}
}
// ReadCXReceiptsProofSpent returns the spent indicator byte of a CXReceiptsProof given shardID and block number
func ReadCXReceiptsProofSpent(db DatabaseReader, shardID uint32, number uint64) (byte, error) {
data, err := db.Get(cxReceiptSpentKey(shardID, number))
if err != nil || len(data) == 0 {
return NAByte, ctxerror.New("[ReadCXReceiptsProofSpent] Cannot find the key", "shardID", shardID, "number", number).WithCause(err)
}
return data[0], nil
}
// WriteCXReceiptsProofSpent writes a CXReceiptsProof as spent into the database
func WriteCXReceiptsProofSpent(dbw DatabaseWriter, cxp *types.CXReceiptsProof) error {
shardID := cxp.MerkleProof.ShardID
blockNum := cxp.MerkleProof.BlockNum.Uint64()
return dbw.Put(cxReceiptSpentKey(shardID, blockNum), []byte{SpentByte})
}
// DeleteCXReceiptsProofSpent removes the spent indicator for the given shardID and block number
func DeleteCXReceiptsProofSpent(db DatabaseDeleter, shardID uint32, number uint64) {
if err := db.Delete(cxReceiptSpentKey(shardID, number)); err != nil {
utils.Logger().Error().Msg("Failed to delete receipts unspent indicator")
}
}
// ReadCXReceiptsProofUnspentCheckpoint returns the last unspent checkpoint block number
func ReadCXReceiptsProofUnspentCheckpoint(db DatabaseReader, shardID uint32) (uint64, error) {
by, err := db.Get(cxReceiptUnspentCheckpointKey(shardID))
if err != nil {
return 0, ctxerror.New("[ReadCXReceiptsProofUnspentCheckpoint] Cannot read unspent checkpoint", "shardID", shardID).WithCause(err)
}
lastCheckpoint := binary.BigEndian.Uint64(by[:])
return lastCheckpoint, nil
}
// WriteCXReceiptsProofUnspentCheckpoint stores the unspent checkpoint block number for the given shard
func WriteCXReceiptsProofUnspentCheckpoint(db DatabaseWriter, shardID uint32, blockNum uint64) error {
by := make([]byte, 8)
binary.BigEndian.PutUint64(by[:], blockNum)
return db.Put(cxReceiptUnspentCheckpointKey(shardID), by)
}
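A small standalone sketch of how the unspent checkpoint above is persisted: the block number is written as an 8-byte big-endian value under a per-shard key, with a map standing in for the database here.

package main

import (
	"encoding/binary"
	"fmt"
)

// writeCheckpoint stores blockNum as 8 big-endian bytes keyed by shardID.
func writeCheckpoint(db map[uint32][]byte, shardID uint32, blockNum uint64) {
	by := make([]byte, 8)
	binary.BigEndian.PutUint64(by, blockNum)
	db[shardID] = by
}

// readCheckpoint decodes the stored value back into a block number.
func readCheckpoint(db map[uint32][]byte, shardID uint32) (uint64, bool) {
	by, ok := db[shardID]
	if !ok || len(by) != 8 {
		return 0, false
	}
	return binary.BigEndian.Uint64(by), true
}

func main() {
	db := map[uint32][]byte{}
	writeCheckpoint(db, 2, 12345)
	num, ok := readCheckpoint(db, 2)
	fmt.Println(num, ok) // 12345 true
}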

@ -34,7 +34,7 @@ func TestLookupStorage(t *testing.T) {
tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), 0, big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33}) tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), 0, big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
txs := []*types.Transaction{tx1, tx2, tx3} txs := []*types.Transaction{tx1, tx2, tx3}
block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil) block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, nil)
// Check that no transactions entries are in a pristine database // Check that no transactions entries are in a pristine database
for i, tx := range txs { for i, tx := range txs {

@ -60,6 +60,16 @@ var (
preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage
configPrefix = []byte("ethereum-config-") // config prefix for the db configPrefix = []byte("ethereum-config-") // config prefix for the db
shardLastCrosslinkPrefix = []byte("shard-last-cross-link") // prefix for shard last crosslink
crosslinkPrefix = []byte("crosslink") // prefix for crosslink
tempCrosslinkPrefix = []byte("tempCrosslink") // prefix for tempCrosslink
cxReceiptPrefix = []byte("cxReceipt") // prefix for cross shard transaction receipt
tempCxReceiptPrefix = []byte("tempCxReceipt") // prefix for temporary cross shard transaction receipt
cxReceiptHashPrefix = []byte("cxReceiptHash") // prefix for cross shard transaction receipt hash
cxReceiptSpentPrefix = []byte("cxReceiptSpent") // prefix for indicator of unspent of cxReceiptsProof
cxReceiptUnspentCheckpointPrefix = []byte("cxReceiptUnspentCheckpoint") // prefix for cxReceiptsProof unspent checkpoint
// epochBlockNumberPrefix + epoch (big.Int.Bytes()) // epochBlockNumberPrefix + epoch (big.Int.Bytes())
// -> epoch block number (big.Int.Bytes()) // -> epoch block number (big.Int.Bytes())
epochBlockNumberPrefix = []byte("harmony-epoch-block-number-") epochBlockNumberPrefix = []byte("harmony-epoch-block-number-")
@ -162,3 +172,52 @@ func epochVrfBlockNumbersKey(epoch *big.Int) []byte {
func epochVdfBlockNumberKey(epoch *big.Int) []byte { func epochVdfBlockNumberKey(epoch *big.Int) []byte {
return append(epochVdfBlockNumberPrefix, epoch.Bytes()...) return append(epochVdfBlockNumberPrefix, epoch.Bytes()...)
} }
func shardLastCrosslinkKey(shardID uint32) []byte {
sbKey := make([]byte, 4)
binary.BigEndian.PutUint32(sbKey, shardID)
key := append(shardLastCrosslinkPrefix, sbKey...)
return key
}
func crosslinkKey(shardID uint32, blockNum uint64, temp bool) []byte {
prefix := crosslinkPrefix
if temp {
prefix = tempCrosslinkPrefix
}
sbKey := make([]byte, 12)
binary.BigEndian.PutUint32(sbKey, shardID)
binary.BigEndian.PutUint64(sbKey[4:], blockNum)
key := append(prefix, sbKey...)
return key
}
// cxReceiptKey = cxReceiptPrefix + shardID + num (uint64 big endian) + hash
func cxReceiptKey(shardID uint32, number uint64, hash common.Hash, temp bool) []byte {
prefix := cxReceiptPrefix
if temp {
prefix = tempCxReceiptPrefix
}
sKey := make([]byte, 4)
binary.BigEndian.PutUint32(sKey, shardID)
tmp := append(prefix, sKey...)
tmp1 := append(tmp, encodeBlockNumber(number)...)
return append(tmp1, hash.Bytes()...)
}
// cxReceiptSpentKey = cxReceiptSpentPrefix + shardID + num (uint64 big endian)
func cxReceiptSpentKey(shardID uint32, number uint64) []byte {
prefix := cxReceiptSpentPrefix
sKey := make([]byte, 4)
binary.BigEndian.PutUint32(sKey, shardID)
tmp := append(prefix, sKey...)
return append(tmp, encodeBlockNumber(number)...)
}
// cxReceiptUnspentCheckpointKey = cxReceiptUnspentCheckpointPrefix + shardID
func cxReceiptUnspentCheckpointKey(shardID uint32) []byte {
prefix := cxReceiptUnspentCheckpointPrefix
sKey := make([]byte, 4)
binary.BigEndian.PutUint32(sKey, shardID)
return append(prefix, sKey...)
}
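A standalone sketch of the key layouts defined above: each key is a string prefix followed by a 4-byte big-endian shardID, then (for block-scoped keys) an 8-byte big-endian block number and, for receipt keys, the 32-byte block hash. The prefixes are passed in explicitly so the sketch stays self-contained.

package main

import (
	"encoding/binary"
	"fmt"
)

// crosslinkKeySketch builds prefix | shardID (4 bytes) | blockNum (8 bytes).
func crosslinkKeySketch(prefix []byte, shardID uint32, blockNum uint64) []byte {
	sbKey := make([]byte, 12)
	binary.BigEndian.PutUint32(sbKey, shardID)
	binary.BigEndian.PutUint64(sbKey[4:], blockNum)
	return append(append([]byte{}, prefix...), sbKey...)
}

// cxReceiptKeySketch builds prefix | shardID (4) | blockNum (8) | blockHash (32).
func cxReceiptKeySketch(prefix []byte, shardID uint32, blockNum uint64, blockHash [32]byte) []byte {
	key := append([]byte{}, prefix...)
	sKey := make([]byte, 4)
	binary.BigEndian.PutUint32(sKey, shardID)
	key = append(key, sKey...)
	num := make([]byte, 8)
	binary.BigEndian.PutUint64(num, blockNum)
	key = append(key, num...)
	return append(key, blockHash[:]...)
}

func main() {
	fmt.Println(len(crosslinkKeySketch([]byte("crosslink"), 3, 42)))            // 9 + 12 = 21 bytes
	fmt.Println(len(cxReceiptKeySketch([]byte("cxReceipt"), 1, 7, [32]byte{}))) // 9 + 4 + 8 + 32 = 53 bytes
}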

@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
) )
@ -675,6 +676,6 @@ func (db *DB) Commit(deleteEmptyObjects bool) (root common.Hash, err error) {
} }
return nil return nil
}) })
log.Debug("Trie cache stats after commit", "misses", trie.CacheMisses(), "unloads", trie.CacheUnloads()) //log.Debug("Trie cache stats after commit", "misses", trie.CacheMisses(), "unloads", trie.CacheUnloads())
return root, err return root, err
} }

@ -17,6 +17,8 @@
package core package core
import ( import (
"fmt"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
@ -25,6 +27,7 @@ import (
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/core/vm" "github.com/harmony-one/harmony/core/vm"
"github.com/harmony-one/harmony/internal/ctxerror" "github.com/harmony-one/harmony/internal/ctxerror"
"github.com/harmony-one/harmony/internal/utils"
) )
// StateProcessor is a basic Processor, which takes care of transitioning // StateProcessor is a basic Processor, which takes care of transitioning
@ -53,56 +56,83 @@ func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consen
// Process returns the receipts and logs accumulated during the process and // Process returns the receipts and logs accumulated during the process and
// returns the amount of gas that was used in the process. If any of the // returns the amount of gas that was used in the process. If any of the
// transactions failed to execute due to insufficient gas it will return an error. // transactions failed to execute due to insufficient gas it will return an error.
func (p *StateProcessor) Process(block *types.Block, statedb *state.DB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) { func (p *StateProcessor) Process(block *types.Block, statedb *state.DB, cfg vm.Config) (types.Receipts, types.CXReceipts, []*types.Log, uint64, error) {
var ( var (
receipts types.Receipts receipts types.Receipts
outcxs types.CXReceipts
incxs = block.IncomingReceipts()
usedGas = new(uint64) usedGas = new(uint64)
header = block.Header() header = block.Header()
coinbase = block.Header().Coinbase coinbase = block.Header().Coinbase
allLogs []*types.Log allLogs []*types.Log
gp = new(GasPool).AddGas(block.GasLimit()) gp = new(GasPool).AddGas(block.GasLimit())
) )
// Mutate the block and state according to any hard-fork specs
//if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
// misc.ApplyDAOHardFork(statedb)
//}
// Iterate over and process the individual transactions // Iterate over and process the individual transactions
for i, tx := range block.Transactions() { for i, tx := range block.Transactions() {
statedb.Prepare(tx.Hash(), block.Hash(), i) statedb.Prepare(tx.Hash(), block.Hash(), i)
receipt, _, err := ApplyTransaction(p.config, p.bc, &coinbase, gp, statedb, header, tx, usedGas, cfg) receipt, cxReceipt, _, err := ApplyTransaction(p.config, p.bc, &coinbase, gp, statedb, header, tx, usedGas, cfg)
if err != nil { if err != nil {
return nil, nil, 0, err return nil, nil, nil, 0, err
} }
receipts = append(receipts, receipt) receipts = append(receipts, receipt)
if cxReceipt != nil {
outcxs = append(outcxs, cxReceipt)
}
allLogs = append(allLogs, receipt.Logs...) allLogs = append(allLogs, receipt.Logs...)
} }
// incomingReceipts should always be processed after transactions (to be consistent with the block proposal)
for _, cx := range block.IncomingReceipts() {
ApplyIncomingReceipt(p.config, statedb, header, cx)
}
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards) // Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
_, err := p.engine.Finalize(p.bc, header, statedb, block.Transactions(), receipts) _, err := p.engine.Finalize(p.bc, header, statedb, block.Transactions(), receipts, outcxs, incxs)
if err != nil { if err != nil {
return nil, nil, 0, ctxerror.New("cannot finalize block").WithCause(err) return nil, nil, nil, 0, ctxerror.New("cannot finalize block").WithCause(err)
} }
return receipts, allLogs, *usedGas, nil return receipts, outcxs, allLogs, *usedGas, nil
}
// getTransactionType classifies a transaction relative to the shard processing it
func getTransactionType(header *types.Header, tx *types.Transaction) types.TransactionType {
if tx.ShardID() == tx.ToShardID() && header.ShardID == tx.ShardID() {
return types.SameShardTx
}
if tx.ShardID() != tx.ToShardID() && header.ShardID == tx.ShardID() {
return types.SubtractionOnly
}
return types.InvalidTx
} }
// ApplyTransaction attempts to apply a transaction to the given state database // ApplyTransaction attempts to apply a transaction to the given state database
// and uses the input parameters for its environment. It returns the receipt // and uses the input parameters for its environment. It returns the receipt
// for the transaction, gas used and an error if the transaction failed, // for the transaction, gas used and an error if the transaction failed,
// indicating the block was invalid. // indicating the block was invalid.
func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.DB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, uint64, error) { func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.DB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, *types.CXReceipt, uint64, error) {
txType := getTransactionType(header, tx)
if txType == types.InvalidTx {
return nil, nil, 0, fmt.Errorf("Invalid Transaction Type")
}
msg, err := tx.AsMessage(types.MakeSigner(config, header.Number)) msg, err := tx.AsMessage(types.MakeSigner(config, header.Number))
// skip signer err for additiononly tx
if err != nil { if err != nil {
return nil, 0, err return nil, nil, 0, err
} }
// Create a new context to be used in the EVM environment // Create a new context to be used in the EVM environment
context := NewEVMContext(msg, header, bc, author) context := NewEVMContext(msg, header, bc, author)
context.TxType = txType
// Create a new environment which holds all relevant information // Create a new environment which holds all relevant information
// about the transaction and calling mechanisms. // about the transaction and calling mechanisms.
vmenv := vm.NewEVM(context, statedb, config, cfg) vmenv := vm.NewEVM(context, statedb, config, cfg)
// Apply the transaction to the current state (included in the env) // Apply the transaction to the current state (included in the env)
_, gas, failed, err := ApplyMessage(vmenv, msg, gp) _, gas, failed, err := ApplyMessage(vmenv, msg, gp)
if err != nil { if err != nil {
return nil, 0, err return nil, nil, 0, err
} }
// Update the state with pending changes // Update the state with pending changes
var root []byte var root []byte
@ -126,5 +156,34 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
//receipt.Logs = statedb.GetLogs(tx.Hash()) //receipt.Logs = statedb.GetLogs(tx.Hash())
receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
return receipt, gas, err var cxReceipt *types.CXReceipt
if txType == types.SubtractionOnly {
cxReceipt = &types.CXReceipt{tx.Hash(), msg.From(), msg.To(), tx.ShardID(), tx.ToShardID(), msg.Value()}
} else {
cxReceipt = nil
}
return receipt, cxReceipt, gas, err
}
// ApplyIncomingReceipt adds the amount to the To address of each receipt in the proof
func ApplyIncomingReceipt(config *params.ChainConfig, db *state.DB, header *types.Header, cxp *types.CXReceiptsProof) {
if cxp == nil {
return
}
// TODO: how to charge gas here?
for _, cx := range cxp.Receipts {
if cx == nil || cx.To == nil { // should not happen
utils.Logger().Warn().Msg("ApplyIncomingReceipts: Invalid incoming receipt!!")
continue
}
utils.Logger().Info().Msgf("ApplyIncomingReceipts: ADDING BALANCE %d", cx.Amount)
if !db.Exist(*cx.To) {
db.CreateAccount(*cx.To)
}
db.AddBalance(*cx.To, cx.Amount)
db.IntermediateRoot(config.IsEIP158(header.Number)).Bytes()
}
} }
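A standalone sketch of the classification rule implemented by getTransactionType above, with plain shard IDs standing in for the header and transaction fields: same-shard when source, destination and processing shard all match; subtraction-only when the processing shard is the source but not the destination; invalid otherwise (the destination shard credits the balance via incoming receipts instead).

package main

import "fmt"

// classify mirrors getTransactionType on bare shard IDs.
func classify(headerShard, txFromShard, txToShard uint32) string {
	switch {
	case txFromShard == txToShard && headerShard == txFromShard:
		return "SameShardTx"
	case txFromShard != txToShard && headerShard == txFromShard:
		return "SubtractionOnly"
	default:
		return "InvalidTx"
	}
}

func main() {
	fmt.Println(classify(0, 0, 0)) // SameShardTx
	fmt.Println(classify(0, 0, 1)) // SubtractionOnly: the debit happens on the source shard
	fmt.Println(classify(1, 0, 1)) // InvalidTx: credits arrive through ApplyIncomingReceipt, not transaction processing
}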

@ -218,6 +218,7 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo
// The only possible consensus-error would be if there wasn't // The only possible consensus-error would be if there wasn't
// sufficient balance to make the transfer happen. The first // sufficient balance to make the transfer happen. The first
// balance transfer may never fail. // balance transfer may never fail.
if vmerr == vm.ErrInsufficientBalance { if vmerr == vm.ErrInsufficientBalance {
return nil, 0, false, vmerr return nil, 0, false, vmerr
} }

@ -54,7 +54,7 @@ type testBlockChain struct {
func (bc *testBlockChain) CurrentBlock() *types.Block { func (bc *testBlockChain) CurrentBlock() *types.Block {
return types.NewBlock(&types.Header{ return types.NewBlock(&types.Header{
GasLimit: bc.gasLimit, GasLimit: bc.gasLimit,
}, nil, nil) }, nil, nil, nil, nil)
} }
func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {

@ -32,7 +32,7 @@ type Validator interface {
// ValidateState validates the given statedb and optionally the receipts and // ValidateState validates the given statedb and optionally the receipts and
// gas used. // gas used.
ValidateState(block, parent *types.Block, state *state.DB, receipts types.Receipts, usedGas uint64) error ValidateState(block, parent *types.Block, state *state.DB, receipts types.Receipts, cxs types.CXReceipts, usedGas uint64) error
} }
// Processor is an interface for processing blocks using a given initial state. // Processor is an interface for processing blocks using a given initial state.
@ -42,5 +42,5 @@ type Validator interface {
// of gas used in the process and return an error if any of the internal rules // of gas used in the process and return an error if any of the internal rules
// failed. // failed.
type Processor interface { type Processor interface {
Process(block *types.Block, statedb *state.DB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) Process(block *types.Block, statedb *state.DB, cfg vm.Config) (types.Receipts, types.CXReceipts, []*types.Log, uint64, error)
} }

@ -71,18 +71,20 @@ func (n *BlockNonce) UnmarshalText(input []byte) error {
// Header represents a block header in the Harmony blockchain. // Header represents a block header in the Harmony blockchain.
type Header struct { type Header struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"` ParentHash common.Hash `json:"parentHash" gencodec:"required"`
Coinbase common.Address `json:"miner" gencodec:"required"` Coinbase common.Address `json:"miner" gencodec:"required"`
Root common.Hash `json:"stateRoot" gencodec:"required"` Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash common.Hash `json:"transactionsRoot" gencodec:"required"` TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom ethtypes.Bloom `json:"logsBloom" gencodec:"required"` OutgoingReceiptHash common.Hash `json:"outgoingReceiptsRoot" gencodec:"required"`
Number *big.Int `json:"number" gencodec:"required"` IncomingReceiptHash common.Hash `json:"incomingReceiptsRoot" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"` Bloom ethtypes.Bloom `json:"logsBloom" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"` Number *big.Int `json:"number" gencodec:"required"`
Time *big.Int `json:"timestamp" gencodec:"required"` GasLimit uint64 `json:"gasLimit" gencodec:"required"`
Extra []byte `json:"extraData" gencodec:"required"` GasUsed uint64 `json:"gasUsed" gencodec:"required"`
MixDigest common.Hash `json:"mixHash" gencodec:"required"` Time *big.Int `json:"timestamp" gencodec:"required"`
Extra []byte `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash" gencodec:"required"`
// Additional Fields // Additional Fields
ViewID *big.Int `json:"viewID" gencodec:"required"` ViewID *big.Int `json:"viewID" gencodec:"required"`
Epoch *big.Int `json:"epoch" gencodec:"required"` Epoch *big.Int `json:"epoch" gencodec:"required"`
@ -93,6 +95,7 @@ type Header struct {
Vrf []byte `json:"vrf"` Vrf []byte `json:"vrf"`
Vdf []byte `json:"vdf"` Vdf []byte `json:"vdf"`
ShardState []byte `json:"shardState"` ShardState []byte `json:"shardState"`
CrossLinks []byte `json:"crossLink"`
} }
// field type overrides for gencodec // field type overrides for gencodec
@ -151,15 +154,17 @@ func rlpHash(x interface{}) (h common.Hash) {
// Body is a simple (mutable, non-safe) data container for storing and moving // Body is a simple (mutable, non-safe) data container for storing and moving
// a block's data contents (transactions and uncles) together. // a block's data contents (transactions and uncles) together.
type Body struct { type Body struct {
Transactions []*Transaction Transactions []*Transaction
Uncles []*Header Uncles []*Header
IncomingReceipts CXReceiptsProofs
} }
// Block represents an entire block in the Ethereum blockchain. // Block represents an entire block in the Ethereum blockchain.
type Block struct { type Block struct {
header *Header header *Header
uncles []*Header uncles []*Header
transactions Transactions transactions Transactions
incomingReceipts CXReceiptsProofs
// caches // caches
hash atomic.Value hash atomic.Value
@ -203,9 +208,10 @@ type StorageBlock Block
// "external" block encoding. used for eth protocol, etc. // "external" block encoding. used for eth protocol, etc.
type extblock struct { type extblock struct {
Header *Header Header *Header
Txs []*Transaction Txs []*Transaction
Uncles []*Header Uncles []*Header
IncomingReceipts CXReceiptsProofs
} }
// [deprecated by eth/63] // [deprecated by eth/63]
@ -224,7 +230,7 @@ type storageblock struct {
// The values of TxHash, UncleHash, ReceiptHash and Bloom in header // The values of TxHash, UncleHash, ReceiptHash and Bloom in header
// are ignored and set to values derived from the given txs, // are ignored and set to values derived from the given txs,
// and receipts. // and receipts.
func NewBlock(header *Header, txs []*Transaction, receipts []*Receipt) *Block { func NewBlock(header *Header, txs []*Transaction, receipts []*Receipt, outcxs []*CXReceipt, incxs []*CXReceiptsProof) *Block {
b := &Block{header: CopyHeader(header)} b := &Block{header: CopyHeader(header)}
// TODO: panic if len(txs) != len(receipts) // TODO: panic if len(txs) != len(receipts)
@ -243,6 +249,16 @@ func NewBlock(header *Header, txs []*Transaction, receipts []*Receipt) *Block {
b.header.Bloom = CreateBloom(receipts) b.header.Bloom = CreateBloom(receipts)
} }
b.header.OutgoingReceiptHash = DeriveMultipleShardsSha(CXReceipts(outcxs))
if len(incxs) == 0 {
b.header.IncomingReceiptHash = EmptyRootHash
} else {
b.header.IncomingReceiptHash = DeriveSha(CXReceiptsProofs(incxs))
b.incomingReceipts = make(CXReceiptsProofs, len(incxs))
copy(b.incomingReceipts, incxs)
}
return b return b
} }
@ -286,10 +302,14 @@ func CopyHeader(h *Header) *Header {
cpy.Vdf = make([]byte, len(h.Vdf)) cpy.Vdf = make([]byte, len(h.Vdf))
copy(cpy.Vdf, h.Vdf) copy(cpy.Vdf, h.Vdf)
} }
//if len(h.CrossLinks) > 0 { if len(h.CrossLinks) > 0 {
// cpy.CrossLinks = make([]byte, len(h.CrossLinks)) cpy.CrossLinks = make([]byte, len(h.CrossLinks))
// copy(cpy.CrossLinks, h.CrossLinks) copy(cpy.CrossLinks, h.CrossLinks)
//} }
if len(h.LastCommitBitmap) > 0 {
cpy.LastCommitBitmap = make([]byte, len(h.LastCommitBitmap))
copy(cpy.LastCommitBitmap, h.LastCommitBitmap)
}
return &cpy return &cpy
} }
@ -300,7 +320,7 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error {
if err := s.Decode(&eb); err != nil { if err := s.Decode(&eb); err != nil {
return err return err
} }
b.header, b.uncles, b.transactions = eb.Header, eb.Uncles, eb.Txs b.header, b.uncles, b.transactions, b.incomingReceipts = eb.Header, eb.Uncles, eb.Txs, eb.IncomingReceipts
b.size.Store(common.StorageSize(rlp.ListSize(size))) b.size.Store(common.StorageSize(rlp.ListSize(size)))
return nil return nil
} }
@ -308,9 +328,10 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error {
// EncodeRLP serializes b into the Ethereum RLP block format. // EncodeRLP serializes b into the Ethereum RLP block format.
func (b *Block) EncodeRLP(w io.Writer) error { func (b *Block) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, extblock{ return rlp.Encode(w, extblock{
Header: b.header, Header: b.header,
Txs: b.transactions, Txs: b.transactions,
Uncles: b.uncles, Uncles: b.uncles,
IncomingReceipts: b.incomingReceipts,
}) })
} }
@ -335,6 +356,11 @@ func (b *Block) Transactions() Transactions {
return b.transactions return b.transactions
} }
// IncomingReceipts returns the verified incoming cross-shard receipt proofs
func (b *Block) IncomingReceipts() CXReceiptsProofs {
return b.incomingReceipts
}
// Transaction returns Transaction. // Transaction returns Transaction.
func (b *Block) Transaction(hash common.Hash) *Transaction { func (b *Block) Transaction(hash common.Hash) *Transaction {
for _, transaction := range b.transactions { for _, transaction := range b.transactions {
@ -387,6 +413,9 @@ func (b *Block) TxHash() common.Hash { return b.header.TxHash }
// ReceiptHash returns header receipt hash. // ReceiptHash returns header receipt hash.
func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash } func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash }
// OutgoingReceiptHash returns header cross shard receipt hash.
func (b *Block) OutgoingReceiptHash() common.Hash { return b.header.OutgoingReceiptHash }
// Extra returns header extra. // Extra returns header extra.
func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) } func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) }
@ -394,7 +423,7 @@ func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) }
func (b *Block) Header() *Header { return CopyHeader(b.header) } func (b *Block) Header() *Header { return CopyHeader(b.header) }
// Body returns the non-header content of the block. // Body returns the non-header content of the block.
func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles} } func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles, b.incomingReceipts} }
// Vdf returns header Vdf. // Vdf returns header Vdf.
func (b *Block) Vdf() []byte { return common.CopyBytes(b.header.Vdf) } func (b *Block) Vdf() []byte { return common.CopyBytes(b.header.Vdf) }
@ -439,11 +468,12 @@ func (b *Block) WithSeal(header *Header) *Block {
} }
// WithBody returns a new block with the given transaction and uncle contents. // WithBody returns a new block with the given transaction and uncle contents.
func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block { func (b *Block) WithBody(transactions []*Transaction, uncles []*Header, incomingReceipts CXReceiptsProofs) *Block {
block := &Block{ block := &Block{
header: CopyHeader(b.header), header: CopyHeader(b.header),
transactions: make([]*Transaction, len(transactions)), transactions: make([]*Transaction, len(transactions)),
uncles: make([]*Header, len(uncles)), uncles: make([]*Header, len(uncles)),
incomingReceipts: incomingReceipts,
} }
copy(block.transactions, transactions) copy(block.transactions, transactions)
for i := range uncles { for i := range uncles {

@ -0,0 +1,83 @@
package types
import (
"math/big"
"sort"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/common"
)
// CrossLink is only used on beacon chain to store the hash links from other shards
type CrossLink struct {
ChainHeader *Header
}
// NewCrossLink returns a new cross link object
func NewCrossLink(header *Header) CrossLink {
return CrossLink{header}
}
// Header returns header
func (cl CrossLink) Header() *Header {
return cl.ChainHeader
}
// ShardID returns shardID
func (cl CrossLink) ShardID() uint32 {
return cl.ChainHeader.ShardID
}
// BlockNum returns blockNum
func (cl CrossLink) BlockNum() *big.Int {
return cl.ChainHeader.Number
}
// Hash returns hash
func (cl CrossLink) Hash() common.Hash {
return cl.ChainHeader.Hash()
}
// StateRoot returns hash of state root
func (cl CrossLink) StateRoot() common.Hash {
return cl.ChainHeader.Root
}
// OutgoingReceiptsRoot returns hash of cross shard receipts
func (cl CrossLink) OutgoingReceiptsRoot() common.Hash {
return cl.ChainHeader.OutgoingReceiptHash
}
// Serialize returns bytes of cross link rlp-encoded content
func (cl CrossLink) Serialize() []byte {
bytes, _ := rlp.EncodeToBytes(cl)
return bytes
}
// DeserializeCrossLink rlp-decodes the bytes into a cross link object.
func DeserializeCrossLink(bytes []byte) (*CrossLink, error) {
cl := &CrossLink{}
err := rlp.DecodeBytes(bytes, cl)
if err != nil {
return nil, err
}
return cl, err
}
// CrossLinks is a collection of cross links
type CrossLinks []CrossLink
// Sort sorts the cross links by shardID and then by blockNum
func (cls CrossLinks) Sort() {
sort.Slice(cls, func(i, j int) bool {
return cls[i].ShardID() < cls[j].ShardID() || (cls[i].ShardID() == cls[j].ShardID() && cls[i].BlockNum().Cmp(cls[j].BlockNum()) < 0)
})
}
// IsSorted checks whether the cross links are sorted
func (cls CrossLinks) IsSorted() bool {
return sort.SliceIsSorted(cls, func(i, j int) bool {
return cls[i].ShardID() < cls[j].ShardID() || (cls[i].ShardID() == cls[j].ShardID() && cls[i].BlockNum().Cmp(cls[j].BlockNum()) < 0)
})
}
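A minimal sketch of the (shardID, blockNum) ordering that Sort and IsSorted enforce above, using a simple struct in place of CrossLink headers.

package main

import (
	"fmt"
	"sort"
)

type link struct {
	shardID  uint32
	blockNum uint64
}

// sortLinks orders by shardID first, then by blockNum within a shard.
func sortLinks(ls []link) {
	sort.Slice(ls, func(i, j int) bool {
		return ls[i].shardID < ls[j].shardID ||
			(ls[i].shardID == ls[j].shardID && ls[i].blockNum < ls[j].blockNum)
	})
}

func main() {
	ls := []link{{1, 5}, {0, 9}, {1, 2}}
	sortLinks(ls)
	fmt.Println(ls) // [{0 9} {1 2} {1 5}]
}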

@ -0,0 +1,180 @@
package types
import (
"bytes"
"encoding/binary"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/internal/ctxerror"
)
// CXReceipt represents a receipt for a cross-shard transaction
type CXReceipt struct {
TxHash common.Hash // hash of the cross shard transaction in source shard
From common.Address
To *common.Address
ShardID uint32
ToShardID uint32
Amount *big.Int
}
// CXReceipts is a list of CXReceipt
type CXReceipts []*CXReceipt
// Len returns the length of s.
func (cs CXReceipts) Len() int { return len(cs) }
// Swap swaps the i'th and the j'th element in s.
func (cs CXReceipts) Swap(i, j int) { cs[i], cs[j] = cs[j], cs[i] }
// GetRlp implements Rlpable and returns the i'th element of s in rlp.
func (cs CXReceipts) GetRlp(i int) []byte {
if len(cs) == 0 {
return []byte{}
}
enc, _ := rlp.EncodeToBytes(cs[i])
return enc
}
// ToShardID returns the destination shardID of the cxReceipt
func (cs CXReceipts) ToShardID(i int) uint32 {
if len(cs) == 0 {
return 0
}
return cs[i].ToShardID
}
// MaxToShardID returns the maximum destination shardID of cxReceipts
func (cs CXReceipts) MaxToShardID() uint32 {
maxShardID := uint32(0)
if len(cs) == 0 {
return maxShardID
}
for i := 0; i < len(cs); i++ {
if maxShardID < cs[i].ToShardID {
maxShardID = cs[i].ToShardID
}
}
return maxShardID
}
// NewCrossShardReceipt creates a cross shard receipt
func NewCrossShardReceipt(txHash common.Hash, from common.Address, to *common.Address, shardID uint32, toShardID uint32, amount *big.Int) *CXReceipt {
return &CXReceipt{TxHash: txHash, From: from, To: to, ShardID: shardID, ToShardID: toShardID, Amount: amount}
}
// CXMerkleProof represents the merkle proof of a collection of ordered cross shard transactions
type CXMerkleProof struct {
BlockNum *big.Int // blockNumber of source shard
BlockHash common.Hash // blockHash of source shard
ShardID uint32 // shardID of source shard
CXReceiptHash common.Hash // root hash of the cross shard receipts in a given block
ShardIDs []uint32 // order list, records destination shardID
CXShardHashes []common.Hash // ordered hash list, each hash corresponds to one destination shard's receipts root hash
}
// CXReceiptsProof carries the cross shard receipts and merkle proof
type CXReceiptsProof struct {
Receipts CXReceipts
MerkleProof *CXMerkleProof
}
// CXReceiptsProofs is a list of CXReceiptsProof
type CXReceiptsProofs []*CXReceiptsProof
// Len returns the length of s.
func (cs CXReceiptsProofs) Len() int { return len(cs) }
// Swap swaps the i'th and the j'th element in s.
func (cs CXReceiptsProofs) Swap(i, j int) { cs[i], cs[j] = cs[j], cs[i] }
// GetRlp implements Rlpable and returns the i'th element of s in rlp.
func (cs CXReceiptsProofs) GetRlp(i int) []byte {
if len(cs) == 0 {
return []byte{}
}
enc, _ := rlp.EncodeToBytes(cs[i])
return enc
}
// ToShardID returns the destination shardID of the cxReceipt
// Not used
func (cs CXReceiptsProofs) ToShardID(i int) uint32 {
return 0
}
// MaxToShardID returns the maximum destination shardID of cxReceipts
// Not used
func (cs CXReceiptsProofs) MaxToShardID() uint32 {
return 0
}
// GetToShardID gets the destination shardID and returns an error if the receipts contain more than one distinct shardID
func (cxp *CXReceiptsProof) GetToShardID() (uint32, error) {
var shardID uint32
if cxp == nil || len(cxp.Receipts) == 0 {
return uint32(0), ctxerror.New("[GetShardID] CXReceiptsProof or its receipts is NIL")
}
for i, cx := range cxp.Receipts {
if i == 0 {
shardID = cx.ToShardID
} else if shardID == cx.ToShardID {
continue
} else {
return shardID, ctxerror.New("[GetShardID] CXReceiptsProof contains distinct ToShardID")
}
}
return shardID, nil
}
// IsValidCXReceiptsProof checks whether the given CXReceiptsProof is consistent with itself
func (cxp *CXReceiptsProof) IsValidCXReceiptsProof() error {
toShardID, err := cxp.GetToShardID()
if err != nil {
return ctxerror.New("[IsValidCXReceiptsProof] invalid shardID").WithCause(err)
}
merkleProof := cxp.MerkleProof
shardRoot := common.Hash{}
foundMatchingShardID := false
byteBuffer := bytes.NewBuffer([]byte{})
// prepare to calculate source shard outgoing cxreceipts root hash
for j := 0; j < len(merkleProof.ShardIDs); j++ {
sKey := make([]byte, 4)
binary.BigEndian.PutUint32(sKey, merkleProof.ShardIDs[j])
byteBuffer.Write(sKey)
byteBuffer.Write(merkleProof.CXShardHashes[j][:])
if merkleProof.ShardIDs[j] == toShardID {
shardRoot = merkleProof.CXShardHashes[j]
foundMatchingShardID = true
}
}
if !foundMatchingShardID {
return ctxerror.New("[IsValidCXReceiptsProof] Didn't find matching shardID")
}
sourceShardID := merkleProof.ShardID
sourceBlockNum := merkleProof.BlockNum
sourceOutgoingCXReceiptsHash := merkleProof.CXReceiptHash
sha := DeriveSha(cxp.Receipts)
// (1) verify the CXReceipts trie root matches the one recorded in the proof
if sha != shardRoot {
return ctxerror.New("[IsValidCXReceiptsProof] Trie Root of ReadCXReceipts Not Match", "sourceShardID", sourceShardID, "sourceBlockNum", sourceBlockNum, "calculated", sha, "got", shardRoot)
}
// (2) verify the outgoingCXReceiptsHash matches the one from the source shard
outgoingHashFromSourceShard := crypto.Keccak256Hash(byteBuffer.Bytes())
if outgoingHashFromSourceShard != sourceOutgoingCXReceiptsHash {
return ctxerror.New("[IsValidCXReceiptsProof] IncomingReceiptRootHash from source shard not match", "sourceShardID", sourceShardID, "sourceBlockNum", sourceBlockNum, "calculated", outgoingHashFromSourceShard, "got", sourceOutgoingCXReceiptsHash)
}
return nil
}
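A standalone sketch of the single-destination invariant that GetToShardID enforces above: every receipt bundled in one proof must target the same shard, otherwise the proof is rejected.

package main

import (
	"errors"
	"fmt"
)

// uniqueToShard returns the common destination shard, or an error if the
// receipts disagree or the list is empty.
func uniqueToShard(toShardIDs []uint32) (uint32, error) {
	if len(toShardIDs) == 0 {
		return 0, errors.New("empty receipt list")
	}
	first := toShardIDs[0]
	for _, s := range toShardIDs[1:] {
		if s != first {
			return first, errors.New("receipts contain distinct ToShardID")
		}
	}
	return first, nil
}

func main() {
	fmt.Println(uniqueToShard([]uint32{2, 2, 2})) // 2 <nil>
	fmt.Println(uniqueToShard([]uint32{2, 3}))    // error: receipts contain distinct ToShardID
}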

@ -18,8 +18,10 @@ package types
import ( import (
"bytes" "bytes"
"encoding/binary"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
) )
@ -28,6 +30,8 @@ import (
type DerivableList interface { type DerivableList interface {
Len() int Len() int
GetRlp(i int) []byte GetRlp(i int) []byte
ToShardID(i int) uint32
MaxToShardID() uint32 // return the maximum non-empty destination shardID
} }
// DeriveSha calculates the hash of the trie generated by DerivableList. // DeriveSha calculates the hash of the trie generated by DerivableList.
@ -41,3 +45,43 @@ func DeriveSha(list DerivableList) common.Hash {
} }
return trie.Hash() return trie.Hash()
} }
// DeriveOneShardSha calculates the hash of the trie of
// cross shard transactions with the given destination shard
func DeriveOneShardSha(list DerivableList, shardID uint32) common.Hash {
keybuf := new(bytes.Buffer)
trie := new(trie.Trie)
for i := 0; i < list.Len(); i++ {
if list.ToShardID(i) != shardID {
continue
}
keybuf.Reset()
rlp.Encode(keybuf, uint(i))
trie.Update(keybuf.Bytes(), list.GetRlp(i))
}
return trie.Hash()
}
// DeriveMultipleShardsSha calculates the root hash of tries generated by DerivableList across multiple shards
// If the list is empty, then return EmptyRootHash
// else, return |shard0|trieHash0|shard1|trieHash1|...| for non-empty destination shards
func DeriveMultipleShardsSha(list DerivableList) common.Hash {
by := []byte{}
if list.Len() == 0 {
return EmptyRootHash
}
for i := 0; i <= int(list.MaxToShardID()); i++ {
shardHash := DeriveOneShardSha(list, uint32(i))
if shardHash == EmptyRootHash {
continue
}
sKey := make([]byte, 4)
binary.BigEndian.PutUint32(sKey, uint32(i))
by = append(by, sKey...)
by = append(by, shardHash[:]...)
}
if len(by) == 0 {
return EmptyRootHash
}
return crypto.Keccak256Hash(by)
}
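A sketch of the root composition performed by DeriveMultipleShardsSha above: concatenate |4-byte shardID|per-shard trie root| for each non-empty destination shard and Keccak256 the result. Here the per-shard roots are taken as given, go-ethereum's common and crypto packages are the only dependencies, and the empty-root constant is the usual keccak256(rlp("")) value.

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// emptyRootHash is the canonical empty-trie root, keccak256(rlp("")).
var emptyRootHash = common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

// combineShardRoots mirrors the |shardID|trieHash| concatenation above,
// skipping shards whose root is empty and hashing the concatenation.
func combineShardRoots(rootsByShard map[uint32]common.Hash, maxShard uint32) common.Hash {
	by := []byte{}
	for i := uint32(0); i <= maxShard; i++ {
		root, ok := rootsByShard[i]
		if !ok || root == emptyRootHash {
			continue
		}
		sKey := make([]byte, 4)
		binary.BigEndian.PutUint32(sKey, i)
		by = append(by, sKey...)
		by = append(by, root[:]...)
	}
	if len(by) == 0 {
		return emptyRootHash
	}
	return crypto.Keccak256Hash(by)
}

func main() {
	roots := map[uint32]common.Hash{1: crypto.Keccak256Hash([]byte("shard1-receipts-root"))}
	fmt.Println(combineShardRoots(roots, 3).Hex())
}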

@ -16,13 +16,14 @@ var _ = (*txdataMarshaling)(nil)
// MarshalJSON marshals as JSON. // MarshalJSON marshals as JSON.
func (t txdata) MarshalJSON() ([]byte, error) { func (t txdata) MarshalJSON() ([]byte, error) {
type txdata struct { type txdata struct {
AccountNonce hexutil.Uint64 `json:"nonce" gencodec:"required"` AccountNonce hexutil.Uint64 `json:"nonce" gencodec:"required"`
ShardID uint32 `json:"shardID" gencodec:"required"` Price *hexutil.Big `json:"gasPrice" gencodec:"required"`
Price *hexutil.Big `json:"gasPrice" gencodec:"required"` GasLimit hexutil.Uint64 `json:"gas" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gas" gencodec:"required"` ShardID uint32 `json:"shardID" gencodec:"required"`
Recipient *common.Address `json:"to" rlp:"nil"` ToShardID uint32 `json:"toShardID"`
Amount *hexutil.Big `json:"value" gencodec:"required"` Recipient *common.Address `json:"to" rlp:"nil"`
Payload hexutil.Bytes `json:"input" gencodec:"required"` Amount *hexutil.Big `json:"value" gencodec:"required"`
Payload hexutil.Bytes `json:"input" gencodec:"required"`
V *hexutil.Big `json:"v" gencodec:"required"` V *hexutil.Big `json:"v" gencodec:"required"`
R *hexutil.Big `json:"r" gencodec:"required"` R *hexutil.Big `json:"r" gencodec:"required"`
S *hexutil.Big `json:"s" gencodec:"required"` S *hexutil.Big `json:"s" gencodec:"required"`
@ -30,9 +31,10 @@ func (t txdata) MarshalJSON() ([]byte, error) {
} }
var enc txdata var enc txdata
enc.AccountNonce = hexutil.Uint64(t.AccountNonce) enc.AccountNonce = hexutil.Uint64(t.AccountNonce)
enc.ShardID = t.ShardID
enc.Price = (*hexutil.Big)(t.Price) enc.Price = (*hexutil.Big)(t.Price)
enc.GasLimit = hexutil.Uint64(t.GasLimit) enc.GasLimit = hexutil.Uint64(t.GasLimit)
enc.ShardID = t.ShardID
enc.ToShardID = t.ToShardID
enc.Recipient = t.Recipient enc.Recipient = t.Recipient
enc.Amount = (*hexutil.Big)(t.Amount) enc.Amount = (*hexutil.Big)(t.Amount)
enc.Payload = t.Payload enc.Payload = t.Payload
@ -46,13 +48,14 @@ func (t txdata) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON. // UnmarshalJSON unmarshals from JSON.
func (t *txdata) UnmarshalJSON(input []byte) error { func (t *txdata) UnmarshalJSON(input []byte) error {
type txdata struct { type txdata struct {
AccountNonce *hexutil.Uint64 `json:"nonce" gencodec:"required"` AccountNonce *hexutil.Uint64 `json:"nonce" gencodec:"required"`
ShardID *uint32 `json:"shardID" gencodec:"required"` Price *hexutil.Big `json:"gasPrice" gencodec:"required"`
Price *hexutil.Big `json:"gasPrice" gencodec:"required"` GasLimit *hexutil.Uint64 `json:"gas" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gas" gencodec:"required"` ShardID *uint32 `json:"shardID" gencodec:"required"`
Recipient *common.Address `json:"to" rlp:"nil"` ToShardID *uint32 `json:"toShardID"`
Amount *hexutil.Big `json:"value" gencodec:"required"` Recipient *common.Address `json:"to" rlp:"nil"`
Payload *hexutil.Bytes `json:"input" gencodec:"required"` Amount *hexutil.Big `json:"value" gencodec:"required"`
Payload *hexutil.Bytes `json:"input" gencodec:"required"`
V *hexutil.Big `json:"v" gencodec:"required"` V *hexutil.Big `json:"v" gencodec:"required"`
R *hexutil.Big `json:"r" gencodec:"required"` R *hexutil.Big `json:"r" gencodec:"required"`
S *hexutil.Big `json:"s" gencodec:"required"` S *hexutil.Big `json:"s" gencodec:"required"`
@ -66,10 +69,6 @@ func (t *txdata) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'nonce' for txdata") return errors.New("missing required field 'nonce' for txdata")
} }
t.AccountNonce = uint64(*dec.AccountNonce) t.AccountNonce = uint64(*dec.AccountNonce)
if dec.ShardID == nil {
return errors.New("missing required field 'shardID' for txdata")
}
t.ShardID = *dec.ShardID
if dec.Price == nil { if dec.Price == nil {
return errors.New("missing required field 'gasPrice' for txdata") return errors.New("missing required field 'gasPrice' for txdata")
} }
@ -78,6 +77,13 @@ func (t *txdata) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'gas' for txdata") return errors.New("missing required field 'gas' for txdata")
} }
t.GasLimit = uint64(*dec.GasLimit) t.GasLimit = uint64(*dec.GasLimit)
if dec.ShardID == nil {
return errors.New("missing required field 'shardID' for txdata")
}
t.ShardID = *dec.ShardID
if dec.ToShardID != nil {
t.ToShardID = *dec.ToShardID
}
if dec.Recipient != nil { if dec.Recipient != nil {
t.Recipient = dec.Recipient t.Recipient = dec.Recipient
} }

@ -207,3 +207,14 @@ func (r Receipts) GetRlp(i int) []byte {
} }
return bytes return bytes
} }
// ToShardID returns 0, an arbitrary value.
// This method is not used; it exists only to stay compatible with the DerivableList interface.
func (r Receipts) ToShardID(i int) uint32 {
return 0
}
// MaxToShardID returns 0, an arbitrary value; it is likewise not used.
func (r Receipts) MaxToShardID() uint32 {
return 0
}
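These stubs presumably exist so Receipts keeps satisfying the shard-aware DerivableList interface (defined in core/types/derive_sha.go, which this commit also touches); a hedged sketch of what that interface likely requires, where the exact definition may differ:

// Hedged sketch only; see core/types/derive_sha.go for the real definition.
type DerivableList interface {
	Len() int
	GetRlp(i int) []byte
	ToShardID(i int) uint32 // destination shard of the i-th element
	MaxToShardID() uint32   // upper bound used when grouping elements by shard
}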

@ -36,6 +36,17 @@ var (
ErrInvalidSig = errors.New("invalid transaction v, r, s values") ErrInvalidSig = errors.New("invalid transaction v, r, s values")
) )
// TransactionType specifies the type of a transaction with respect to shards.
type TransactionType byte
// Different Transaction Types
const (
SameShardTx TransactionType = iota
SubtractionOnly // only subtract tokens from source shard account
AdditionOnly // only add tokens to destination shard account
InvalidTx
)
// Transaction struct. // Transaction struct.
type Transaction struct { type Transaction struct {
data txdata data txdata
@ -45,6 +56,18 @@ type Transaction struct {
from atomic.Value from atomic.Value
} }
// String returns the human-readable name of the transaction type.
func (txType TransactionType) String() string {
if txType == SameShardTx {
return "SameShardTx"
} else if txType == SubtractionOnly {
return "SubtractionOnly"
} else if txType == AdditionOnly {
return "AdditionOnly"
}
return "Unknown"
}
type txdata struct { type txdata struct {
AccountNonce uint64 `json:"nonce" gencodec:"required"` AccountNonce uint64 `json:"nonce" gencodec:"required"`
Price *big.Int `json:"gasPrice" gencodec:"required"` Price *big.Int `json:"gasPrice" gencodec:"required"`
@ -75,12 +98,17 @@ type txdataMarshaling struct {
S *hexutil.Big S *hexutil.Big
} }
// NewTransaction returns a new transaction; this constructor creates a same-shard transaction.
func NewTransaction(nonce uint64, to common.Address, shardID uint32, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction { func NewTransaction(nonce uint64, to common.Address, shardID uint32, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction {
return newTransaction(nonce, &to, shardID, amount, gasLimit, gasPrice, data) return newTransaction(nonce, &to, shardID, amount, gasLimit, gasPrice, data)
} }
// NewCrossShardTransaction returns a new cross-shard transaction.
func NewCrossShardTransaction(nonce uint64, to *common.Address, shardID uint32, toShardID uint32, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction {
return newCrossShardTransaction(nonce, to, shardID, toShardID, amount, gasLimit, gasPrice, data)
}
// NewContractCreation returns same shard contract transaction.
func NewContractCreation(nonce uint64, shardID uint32, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction { func NewContractCreation(nonce uint64, shardID uint32, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction {
return newTransaction(nonce, nil, shardID, amount, gasLimit, gasPrice, data) return newTransaction(nonce, nil, shardID, amount, gasLimit, gasPrice, data)
} }
@ -93,6 +121,34 @@ func newTransaction(nonce uint64, to *common.Address, shardID uint32, amount *bi
AccountNonce: nonce, AccountNonce: nonce,
Recipient: to, Recipient: to,
ShardID: shardID, ShardID: shardID,
ToShardID: shardID,
Payload: data,
Amount: new(big.Int),
GasLimit: gasLimit,
Price: new(big.Int),
V: new(big.Int),
R: new(big.Int),
S: new(big.Int),
}
if amount != nil {
d.Amount.Set(amount)
}
if gasPrice != nil {
d.Price.Set(gasPrice)
}
return &Transaction{data: d}
}
func newCrossShardTransaction(nonce uint64, to *common.Address, shardID uint32, toShardID uint32, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction {
if len(data) > 0 {
data = common.CopyBytes(data)
}
d := txdata{
AccountNonce: nonce,
Recipient: to,
ShardID: shardID,
ToShardID: toShardID,
Payload: data, Payload: data,
Amount: new(big.Int), Amount: new(big.Int),
GasLimit: gasLimit, GasLimit: gasLimit,
@ -121,6 +177,11 @@ func (tx *Transaction) ShardID() uint32 {
return tx.data.ShardID return tx.data.ShardID
} }
// ToShardID returns the destination shard id this transaction is going to
func (tx *Transaction) ToShardID() uint32 {
return tx.data.ToShardID
}
// Protected returns whether the transaction is protected from replay protection. // Protected returns whether the transaction is protected from replay protection.
func (tx *Transaction) Protected() bool { func (tx *Transaction) Protected() bool {
return isProtectedV(tx.data.V) return isProtectedV(tx.data.V)
@ -307,6 +368,16 @@ func (s Transactions) GetRlp(i int) []byte {
return enc return enc
} }
// ToShardID returns the destination shard ID of the given transaction.
func (s Transactions) ToShardID(i int) uint32 {
return s[i].data.ToShardID
}
// MaxToShardID returns 0, an arbitrary value; it is not used.
func (s Transactions) MaxToShardID() uint32 {
return 0
}
// TxDifference returns a new set which is the difference between a and b. // TxDifference returns a new set which is the difference between a and b.
func TxDifference(a, b Transactions) Transactions { func TxDifference(a, b Transactions) Transactions {
keep := make(Transactions, 0, len(a)) keep := make(Transactions, 0, len(a))
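For illustration only, a minimal sketch of the new cross-shard constructor and accessors; the address, nonce, amount and gas values below are made up and this code is not part of the change:

// Sketch (not from the repository): building and inspecting a cross-shard transaction.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/harmony/core/types"
)

func main() {
	to := common.HexToAddress("0x15a128e599b74842bccba860311efa92991bffb5")
	// nonce 0, from shard 0 to shard 1, 100 wei, 21000 gas at price 1.
	tx := types.NewCrossShardTransaction(0, &to, 0, 1, big.NewInt(100), 21000, big.NewInt(1), nil)
	fmt.Println(tx.ShardID(), tx.ToShardID()) // 0 1
}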

@ -42,14 +42,7 @@ type sigCache struct {
// MakeSigner returns a Signer based on the given chain config and block number. // MakeSigner returns a Signer based on the given chain config and block number.
func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer { func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer {
var signer Signer var signer Signer
switch { signer = NewEIP155Signer(config.ChainID)
case config.IsEIP155(blockNumber):
signer = NewEIP155Signer(config.ChainID)
case config.IsHomestead(blockNumber):
signer = HomesteadSigner{}
default:
signer = FrontierSigner{}
}
return signer return signer
} }
@ -231,10 +224,10 @@ func (fs FrontierSigner) Sender(tx *Transaction) (common.Address, error) {
} }
func recoverPlain(sighash common.Hash, R, S, Vb *big.Int, homestead bool) (common.Address, error) { func recoverPlain(sighash common.Hash, R, S, Vb *big.Int, homestead bool) (common.Address, error) {
V := byte(Vb.Uint64() - 27)
if Vb.BitLen() > 8 { if Vb.BitLen() > 8 {
return common.Address{}, ErrInvalidSig return common.Address{}, ErrInvalidSig
} }
V := byte(Vb.Uint64() - 27)
if !crypto.ValidateSignatureValues(V, R, S, homestead) { if !crypto.ValidateSignatureValues(V, R, S, homestead) {
return common.Address{}, ErrInvalidSig return common.Address{}, ErrInvalidSig
} }
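Since MakeSigner now returns an EIP-155 signer for every block number, signing and sender recovery always go through the chain-ID-aware path. A sketch under the assumption that SignTx and Sender keep their upstream go-ethereum shape in this fork; it is not part of the diff:

// Sketch (not from the repository): signing with the signer MakeSigner now always returns.
package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/harmony-one/harmony/core/types"
)

func main() {
	key, _ := crypto.GenerateKey()
	signer := types.NewEIP155Signer(big.NewInt(1)) // chain ID 1, as node.New sets further down
	tx := types.NewTransaction(0, common.Address{}, 0, big.NewInt(0), 21000, big.NewInt(1), nil)
	signed, err := types.SignTx(tx, signer, key) // assumed to exist as in upstream go-ethereum
	if err != nil {
		panic(err)
	}
	if _, err := types.Sender(signer, signed); err != nil { // recover the sender address
		panic(err)
	}
}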

@ -24,6 +24,8 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/core/types"
) )
// emptyCodeHash is used by create to ensure deployment is disallowed to already // emptyCodeHash is used by create to ensure deployment is disallowed to already
@ -34,7 +36,7 @@ type (
// CanTransferFunc is the signature of a transfer guard function // CanTransferFunc is the signature of a transfer guard function
CanTransferFunc func(StateDB, common.Address, *big.Int) bool CanTransferFunc func(StateDB, common.Address, *big.Int) bool
// TransferFunc is the signature of a transfer function // TransferFunc is the signature of a transfer function
TransferFunc func(StateDB, common.Address, common.Address, *big.Int) TransferFunc func(StateDB, common.Address, common.Address, *big.Int, types.TransactionType)
// GetHashFunc returns the nth block hash in the blockchain // GetHashFunc returns the nth block hash in the blockchain
// and is used by the BLOCKHASH EVM op code. // and is used by the BLOCKHASH EVM op code.
GetHashFunc func(uint64) common.Hash GetHashFunc func(uint64) common.Hash
@ -88,6 +90,8 @@ type Context struct {
BlockNumber *big.Int // Provides information for NUMBER BlockNumber *big.Int // Provides information for NUMBER
Time *big.Int // Provides information for TIME Time *big.Int // Provides information for TIME
Difficulty *big.Int // Provides information for DIFFICULTY Difficulty *big.Int // Provides information for DIFFICULTY
TxType types.TransactionType
} }
// EVM is the Ethereum Virtual Machine base object and provides // EVM is the Ethereum Virtual Machine base object and provides
@ -192,6 +196,9 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
if evm.depth > int(params.CallCreateDepth) { if evm.depth > int(params.CallCreateDepth) {
return nil, gas, ErrDepth return nil, gas, ErrDepth
} }
txType := evm.Context.TxType
// Fail if we're trying to transfer more than the available balance // Fail if we're trying to transfer more than the available balance
if !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) { if !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) {
return nil, gas, ErrInsufficientBalance return nil, gas, ErrInsufficientBalance
@ -216,7 +223,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
} }
evm.StateDB.CreateAccount(addr) evm.StateDB.CreateAccount(addr)
} }
evm.Transfer(evm.StateDB, caller.Address(), to.Address(), value) evm.Transfer(evm.StateDB, caller.Address(), to.Address(), value, txType)
// Initialise a new contract and set the code that is to be used by the EVM. // Initialise a new contract and set the code that is to be used by the EVM.
// The contract is a scoped environment for this execution context only. // The contract is a scoped environment for this execution context only.
contract := NewContract(caller, to, value, gas) contract := NewContract(caller, to, value, gas)
@ -399,7 +406,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
if evm.ChainConfig().IsEIP158(evm.BlockNumber) { if evm.ChainConfig().IsEIP158(evm.BlockNumber) {
evm.StateDB.SetNonce(address, 1) evm.StateDB.SetNonce(address, 1)
} }
evm.Transfer(evm.StateDB, caller.Address(), address, value) evm.Transfer(evm.StateDB, caller.Address(), address, value, types.SameShardTx)
// initialise a new contract and set the code that is to be used by the // initialise a new contract and set the code that is to be used by the
// EVM. The contract is a scoped environment for this execution context // EVM. The contract is a scoped environment for this execution context
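Because TransferFunc now carries a types.TransactionType, the transfer helper wired into Context (in core/evm.go, changed elsewhere in this commit) presumably applies only the debit or only the credit during the two cross-shard phases. A hedged sketch of such a helper; the actual core/evm.go implementation may differ:

// Hedged sketch of a shard-aware transfer; not the actual core/evm.go code.
package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/harmony/core/types"
	"github.com/harmony-one/harmony/core/vm"
)

// Transfer debits and/or credits depending on which shard is executing the transaction.
func Transfer(db vm.StateDB, sender, recipient common.Address, amount *big.Int, txType types.TransactionType) {
	if txType == types.SameShardTx || txType == types.SubtractionOnly {
		db.SubBalance(sender, amount) // source-shard debit
	}
	if txType == types.SameShardTx || txType == types.AdditionOnly {
		db.AddBalance(recipient, amount) // destination-shard credit
	}
}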

@ -126,7 +126,7 @@ func TestVrf(test *testing.T) {
tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), 0, big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11}) tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), 0, big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11})
txs := []*types.Transaction{tx1} txs := []*types.Transaction{tx1}
block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil) block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, nil)
blockHash := block.Hash() blockHash := block.Hash()
dRand.vrf(blockHash) dRand.vrf(blockHash)

@ -119,7 +119,7 @@ func (c *Client) getBlock(ctx context.Context, method string, args ...interface{
} }
txs[i] = tx.tx txs[i] = tx.tx
} }
return types.NewBlockWithHeader(head).WithBody(txs, []*types.Header{}), nil return types.NewBlockWithHeader(head).WithBody(txs, []*types.Header{}, nil), nil
} }
func toBlockNumArg(number *big.Int) string { func toBlockNumArg(number *big.Int) string {

@ -0,0 +1,187 @@
package chain
import (
"encoding/binary"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/bls/ffi/go/bls"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
"github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/ctxerror"
"github.com/harmony-one/harmony/internal/utils"
)
type engineImpl struct{}
// Engine is an algorithm-agnostic consensus engine.
var Engine = &engineImpl{}
// SealHash returns the hash of a block prior to it being sealed.
func (e *engineImpl) SealHash(header *types.Header) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256()
// TODO: update with new fields
if err := rlp.Encode(hasher, []interface{}{
header.ParentHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra,
}); err != nil {
utils.Logger().Warn().Err(err).Msg("rlp.Encode failed")
}
hasher.Sum(hash[:0])
return hash
}
// Seal seals the final block. Sealing is not yet implemented (see TODO below).
func (e *engineImpl) Seal(chain engine.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
// TODO: implement final block sealing
return nil
}
// Author returns the author of the block header.
func (e *engineImpl) Author(header *types.Header) (common.Address, error) {
// TODO: implement this
return common.Address{}, nil
}
// Prepare initializes the consensus fields of a block header. Currently a stub.
// TODO(RJ): fix it.
func (e *engineImpl) Prepare(chain engine.ChainReader, header *types.Header) error {
// TODO: implement prepare method
return nil
}
// VerifyHeader checks whether a header conforms to the consensus rules of the bft engine.
func (e *engineImpl) VerifyHeader(chain engine.ChainReader, header *types.Header, seal bool) error {
parentHeader := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
if parentHeader == nil {
return engine.ErrUnknownAncestor
}
if seal {
if err := e.VerifySeal(chain, header); err != nil {
return err
}
}
return nil
}
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications.
func (e *engineImpl) VerifyHeaders(chain engine.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
abort, results := make(chan struct{}), make(chan error, len(headers))
for i := 0; i < len(headers); i++ {
results <- nil
}
return abort, results
}
// retrievePublicKeysFromLastBlock finds the public keys of last block's committee
func retrievePublicKeysFromLastBlock(bc engine.ChainReader, header *types.Header) ([]*bls.PublicKey, error) {
parentHeader := bc.GetHeaderByHash(header.ParentHash)
if parentHeader == nil {
return nil, ctxerror.New("cannot find parent block header in DB",
"parentHash", header.ParentHash)
}
parentShardState, err := bc.ReadShardState(parentHeader.Epoch)
if err != nil {
return nil, ctxerror.New("cannot read shard state",
"epoch", parentHeader.Epoch,
).WithCause(err)
}
parentCommittee := parentShardState.FindCommitteeByID(parentHeader.ShardID)
if parentCommittee == nil {
return nil, ctxerror.New("cannot find shard in the shard state",
"parentBlockNumber", parentHeader.Number,
"shardID", parentHeader.ShardID,
)
}
var committerKeys []*bls.PublicKey
for _, member := range parentCommittee.NodeList {
committerKey := new(bls.PublicKey)
err := member.BlsPublicKey.ToLibBLSPublicKey(committerKey)
if err != nil {
return nil, ctxerror.New("cannot convert BLS public key",
"blsPublicKey", member.BlsPublicKey).WithCause(err)
}
committerKeys = append(committerKeys, committerKey)
}
return committerKeys, nil
}
// VerifySeal implements Engine, checking whether the given block satisfies
// the PoS difficulty requirements, i.e. >= 2f+1 valid signatures from the committee
func (e *engineImpl) VerifySeal(chain engine.ChainReader, header *types.Header) error {
if chain.CurrentHeader().Number.Uint64() <= uint64(1) {
return nil
}
publicKeys, err := retrievePublicKeysFromLastBlock(chain, header)
if err != nil {
return ctxerror.New("[VerifySeal] Cannot retrieve publickeys from last block").WithCause(err)
}
payload := append(header.LastCommitSignature[:], header.LastCommitBitmap...)
aggSig, mask, err := ReadSignatureBitmapByPublicKeys(payload, publicKeys)
if err != nil {
return ctxerror.New("[VerifySeal] Unable to deserialize the LastCommitSignature and LastCommitBitmap in Block Header").WithCause(err)
}
parentHeader := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
parentQuorum, err := QuorumForBlock(chain, parentHeader)
if err != nil {
return errors.Wrapf(err,
"cannot calculate quorum for block %s", header.Number)
}
if count := utils.CountOneBits(mask.Bitmap); count < parentQuorum {
return ctxerror.New("[VerifySeal] Not enough signature in LastCommitSignature from Block Header",
"need", parentQuorum, "got", count)
}
blockNumHash := make([]byte, 8)
binary.LittleEndian.PutUint64(blockNumHash, header.Number.Uint64()-1)
lastCommitPayload := append(blockNumHash, header.ParentHash[:]...)
if !aggSig.VerifyHash(mask.AggregatePublic, lastCommitPayload) {
return ctxerror.New("[VerifySeal] Unable to verify aggregated signature from last block", "lastBlockNum", header.Number.Uint64()-1, "lastBlockHash", header.ParentHash)
}
return nil
}
// Finalize implements Engine, accumulating the block rewards,
// setting the final state and assembling the block.
func (e *engineImpl) Finalize(chain engine.ChainReader, header *types.Header, state *state.DB, txs []*types.Transaction, receipts []*types.Receipt, outcxs []*types.CXReceipt, incxs []*types.CXReceiptsProof) (*types.Block, error) {
// Accumulate any block and uncle rewards and commit the final state root
// Header seems complete, assemble into a block and return
if err := AccumulateRewards(chain, state, header); err != nil {
return nil, ctxerror.New("cannot pay block reward").WithCause(err)
}
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
return types.NewBlock(header, txs, receipts, outcxs, incxs), nil
}
// QuorumForBlock returns the quorum for the given block header.
func QuorumForBlock(
chain engine.ChainReader, h *types.Header,
) (quorum int, err error) {
ss, err := chain.ReadShardState(h.Epoch)
if err != nil {
return 0, errors.Wrapf(err,
"cannot read shard state for epoch %s", h.Epoch)
}
c := ss.FindCommitteeByID(h.ShardID)
if c == nil {
return 0, errors.Errorf(
"cannot find shard %d in shard state", h.ShardID)
}
return (len(c.NodeList))*2/3 + 1, nil
}
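Two details above are worth a small illustration (not part of the diff): the exact payload the last-commit aggregate signature must cover, and the 2f+1 quorum that QuorumForBlock derives from the committee size. The package name below is made up.

// Sketch (not from the repository), mirroring VerifySeal and QuorumForBlock above.
package chainexample

import (
	"encoding/binary"

	"github.com/ethereum/go-ethereum/common"
)

// lastCommitPayload is the message the aggregate signature is verified against:
// the parent block number as 8 little-endian bytes, followed by the parent hash.
func lastCommitPayload(parentNum uint64, parentHash common.Hash) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, parentNum)
	return append(buf, parentHash[:]...)
}

// quorum mirrors QuorumForBlock: a 9-member committee needs 9*2/3+1 = 7 signers.
func quorum(committeeSize int) int {
	return committeeSize*2/3 + 1
}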

@ -0,0 +1,105 @@
package chain
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/common/denominations"
"github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
bls2 "github.com/harmony-one/harmony/crypto/bls"
common2 "github.com/harmony-one/harmony/internal/common"
"github.com/harmony-one/harmony/internal/ctxerror"
"github.com/harmony-one/harmony/internal/utils"
)
// BlockReward is the block reward, to be split evenly among block signers.
var BlockReward = new(big.Int).Mul(big.NewInt(24), big.NewInt(denominations.One))
// AccumulateRewards credits the signers of the parent block with the static
// block reward, split evenly among them (up to integer rounding) using the
// cumulative scheme below so that the shares sum to exactly BlockReward.
func AccumulateRewards(
bc engine.ChainReader, state *state.DB, header *types.Header,
) error {
blockNum := header.Number.Uint64()
if blockNum == 0 {
// Epoch block has no parent to reward.
return nil
}
// TODO ek – retrieving by parent number (blockNum - 1) doesn't work,
// while it is okay with hash. Sounds like DB inconsistency.
// Figure out why.
parentHeader := bc.GetHeaderByHash(header.ParentHash)
if parentHeader == nil {
return ctxerror.New("cannot find parent block header in DB",
"parentHash", header.ParentHash)
}
if parentHeader.Number.Cmp(common.Big0) == 0 {
// Parent is an epoch block,
// which is not signed in the usual manner and therefore earns no reward.
return nil
}
parentShardState, err := bc.ReadShardState(parentHeader.Epoch)
if err != nil {
return ctxerror.New("cannot read shard state",
"epoch", parentHeader.Epoch,
).WithCause(err)
}
parentCommittee := parentShardState.FindCommitteeByID(parentHeader.ShardID)
if parentCommittee == nil {
return ctxerror.New("cannot find shard in the shard state",
"parentBlockNumber", parentHeader.Number,
"shardID", parentHeader.ShardID,
)
}
var committerKeys []*bls.PublicKey
for _, member := range parentCommittee.NodeList {
committerKey := new(bls.PublicKey)
err := member.BlsPublicKey.ToLibBLSPublicKey(committerKey)
if err != nil {
return ctxerror.New("cannot convert BLS public key",
"blsPublicKey", member.BlsPublicKey).WithCause(err)
}
committerKeys = append(committerKeys, committerKey)
}
mask, err := bls2.NewMask(committerKeys, nil)
if err != nil {
return ctxerror.New("cannot create group sig mask").WithCause(err)
}
if err := mask.SetMask(header.LastCommitBitmap); err != nil {
return ctxerror.New("cannot set group sig mask bits").WithCause(err)
}
totalAmount := big.NewInt(0)
var accounts []common.Address
signers := []string{}
for idx, member := range parentCommittee.NodeList {
if signed, err := mask.IndexEnabled(idx); err != nil {
return ctxerror.New("cannot check for committer bit",
"committerIndex", idx,
).WithCause(err)
} else if signed {
accounts = append(accounts, member.EcdsaAddress)
}
}
numAccounts := big.NewInt(int64(len(accounts)))
last := new(big.Int)
for i, account := range accounts {
cur := new(big.Int)
cur.Mul(BlockReward, big.NewInt(int64(i+1))).Div(cur, numAccounts)
diff := new(big.Int).Sub(cur, last)
signers = append(signers, common2.MustAddressToBech32(account))
state.AddBalance(account, diff)
totalAmount = new(big.Int).Add(totalAmount, diff)
last = cur
}
header.Logger(utils.Logger()).Debug().
Str("NumAccounts", numAccounts.String()).
Str("TotalAmount", totalAmount.String()).
Strs("Signers", signers).
Msg("[Block Reward] Successfully paid out block reward")
return nil
}
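The cur/last arithmetic splits BlockReward as cumulative integer shares, which guarantees the per-signer amounts always sum to exactly BlockReward even when it is not divisible by the number of signers. A standalone sketch of the same scheme, illustration only, with a made-up total:

// Sketch (not from the repository) of the cumulative split used above.
package main

import (
	"fmt"
	"math/big"
)

func split(total *big.Int, n int) []*big.Int {
	shares := make([]*big.Int, n)
	last := new(big.Int)
	num := big.NewInt(int64(n))
	for i := 0; i < n; i++ {
		cur := new(big.Int).Mul(total, big.NewInt(int64(i+1)))
		cur.Div(cur, num)
		shares[i] = new(big.Int).Sub(cur, last) // share i = cumulative(i) - cumulative(i-1)
		last = cur
	}
	return shares
}

func main() {
	fmt.Println(split(big.NewInt(10), 3)) // [3 3 4]; the shares sum back to 10
}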

@ -0,0 +1,41 @@
package chain
import (
"errors"
"github.com/harmony-one/bls/ffi/go/bls"
bls2 "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/utils"
)
// ReadSignatureBitmapByPublicKeys parses the aggregate signature and signer bitmap from the payload, using the given public keys to build the mask.
func ReadSignatureBitmapByPublicKeys(recvPayload []byte, publicKeys []*bls.PublicKey) (*bls.Sign, *bls2.Mask, error) {
if len(recvPayload) < 96 {
return nil, nil, errors.New("payload is too short")
}
payload := append(recvPayload[:0:0], recvPayload...)
//#### Read payload data
// 96 byte of multi-sig
offset := 0
multiSig := payload[offset : offset+96]
offset += 96
// bitmap
bitmap := payload[offset:]
//#### END Read payload data
aggSig := bls.Sign{}
err := aggSig.Deserialize(multiSig)
if err != nil {
return nil, nil, errors.New("unable to deserialize multi-signature from payload")
}
mask, err := bls2.NewMask(publicKeys, nil)
if err != nil {
utils.Logger().Warn().Err(err).Msg("ReadSignatureBitmapByPublicKeys unable to set up mask from payload")
return nil, nil, errors.New("unable to setup mask from payload")
}
if err := mask.SetMask(bitmap); err != nil {
utils.Logger().Warn().Err(err).Msg("mask.SetMask failed")
}
return &aggSig, mask, nil
}
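The expected payload is simply the 96-byte serialized BLS multi-signature followed by the signer bitmap, which is how VerifySeal concatenates header.LastCommitSignature and header.LastCommitBitmap. A hedged sketch of building such a payload, assuming bls.Sign.Serialize from the harmony-one/bls binding; the package name is made up:

// Sketch (not from the repository): the layout ReadSignatureBitmapByPublicKeys parses.
package chainexample

import "github.com/harmony-one/bls/ffi/go/bls"

func buildCommitPayload(aggSig *bls.Sign, bitmap []byte) []byte {
	payload := append([]byte{}, aggSig.Serialize()...) // 96-byte multi-signature
	return append(payload, bitmap...)                  // signer bitmap
}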

@ -220,9 +220,11 @@ func MustAddressToBech32(addr ethCommon.Address) string {
} }
// ParseAddr parses the given address, either as bech32 or as hex. // ParseAddr parses the given address, either as bech32 or as hex.
// The result can be 0x00..00 if the passed parameter is not a valid address.
func ParseAddr(s string) ethCommon.Address { func ParseAddr(s string) ethCommon.Address {
if addr, err := Bech32ToAddress(s); err == nil { if addr, err := Bech32ToAddress(s); err == nil {
return addr return addr
} }
// The result can be 0x00...00 if the passing param is not a correct address.
return ethCommon.HexToAddress(s) return ethCommon.HexToAddress(s)
} }
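A short sketch, illustration only, of the three cases the updated comment describes; the bech32/hex pair below is the one used in the new test:

// Sketch (not from the repository): ParseAddr accepts bech32 or hex, else falls back to zero.
package main

import (
	"fmt"

	common "github.com/harmony-one/harmony/internal/common"
)

func main() {
	a := common.ParseAddr("one1zksj3evekayy90xt4psrz8h6j2v3hla4qwz4ur") // bech32
	b := common.ParseAddr("0x15a128e599b74842bccba860311efa92991bffb5") // hex
	c := common.ParseAddr("not-an-address")                             // unparseable
	fmt.Println(a == b, c.Hex())                                        // true 0x0000000000000000000000000000000000000000
}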

@ -23,6 +23,8 @@ import (
"reflect" "reflect"
"strings" "strings"
"testing" "testing"
ethCommon "github.com/ethereum/go-ethereum/common"
) )
func TestIsBech32Address(t *testing.T) { func TestIsBech32Address(t *testing.T) {
@ -139,6 +141,15 @@ func BenchmarkAddressBech32(b *testing.B) {
} }
} }
func TestAddressToBech32(t *testing.T) {
adr := ethCommon.HexToAddress("0x15a128e599b74842bccba860311efa92991bffb5")
if address, err := AddressToBech32(adr); err == nil {
if address != "one1zksj3evekayy90xt4psrz8h6j2v3hla4qwz4ur" {
t.Errorf("AddressToBech32 returned unexpected address: %s", address)
}
}
}
func TestAddress_Scan(t *testing.T) { func TestAddress_Scan(t *testing.T) {
type args struct { type args struct {
src interface{} src interface{}

@ -71,7 +71,7 @@ type ConfigType struct {
client p2p.GroupID // the client group ID of the shard client p2p.GroupID // the client group ID of the shard
isClient bool // whether this node is a client node, such as wallet/txgen isClient bool // whether this node is a client node, such as wallet/txgen
isBeacon bool // whether this node is beacon node doing consensus or not isBeacon bool // whether this node is beacon node doing consensus or not
ShardID uint32 // ShardID of this node ShardID uint32 // ShardID of this node; TODO ek – revisit when resharding
role Role // Role of the node role Role // Role of the node
Port string // Port of the node. Port string // Port of the node.
IP string // IP of the node. IP string // IP of the node.

@ -36,6 +36,10 @@ func (s fixedSchedule) VdfDifficulty() int {
return mainnetVdfDifficulty return mainnetVdfDifficulty
} }
func (s fixedSchedule) FirstCrossLinkBlock() uint64 {
return mainnetFirstCrossLinkBlock
}
// ConsensusRatio ratio of new nodes vs consensus total nodes // ConsensusRatio ratio of new nodes vs consensus total nodes
func (s fixedSchedule) ConsensusRatio() float64 { func (s fixedSchedule) ConsensusRatio() float64 {
return mainnetConsensusRatio return mainnetConsensusRatio

@ -22,7 +22,7 @@ const (
localnetVdfDifficulty = 5000 // This takes about 10s to finish the vdf localnetVdfDifficulty = 5000 // This takes about 10s to finish the vdf
localnetConsensusRatio = float64(0.1) localnetConsensusRatio = float64(0.1)
// TODO: remove it after randomness feature turned on mainnet localnetFirstCrossLinkBlock = 3
localnetRandomnessStartingEpoch = 0 localnetRandomnessStartingEpoch = 0
) )
@ -67,6 +67,10 @@ func (ls localnetSchedule) VdfDifficulty() int {
return localnetVdfDifficulty return localnetVdfDifficulty
} }
func (ls localnetSchedule) FirstCrossLinkBlock() uint64 {
return localnetFirstCrossLinkBlock
}
// ConsensusRatio ratio of new nodes vs consensus total nodes // ConsensusRatio ratio of new nodes vs consensus total nodes
func (ls localnetSchedule) ConsensusRatio() float64 { func (ls localnetSchedule) ConsensusRatio() float64 {
return localnetConsensusRatio return localnetConsensusRatio
@ -82,4 +86,4 @@ var localnetReshardingEpoch = []*big.Int{big.NewInt(0), big.NewInt(localnetV1Epo
var localnetV0 = MustNewInstance(2, 7, 5, genesis.LocalHarmonyAccounts, genesis.LocalFnAccounts, localnetReshardingEpoch) var localnetV0 = MustNewInstance(2, 7, 5, genesis.LocalHarmonyAccounts, genesis.LocalFnAccounts, localnetReshardingEpoch)
var localnetV1 = MustNewInstance(2, 7, 5, genesis.LocalHarmonyAccountsV1, genesis.LocalFnAccountsV1, localnetReshardingEpoch) var localnetV1 = MustNewInstance(2, 7, 5, genesis.LocalHarmonyAccountsV1, genesis.LocalFnAccountsV1, localnetReshardingEpoch)
var localnetV2 = MustNewInstance(2, 10, 4, genesis.LocalHarmonyAccountsV2, genesis.LocalFnAccountsV2, localnetReshardingEpoch) var localnetV2 = MustNewInstance(2, 9, 6, genesis.LocalHarmonyAccountsV2, genesis.LocalFnAccountsV2, localnetReshardingEpoch)

@ -13,6 +13,8 @@ const (
mainnetVdfDifficulty = 50000 // This takes about 100s to finish the vdf mainnetVdfDifficulty = 50000 // This takes about 100s to finish the vdf
mainnetConsensusRatio = float64(0.66) mainnetConsensusRatio = float64(0.66)
mainnetFirstCrossLinkBlock = 524288 // 32 * 2^14
// TODO: remove it after randomness feature turned on mainnet // TODO: remove it after randomness feature turned on mainnet
mainnetRandomnessStartingEpoch = 100000 mainnetRandomnessStartingEpoch = 100000
@ -76,6 +78,10 @@ func (ms mainnetSchedule) VdfDifficulty() int {
return mainnetVdfDifficulty return mainnetVdfDifficulty
} }
func (ms mainnetSchedule) FirstCrossLinkBlock() uint64 {
return mainnetFirstCrossLinkBlock
}
// ConsensusRatio ratio of new nodes vs consensus total nodes // ConsensusRatio ratio of new nodes vs consensus total nodes
func (ms mainnetSchedule) ConsensusRatio() float64 { func (ms mainnetSchedule) ConsensusRatio() float64 {
return mainnetConsensusRatio return mainnetConsensusRatio

@ -40,7 +40,11 @@ func (pangaeaSchedule) ConsensusRatio() float64 {
var pangaeaReshardingEpoch = []*big.Int{common.Big0} var pangaeaReshardingEpoch = []*big.Int{common.Big0}
var pangaeaV0 = MustNewInstance( var pangaeaV0 = MustNewInstance(
4, 250, 230, genesis.PangaeaAccounts, genesis.FoundationalPangaeaAccounts, pangaeaReshardingEpoch) 4, 250, 20, genesis.PangaeaAccounts, genesis.FoundationalPangaeaAccounts, pangaeaReshardingEpoch)
func (pangaeaSchedule) FirstCrossLinkBlock() uint64 {
return testnetFirstCrossLinkBlock
}
// TODO: remove it after randomness feature turned on mainnet // TODO: remove it after randomness feature turned on mainnet
//RandomnessStartingEpoch returns starting epoch of randomness generation

@ -28,6 +28,9 @@ type Schedule interface {
// ConsensusRatio ratio of new nodes vs consensus total nodes // ConsensusRatio ratio of new nodes vs consensus total nodes
ConsensusRatio() float64 ConsensusRatio() float64
// FirstCrossLinkBlock returns the first cross link block number that will be accepted into beacon chain
FirstCrossLinkBlock() uint64
// TODO: remove it after randomness feature turned on mainnet // TODO: remove it after randomness feature turned on mainnet
//RandomnessStartingEpoch returns starting epoch of randomness generation
RandomnessStartingEpoch() uint64 RandomnessStartingEpoch() uint64
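Callers are expected to gate cross-link handling on this value, as node_cross_shard.go does further down in this commit; a minimal sketch under that assumption (the import alias shardingconfig and the helper name are made up):

// Sketch (not from the repository): only headers at or past FirstCrossLinkBlock are processed.
package example

import shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"

func eligibleForCrossLink(s shardingconfig.Schedule, blockNum uint64) bool {
	return blockNum >= s.FirstCrossLinkBlock()
}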

@ -20,6 +20,8 @@ const (
threeOne = 111 threeOne = 111
testnetVdfDifficulty = 10000 // This takes about 20s to finish the vdf testnetVdfDifficulty = 10000 // This takes about 20s to finish the vdf
testnetFirstCrossLinkBlock = 100
) )
func (testnetSchedule) InstanceForEpoch(epoch *big.Int) Instance { func (testnetSchedule) InstanceForEpoch(epoch *big.Int) Instance {
@ -64,6 +66,10 @@ func (ts testnetSchedule) VdfDifficulty() int {
return testnetVdfDifficulty return testnetVdfDifficulty
} }
func (ts testnetSchedule) FirstCrossLinkBlock() uint64 {
return testnetFirstCrossLinkBlock
}
// ConsensusRatio ratio of new nodes vs consensus total nodes // ConsensusRatio ratio of new nodes vs consensus total nodes
func (ts testnetSchedule) ConsensusRatio() float64 { func (ts testnetSchedule) ConsensusRatio() float64 {
return mainnetConsensusRatio return mainnetConsensusRatio

@ -38,7 +38,7 @@ var LocalHarmonyAccountsV1 = []DeployAccount{
// LocalFnAccountsV1 are the accounts for the initial FN used for local test. // LocalFnAccountsV1 are the accounts for the initial FN used for local test.
var LocalFnAccountsV1 = []DeployAccount{ var LocalFnAccountsV1 = []DeployAccount{
{Index: " 0 ", Address: "one1a50tun737ulcvwy0yvve0pvu5skq0kjargvhwe", BlsPublicKey: "4235d4ae2219093632c61db4f71ff0c32bdb56463845f8477c2086af1fe643194d3709575707148cad4f835f2fc4ea05"}, {Index: " 0 ", Address: "one1a50tun737ulcvwy0yvve0pvu5skq0kjargvhwe", BlsPublicKey: "52ecce5f64db21cbe374c9268188f5d2cdd5bec1a3112276a350349860e35fb81f8cfe447a311e0550d961cf25cb988d"},
{Index: " 1 ", Address: "one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg", BlsPublicKey: "a547a9bf6fdde4f4934cde21473748861a3cc0fe8bbb5e57225a29f483b05b72531f002f8187675743d819c955a86100"}, {Index: " 1 ", Address: "one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg", BlsPublicKey: "a547a9bf6fdde4f4934cde21473748861a3cc0fe8bbb5e57225a29f483b05b72531f002f8187675743d819c955a86100"},
{Index: " 2 ", Address: "one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7", BlsPublicKey: "678ec9670899bf6af85b877058bea4fc1301a5a3a376987e826e3ca150b80e3eaadffedad0fedfa111576fa76ded980c"}, {Index: " 2 ", Address: "one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7", BlsPublicKey: "678ec9670899bf6af85b877058bea4fc1301a5a3a376987e826e3ca150b80e3eaadffedad0fedfa111576fa76ded980c"},
{Index: " 3 ", Address: "one129r9pj3sk0re76f7zs3qz92rggmdgjhtwge62k", BlsPublicKey: "63f479f249c59f0486fda8caa2ffb247209489dae009dfde6144ff38c370230963d360dffd318cfb26c213320e89a512"}, {Index: " 3 ", Address: "one129r9pj3sk0re76f7zs3qz92rggmdgjhtwge62k", BlsPublicKey: "63f479f249c59f0486fda8caa2ffb247209489dae009dfde6144ff38c370230963d360dffd318cfb26c213320e89a512"},
@ -54,13 +54,17 @@ var LocalHarmonyAccountsV2 = []DeployAccount{
{Index: " 5 ", Address: "one1est2gxcvavmtnzc7mhd73gzadm3xxcv5zczdtw", BlsPublicKey: "776f3b8704f4e1092a302a60e84f81e476c212d6f458092b696df420ea19ff84a6179e8e23d090b9297dc041600bc100"}, {Index: " 5 ", Address: "one1est2gxcvavmtnzc7mhd73gzadm3xxcv5zczdtw", BlsPublicKey: "776f3b8704f4e1092a302a60e84f81e476c212d6f458092b696df420ea19ff84a6179e8e23d090b9297dc041600bc100"},
{Index: " 6 ", Address: "one1spshr72utf6rwxseaz339j09ed8p6f8ke370zj", BlsPublicKey: "2d61379e44a772e5757e27ee2b3874254f56073e6bd226eb8b160371cc3c18b8c4977bd3dcb71fd57dc62bf0e143fd08"}, {Index: " 6 ", Address: "one1spshr72utf6rwxseaz339j09ed8p6f8ke370zj", BlsPublicKey: "2d61379e44a772e5757e27ee2b3874254f56073e6bd226eb8b160371cc3c18b8c4977bd3dcb71fd57dc62bf0e143fd08"},
{Index: " 7 ", Address: "one1a0x3d6xpmr6f8wsyaxd9v36pytvp48zckswvv9", BlsPublicKey: "c4e4708b6cf2a2ceeb59981677e9821eebafc5cf483fb5364a28fa604cc0ce69beeed40f3f03815c9e196fdaec5f1097"}, {Index: " 7 ", Address: "one1a0x3d6xpmr6f8wsyaxd9v36pytvp48zckswvv9", BlsPublicKey: "c4e4708b6cf2a2ceeb59981677e9821eebafc5cf483fb5364a28fa604cc0ce69beeed40f3f03815c9e196fdaec5f1097"},
{Index: " 8 ", Address: "one1d2rngmem4x2c6zxsjjz29dlah0jzkr0k2n88wc", BlsPublicKey: "86dc2fdc2ceec18f6923b99fd86a68405c132e1005cf1df72dca75db0adfaeb53d201d66af37916d61f079f34f21fb96"},
{Index: " 9 ", Address: "one1658znfwf40epvy7e46cqrmzyy54h4n0qa73nep", BlsPublicKey: "49d15743b36334399f9985feb0753430a2b287b2d68b84495bbb15381854cbf01bca9d1d9f4c9c8f18509b2bfa6bd40f"},
{Index: " 10 ", Address: "one1z05g55zamqzfw9qs432n33gycdmyvs38xjemyl", BlsPublicKey: "95117937cd8c09acd2dfae847d74041a67834ea88662a7cbed1e170350bc329e53db151e5a0ef3e712e35287ae954818"},
{Index: " 11 ", Address: "one1ljznytjyn269azvszjlcqvpcj6hjm822yrcp2e", BlsPublicKey: "68ae289d73332872ec8d04ac256ca0f5453c88ad392730c5741b6055bc3ec3d086ab03637713a29f459177aaa8340615"},
} }
// LocalFnAccountsV2 are the accounts for the initial FN used for local test. // LocalFnAccountsV2 are the accounts for the initial FN used for local test.
var LocalFnAccountsV2 = []DeployAccount{ var LocalFnAccountsV2 = []DeployAccount{
{Index: " 0 ", Address: "one1a50tun737ulcvwy0yvve0pvu5skq0kjargvhwe", BlsPublicKey: "52ecce5f64db21cbe374c9268188f5d2cdd5bec1a3112276a350349860e35fb81f8cfe447a311e0550d961cf25cb988d"}, {Index: " 0 ", Address: "one1a50tun737ulcvwy0yvve0pvu5skq0kjargvhwe", BlsPublicKey: "52ecce5f64db21cbe374c9268188f5d2cdd5bec1a3112276a350349860e35fb81f8cfe447a311e0550d961cf25cb988d"},
{Index: " 1 ", Address: "one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg", BlsPublicKey: "1c1fb28d2de96e82c3d9b4917eb54412517e2763112a3164862a6ed627ac62e87ce274bb4ea36e6a61fb66a15c263a06"}, {Index: " 1 ", Address: "one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg", BlsPublicKey: "a547a9bf6fdde4f4934cde21473748861a3cc0fe8bbb5e57225a29f483b05b72531f002f8187675743d819c955a86100"},
{Index: " 2 ", Address: "one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7", BlsPublicKey: "b179c4fdc0bee7bd0b6698b792837dd13404d3f985b59d4a9b1cd0641a76651e271518b61abbb6fbebd4acf963358604"}, {Index: " 2 ", Address: "one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7", BlsPublicKey: "678ec9670899bf6af85b877058bea4fc1301a5a3a376987e826e3ca150b80e3eaadffedad0fedfa111576fa76ded980c"},
{Index: " 3 ", Address: "one129r9pj3sk0re76f7zs3qz92rggmdgjhtwge62k", BlsPublicKey: "63f479f249c59f0486fda8caa2ffb247209489dae009dfde6144ff38c370230963d360dffd318cfb26c213320e89a512"}, {Index: " 3 ", Address: "one129r9pj3sk0re76f7zs3qz92rggmdgjhtwge62k", BlsPublicKey: "63f479f249c59f0486fda8caa2ffb247209489dae009dfde6144ff38c370230963d360dffd318cfb26c213320e89a512"},
{Index: " 4 ", Address: "one1d2rngmem4x2c6zxsjjz29dlah0jzkr0k2n88wc", BlsPublicKey: "16513c487a6bb76f37219f3c2927a4f281f9dd3fd6ed2e3a64e500de6545cf391dd973cc228d24f9bd01efe94912e714"}, {Index: " 4 ", Address: "one1d2rngmem4x2c6zxsjjz29dlah0jzkr0k2n88wc", BlsPublicKey: "16513c487a6bb76f37219f3c2927a4f281f9dd3fd6ed2e3a64e500de6545cf391dd973cc228d24f9bd01efe94912e714"},
{Index: " 5 ", Address: "one1658znfwf40epvy7e46cqrmzyy54h4n0qa73nep", BlsPublicKey: "576d3c48294e00d6be4a22b07b66a870ddee03052fe48a5abbd180222e5d5a1f8946a78d55b025de21635fd743bbad90"}, {Index: " 5 ", Address: "one1658znfwf40epvy7e46cqrmzyy54h4n0qa73nep", BlsPublicKey: "576d3c48294e00d6be4a22b07b66a870ddee03052fe48a5abbd180222e5d5a1f8946a78d55b025de21635fd743bbad90"},
@ -68,6 +72,4 @@ var LocalFnAccountsV2 = []DeployAccount{
{Index: " 7 ", Address: "one1d7jfnr6yraxnrycgaemyktkmhmajhp8kl0yahv", BlsPublicKey: "f47238daef97d60deedbde5302d05dea5de67608f11f406576e363661f7dcbc4a1385948549b31a6c70f6fde8a391486"}, {Index: " 7 ", Address: "one1d7jfnr6yraxnrycgaemyktkmhmajhp8kl0yahv", BlsPublicKey: "f47238daef97d60deedbde5302d05dea5de67608f11f406576e363661f7dcbc4a1385948549b31a6c70f6fde8a391486"},
{Index: " 8 ", Address: "one1r4zyyjqrulf935a479sgqlpa78kz7zlcg2jfen", BlsPublicKey: "fc4b9c535ee91f015efff3f32fbb9d32cdd9bfc8a837bb3eee89b8fff653c7af2050a4e147ebe5c7233dc2d5df06ee0a"}, {Index: " 8 ", Address: "one1r4zyyjqrulf935a479sgqlpa78kz7zlcg2jfen", BlsPublicKey: "fc4b9c535ee91f015efff3f32fbb9d32cdd9bfc8a837bb3eee89b8fff653c7af2050a4e147ebe5c7233dc2d5df06ee0a"},
{Index: " 9 ", Address: "one1p7ht2d4kl8ve7a8jxw746yfnx4wnfxtp8jqxwe", BlsPublicKey: "ca86e551ee42adaaa6477322d7db869d3e203c00d7b86c82ebee629ad79cb6d57b8f3db28336778ec2180e56a8e07296"}, {Index: " 9 ", Address: "one1p7ht2d4kl8ve7a8jxw746yfnx4wnfxtp8jqxwe", BlsPublicKey: "ca86e551ee42adaaa6477322d7db869d3e203c00d7b86c82ebee629ad79cb6d57b8f3db28336778ec2180e56a8e07296"},
{Index: " 10 ", Address: "one1z05g55zamqzfw9qs432n33gycdmyvs38xjemyl", BlsPublicKey: "95117937cd8c09acd2dfae847d74041a67834ea88662a7cbed1e170350bc329e53db151e5a0ef3e712e35287ae954818"},
{Index: " 11 ", Address: "one1ljznytjyn269azvszjlcqvpcj6hjm822yrcp2e", BlsPublicKey: "68ae289d73332872ec8d04ac256ca0f5453c88ad392730c5741b6055bc3ec3d086ab03637713a29f459177aaa8340615"},
} }

@ -15,6 +15,7 @@ import (
"github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/core/vm" "github.com/harmony-one/harmony/core/vm"
internal_common "github.com/harmony-one/harmony/internal/common"
"github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/internal/utils"
) )
@ -86,9 +87,10 @@ func (s *PublicBlockChainAPI) GetStorageAt(ctx context.Context, address common.A
// GetBalance returns the amount of Nano for the given address in the state of the // GetBalance returns the amount of Nano for the given address in the state of the
// given block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta // given block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta
// block numbers are also allowed. // block numbers are also allowed.
func (s *PublicBlockChainAPI) GetBalance(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*hexutil.Big, error) { func (s *PublicBlockChainAPI) GetBalance(ctx context.Context, address string, blockNr rpc.BlockNumber) (*hexutil.Big, error) {
// TODO: currently only get latest balance. Will add complete logic later. // TODO: currently only get latest balance. Will add complete logic later.
return s.b.GetBalance(address) adr := internal_common.ParseAddr(address)
return s.b.GetBalance(adr)
} }
// BlockNumber returns the block number of the chain head. // BlockNumber returns the block number of the chain head.
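Since the handler now takes the address as a string and runs it through internal_common.ParseAddr, a bech32 and a hex spelling of the same account resolve identically; a small sketch, illustration only, reusing the pair from the new address test:

// Sketch (not from the repository): both spellings resolve to the same account key.
package main

import (
	"fmt"

	internal_common "github.com/harmony-one/harmony/internal/common"
)

func main() {
	bech := internal_common.ParseAddr("one1zksj3evekayy90xt4psrz8h6j2v3hla4qwz4ur")
	hex := internal_common.ParseAddr("0x15a128e599b74842bccba860311efa92991bffb5")
	fmt.Println(bech == hex) // true; GetBalance would read the same state entry for either
}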

@ -1,11 +1,8 @@
package shardchain package shardchain
import ( import (
"math/big"
"sync" "sync"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
@ -22,7 +19,7 @@ import (
type Collection interface { type Collection interface {
// ShardChain returns the blockchain for the given shard, // ShardChain returns the blockchain for the given shard,
// opening one as necessary. // opening one as necessary.
ShardChain(shardID uint32, networkType nodeconfig.NetworkType) (*core.BlockChain, error) ShardChain(shardID uint32) (*core.BlockChain, error)
// CloseShardChain closes the given shard chain. // CloseShardChain closes the given shard chain.
CloseShardChain(shardID uint32) error CloseShardChain(shardID uint32) error
@ -40,6 +37,7 @@ type CollectionImpl struct {
mtx sync.Mutex mtx sync.Mutex
pool map[uint32]*core.BlockChain pool map[uint32]*core.BlockChain
disableCache bool disableCache bool
chainConfig *params.ChainConfig
} }
// NewCollection creates and returns a new shard chain collection. // NewCollection creates and returns a new shard chain collection.
@ -50,18 +48,20 @@ type CollectionImpl struct {
// the factory is brand new (empty). // the factory is brand new (empty).
func NewCollection( func NewCollection(
dbFactory DBFactory, dbInit DBInitializer, engine engine.Engine, dbFactory DBFactory, dbInit DBInitializer, engine engine.Engine,
chainConfig *params.ChainConfig,
) *CollectionImpl { ) *CollectionImpl {
return &CollectionImpl{ return &CollectionImpl{
dbFactory: dbFactory, dbFactory: dbFactory,
dbInit: dbInit, dbInit: dbInit,
engine: engine, engine: engine,
pool: make(map[uint32]*core.BlockChain), pool: make(map[uint32]*core.BlockChain),
chainConfig: chainConfig,
} }
} }
// ShardChain returns the blockchain for the given shard, // ShardChain returns the blockchain for the given shard,
// opening one as necessary. // opening one as necessary.
func (sc *CollectionImpl) ShardChain(shardID uint32, networkType nodeconfig.NetworkType) (*core.BlockChain, error) { func (sc *CollectionImpl) ShardChain(shardID uint32) (*core.BlockChain, error) {
sc.mtx.Lock() sc.mtx.Lock()
defer sc.mtx.Unlock() defer sc.mtx.Unlock()
if bc, ok := sc.pool[shardID]; ok { if bc, ok := sc.pool[shardID]; ok {
@ -93,18 +93,8 @@ func (sc *CollectionImpl) ShardChain(shardID uint32, networkType nodeconfig.Netw
cacheConfig = &core.CacheConfig{Disabled: true} cacheConfig = &core.CacheConfig{Disabled: true}
} }
chainConfig := params.ChainConfig{}
switch networkType {
case nodeconfig.Mainnet:
chainConfig = *params.MainnetChainConfig
default: // all other network types share testnet config
chainConfig = *params.TestnetChainConfig
}
chainConfig.ChainID = big.NewInt(int64(shardID))
bc, err := core.NewBlockChain( bc, err := core.NewBlockChain(
db, cacheConfig, &chainConfig, sc.engine, vm.Config{}, nil, db, cacheConfig, sc.chainConfig, sc.engine, vm.Config{}, nil,
) )
if err != nil { if err != nil {
return nil, ctxerror.New("cannot create blockchain").WithCause(err) return nil, ctxerror.New("cannot create blockchain").WithCause(err)
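With the network-type switch removed from ShardChain, the caller now picks the chain config once and injects it through NewCollection. A hedged sketch that mirrors how node.New wires this up later in the commit; the single ChainID of 1 follows the TODO there, and the package name and helper name below are made up:

// Sketch (not from the repository): choosing one chain config and injecting it.
package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/params"
	"github.com/harmony-one/harmony/core"
	"github.com/harmony-one/harmony/internal/chain"
	nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
	"github.com/harmony-one/harmony/internal/shardchain"
)

func newShardChain(networkType nodeconfig.NetworkType, dbFactory shardchain.DBFactory,
	dbInit shardchain.DBInitializer, shardID uint32) (*core.BlockChain, error) {
	chainConfig := *params.TestnetChainConfig
	if networkType == nodeconfig.Mainnet {
		chainConfig = *params.MainnetChainConfig
	}
	chainConfig.ChainID = big.NewInt(1) // one chain ID for all shards, per the TODO in node.New
	coll := shardchain.NewCollection(dbFactory, dbInit, chain.Engine, &chainConfig)
	return coll.ShardChain(shardID)
}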

@ -0,0 +1,64 @@
package main
import (
"encoding/gob"
"flag"
"fmt"
"os"
"path"
"reflect"
"github.com/golang/mock/mockgen/model"
pkg_ "github.com/ethereum/go-ethereum/log"
)
var output = flag.String("output", "", "The output file name, or empty to use stdout.")
func main() {
flag.Parse()
its := []struct {
sym string
typ reflect.Type
}{
{"Handler", reflect.TypeOf((*pkg_.Handler)(nil)).Elem()},
}
pkg := &model.Package{
// NOTE: This behaves contrary to documented behaviour if the
// package name is not the final component of the import path.
// The reflect package doesn't expose the package name, though.
Name: path.Base("github.com/ethereum/go-ethereum/log"),
}
for _, it := range its {
intf, err := model.InterfaceFromInterfaceType(it.typ)
if err != nil {
fmt.Fprintf(os.Stderr, "Reflection: %v\n", err)
os.Exit(1)
}
intf.Name = it.sym
pkg.Interfaces = append(pkg.Interfaces, intf)
}
outfile := os.Stdout
if len(*output) != 0 {
var err error
outfile, err = os.Create(*output)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to open output file %q", *output)
}
defer func() {
if err := outfile.Close(); err != nil {
fmt.Fprintf(os.Stderr, "failed to close output file %q", *output)
os.Exit(1)
}
}()
}
if err := gob.NewEncoder(outfile).Encode(pkg); err != nil {
fmt.Fprintf(os.Stderr, "gob encode: %v\n", err)
os.Exit(1)
}
}

@ -3,10 +3,13 @@ package node
import ( import (
"crypto/ecdsa" "crypto/ecdsa"
"fmt" "fmt"
"math/big"
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/accounts" "github.com/harmony-one/harmony/accounts"
"github.com/harmony-one/harmony/api/client" "github.com/harmony-one/harmony/api/client"
clientService "github.com/harmony-one/harmony/api/client/service" clientService "github.com/harmony-one/harmony/api/client/service"
@ -20,6 +23,7 @@ import (
"github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/drand" "github.com/harmony-one/harmony/drand"
"github.com/harmony-one/harmony/internal/chain"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node" nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
"github.com/harmony-one/harmony/internal/ctxerror" "github.com/harmony-one/harmony/internal/ctxerror"
"github.com/harmony-one/harmony/internal/shardchain" "github.com/harmony-one/harmony/internal/shardchain"
@ -89,6 +93,11 @@ type Node struct {
pendingTransactions types.Transactions // All the transactions received but not yet processed for Consensus pendingTransactions types.Transactions // All the transactions received but not yet processed for Consensus
pendingTxMutex sync.Mutex pendingTxMutex sync.Mutex
DRand *drand.DRand // The instance for distributed randomness protocol DRand *drand.DRand // The instance for distributed randomness protocol
pendingCrossLinks []*types.Header
pendingClMutex sync.Mutex
pendingCXReceipts []*types.CXReceiptsProof // All the receipts received but not yet processed for Consensus
pendingCXMutex sync.Mutex
// Shard databases // Shard databases
shardChains shardchain.Collection shardChains shardchain.Collection
@ -120,7 +129,7 @@ type Node struct {
stateSync *syncing.StateSync stateSync *syncing.StateSync
beaconSync *syncing.StateSync beaconSync *syncing.StateSync
peerRegistrationRecord map[string]*syncConfig // record registration time (unixtime) of peers begin in syncing peerRegistrationRecord map[string]*syncConfig // record registration time (unixtime) of peers begin in syncing
dnsZone string SyncingPeerProvider SyncingPeerProvider
// The p2p host used to send/receive p2p messages // The p2p host used to send/receive p2p messages
host p2p.Host host p2p.Host
@ -204,7 +213,7 @@ type Node struct {
// Blockchain returns the blockchain for the node's current shard. // Blockchain returns the blockchain for the node's current shard.
func (node *Node) Blockchain() *core.BlockChain { func (node *Node) Blockchain() *core.BlockChain {
shardID := node.NodeConfig.ShardID shardID := node.NodeConfig.ShardID
bc, err := node.shardChains.ShardChain(shardID, node.NodeConfig.GetNetworkType()) bc, err := node.shardChains.ShardChain(shardID)
if err != nil { if err != nil {
err = ctxerror.New("cannot get shard chain", "shardID", shardID). err = ctxerror.New("cannot get shard chain", "shardID", shardID).
WithCause(err) WithCause(err)
@ -215,7 +224,7 @@ func (node *Node) Blockchain() *core.BlockChain {
// Beaconchain returns the beaconchain from node. // Beaconchain returns the beaconchain from node.
func (node *Node) Beaconchain() *core.BlockChain { func (node *Node) Beaconchain() *core.BlockChain {
bc, err := node.shardChains.ShardChain(0, node.NodeConfig.GetNetworkType()) bc, err := node.shardChains.ShardChain(0)
if err != nil { if err != nil {
err = ctxerror.New("cannot get beaconchain").WithCause(err) err = ctxerror.New("cannot get beaconchain").WithCause(err)
ctxerror.Log15(utils.GetLogger().Crit, err) ctxerror.Log15(utils.GetLogger().Crit, err)
@ -251,6 +260,16 @@ func (node *Node) AddPendingTransaction(newTx *types.Transaction) {
} }
} }
// AddPendingReceipts adds one receipt message to the pending list.
func (node *Node) AddPendingReceipts(receipts *types.CXReceiptsProof) {
if node.NodeConfig.GetNetworkType() != nodeconfig.Mainnet {
node.pendingCXMutex.Lock()
node.pendingCXReceipts = append(node.pendingCXReceipts, receipts)
node.pendingCXMutex.Unlock()
utils.Logger().Error().Int("totalPendingReceipts", len(node.pendingCXReceipts)).Msg("Got ONE more receipt message")
}
}
// Take out a subset of valid transactions from the pending transaction list // Take out a subset of valid transactions from the pending transaction list
// Note the pending transaction list will then contain the rest of the txs // Note the pending transaction list will then contain the rest of the txs
func (node *Node) getTransactionsForNewBlock(maxNumTxs int, coinbase common.Address) types.Transactions { func (node *Node) getTransactionsForNewBlock(maxNumTxs int, coinbase common.Address) types.Transactions {
@ -262,7 +281,7 @@ func (node *Node) getTransactionsForNewBlock(maxNumTxs int, coinbase common.Addr
node.pendingTransactions = unselected node.pendingTransactions = unselected
node.reducePendingTransactions() node.reducePendingTransactions()
utils.Logger().Error(). utils.Logger().Info().
Int("remainPending", len(node.pendingTransactions)). Int("remainPending", len(node.pendingTransactions)).
Int("selected", len(selected)). Int("selected", len(selected)).
Int("invalidDiscarded", len(invalid)). Int("invalidDiscarded", len(invalid)).
@ -315,8 +334,15 @@ func New(host p2p.Host, consensusObj *consensus.Consensus, chainDBFactory shardc
node.SelfPeer = host.GetSelfPeer() node.SelfPeer = host.GetSelfPeer()
} }
chainConfig := *params.TestnetChainConfig
if node.NodeConfig.GetNetworkType() == nodeconfig.Mainnet {
chainConfig = *params.MainnetChainConfig
}
// TODO: use 1 as mainnet, change to networkID instead
chainConfig.ChainID = big.NewInt(1)
collection := shardchain.NewCollection( collection := shardchain.NewCollection(
chainDBFactory, &genesisInitializer{&node}, consensusObj) chainDBFactory, &genesisInitializer{&node}, chain.Engine, &chainConfig)
if isArchival { if isArchival {
collection.DisableCache() collection.DisableCache()
} }
@ -327,18 +353,21 @@ func New(host p2p.Host, consensusObj *consensus.Consensus, chainDBFactory shardc
node.Consensus = consensusObj node.Consensus = consensusObj
// Load the chains. // Load the chains.
chain := node.Blockchain() // this also sets node.isFirstTime if the DB is fresh blockchain := node.Blockchain() // this also sets node.isFirstTime if the DB is fresh
_ = node.Beaconchain() beaconChain := node.Beaconchain()
node.BlockChannel = make(chan *types.Block) node.BlockChannel = make(chan *types.Block)
node.ConfirmedBlockChannel = make(chan *types.Block) node.ConfirmedBlockChannel = make(chan *types.Block)
node.BeaconBlockChannel = make(chan *types.Block) node.BeaconBlockChannel = make(chan *types.Block)
node.TxPool = core.NewTxPool(core.DefaultTxPoolConfig, node.Blockchain().Config(), chain) node.TxPool = core.NewTxPool(core.DefaultTxPoolConfig, node.Blockchain().Config(), blockchain)
node.Worker = worker.New(node.Blockchain().Config(), chain, node.Consensus, node.Consensus.ShardID) node.Worker = worker.New(node.Blockchain().Config(), blockchain, chain.Engine, node.Consensus.ShardID)
if node.Blockchain().ShardID() != 0 {
node.BeaconWorker = worker.New(node.Beaconchain().Config(), beaconChain, chain.Engine, node.Consensus.ShardID)
}
node.Consensus.VerifiedNewBlock = make(chan *types.Block) node.Consensus.VerifiedNewBlock = make(chan *types.Block)
// the sequence number is the next block number to be added in consensus protocol, which is always one more than current chain header block // the sequence number is the next block number to be added in consensus protocol, which is always one more than current chain header block
node.Consensus.SetBlockNum(chain.CurrentBlock().NumberU64() + 1) node.Consensus.SetBlockNum(blockchain.CurrentBlock().NumberU64() + 1)
// Add Faucet contract to all shards, so that on testnet, we can demo wallet in explorer // Add Faucet contract to all shards, so that on testnet, we can demo wallet in explorer
// TODO (leo): we need to have support of cross-shard tx later so that the token can be transferred from beacon chain shard to other tx shards. // TODO (leo): we need to have support of cross-shard tx later so that the token can be transferred from beacon chain shard to other tx shards.
@ -350,16 +379,16 @@ func New(host p2p.Host, consensusObj *consensus.Consensus, chainDBFactory shardc
node.AddContractKeyAndAddress(scFaucet) node.AddContractKeyAndAddress(scFaucet)
} }
if node.Consensus.ShardID == 0 { //if node.Consensus.ShardID == 0 {
// Contracts only exist in beacon chain // // Contracts only exist in beacon chain
if node.isFirstTime { // if node.isFirstTime {
// Setup one time smart contracts // // Setup one time smart contracts
node.CurrentStakes = make(map[common.Address]*structs.StakeInfo) // node.CurrentStakes = make(map[common.Address]*structs.StakeInfo)
node.AddStakingContractToPendingTransactions() //This will save the latest information about staked nodes in current staked // node.AddStakingContractToPendingTransactions() //This will save the latest information about staked nodes in current staked
} else { // } else {
node.AddContractKeyAndAddress(scStaking) // node.AddContractKeyAndAddress(scStaking)
} // }
} //}
node.ContractCaller = contracts.NewContractCaller(node.Blockchain(), node.Blockchain().Config()) node.ContractCaller = contracts.NewContractCaller(node.Blockchain(), node.Blockchain().Config())
@ -387,15 +416,6 @@ func New(host p2p.Host, consensusObj *consensus.Consensus, chainDBFactory shardc
// FIXME (leo): we use beacon client topic as the global topic for now // FIXME (leo): we use beacon client topic as the global topic for now
go node.ReceiveGlobalMessage() go node.ReceiveGlobalMessage()
// if metrics flag is set start the goroutine to collect metrics
if node.NodeConfig.MetricsFlag {
go node.CollectMetrics()
}
if node.NodeConfig.Role() == nodeconfig.ExplorerNode {
go node.CommitCommittee()
}
// Setup initial state of syncing. // Setup initial state of syncing.
node.peerRegistrationRecord = make(map[string]*syncConfig) node.peerRegistrationRecord = make(map[string]*syncConfig)
@ -519,9 +539,3 @@ func (node *Node) initNodeConfiguration() (service.NodeConfig, chan p2p.Peer) {
func (node *Node) AccountManager() *accounts.Manager { func (node *Node) AccountManager() *accounts.Manager {
return node.accountManager return node.accountManager
} }
// SetDNSZone sets the DNS zone to use to get peer info for node syncing
func (node *Node) SetDNSZone(zone string) {
utils.Logger().Info().Str("zone", zone).Msg("using DNS zone to get peers")
node.dnsZone = zone
}

@ -0,0 +1,278 @@
package node
import (
"encoding/binary"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/ctxerror"
"github.com/harmony-one/harmony/internal/utils"
)
// ProcessHeaderMessage verifies and processes a Node/Header message into a crosslink when it is valid
func (node *Node) ProcessHeaderMessage(msgPayload []byte) {
if node.NodeConfig.ShardID == 0 {
var headers []*types.Header
err := rlp.DecodeBytes(msgPayload, &headers)
if err != nil {
utils.Logger().Error().
Err(err).
Msg("[ProcessingHeader] Crosslink Headers Broadcast Unable to Decode")
return
}
// Try to reprocess all the pending cross links
node.pendingClMutex.Lock()
crossLinkHeadersToProcess := node.pendingCrossLinks
node.pendingCrossLinks = []*types.Header{}
node.pendingClMutex.Unlock()
firstCrossLinkBlock := core.ShardingSchedule.FirstCrossLinkBlock()
for _, header := range headers {
if header.Number.Uint64() >= firstCrossLinkBlock {
// Only process cross link starting from FirstCrossLinkBlock
crossLinkHeadersToProcess = append(crossLinkHeadersToProcess, header)
}
}
headersToQueue := []*types.Header{}
for _, header := range crossLinkHeadersToProcess {
exist, err := node.Blockchain().ReadCrossLink(header.ShardID, header.Number.Uint64(), false)
if err == nil && exist != nil {
utils.Logger().Debug().
Msgf("[ProcessingHeader] Cross Link already exists, pass. Block num: %d", header.Number)
continue
}
if header.Number.Uint64() > firstCrossLinkBlock { // Directly trust the first cross-link
// Sanity check on the previous link with the new link
previousLink, err := node.Blockchain().ReadCrossLink(header.ShardID, header.Number.Uint64()-1, false)
if err != nil {
previousLink, err = node.Blockchain().ReadCrossLink(header.ShardID, header.Number.Uint64()-1, true)
if err != nil {
headersToQueue = append(headersToQueue, header)
continue
}
}
err = node.VerifyCrosslinkHeader(previousLink.Header(), header)
if err != nil {
utils.Logger().Warn().
Err(err).
Msgf("[ProcessingHeader] Failed to verify new cross link header for shardID %d, blockNum %d", header.ShardID, header.Number)
continue
}
}
crossLink := types.NewCrossLink(header)
utils.Logger().Debug().
Msgf("[ProcessingHeader] committing for shardID %d, blockNum %d", header.ShardID, header.Number.Uint64())
node.Blockchain().WriteCrossLinks(types.CrossLinks{crossLink}, true)
}
// Queue up the cross links that are in the future
node.pendingClMutex.Lock()
node.pendingCrossLinks = append(node.pendingCrossLinks, headersToQueue...)
node.pendingClMutex.Unlock()
}
}
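A minimal, self-contained sketch of the drain/filter/requeue pattern used for the pending cross-link queue above (standalone Go with hypothetical names such as pendingQueue and item; these are not Harmony types):
package main
import (
    "fmt"
    "sync"
)
type item struct{ num uint64 }
// pendingQueue is a mutex-guarded queue that callers drain, process, and
// partially requeue, mirroring pendingCrossLinks/pendingClMutex above.
type pendingQueue struct {
    mu    sync.Mutex
    items []item
}
// drain atomically takes all queued items and empties the queue.
func (q *pendingQueue) drain() []item {
    q.mu.Lock()
    defer q.mu.Unlock()
    out := q.items
    q.items = nil
    return out
}
// requeue puts back items that could not be processed yet.
func (q *pendingQueue) requeue(items []item) {
    q.mu.Lock()
    defer q.mu.Unlock()
    q.items = append(q.items, items...)
}
func main() {
    q := &pendingQueue{items: []item{{3}, {7}}}
    toProcess := append(q.drain(), item{5}) // pending items plus newly received ones
    var deferred []item
    for _, it := range toProcess {
        if it.num > 5 { // pretend anything above 5 cannot be verified yet
            deferred = append(deferred, it)
            continue
        }
        fmt.Println("processed", it.num)
    }
    q.requeue(deferred)
    fmt.Println("still pending:", len(q.items))
}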
func (node *Node) verifyIncomingReceipts(block *types.Block) error {
m := make(map[common.Hash]bool)
cxps := block.IncomingReceipts()
for _, cxp := range cxps {
if err := cxp.IsValidCXReceiptsProof(); err != nil {
return ctxerror.New("[verifyIncomingReceipts] verification failed").WithCause(err)
}
if node.Blockchain().IsSpent(cxp) {
return ctxerror.New("[verifyIncomingReceipts] Double Spent!")
}
hash := cxp.MerkleProof.BlockHash
// reject duplicate receipts
if _, ok := m[hash]; ok {
return ctxerror.New("[verifyIncomingReceipts] Double Spent!")
}
m[hash] = true
if err := node.compareCrosslinkWithReceipts(cxp); err != nil {
return err
}
}
return nil
}
func (node *Node) compareCrosslinkWithReceipts(cxp *types.CXReceiptsProof) error {
var hash, outgoingReceiptHash common.Hash
shardID := cxp.MerkleProof.ShardID
blockNum := cxp.MerkleProof.BlockNum.Uint64()
beaconChain := node.Beaconchain()
if shardID == 0 {
block := beaconChain.GetBlockByNumber(blockNum)
if block == nil {
return ctxerror.New("[compareCrosslinkWithReceipts] Cannot get beaconchain header", "blockNum", blockNum, "shardID", shardID)
}
hash = block.Hash()
outgoingReceiptHash = block.OutgoingReceiptHash()
} else {
crossLink, err := beaconChain.ReadCrossLink(shardID, blockNum, false)
if err != nil {
return ctxerror.New("[compareCrosslinkWithReceipts] Cannot get crosslink", "blockNum", blockNum, "shardID", shardID).WithCause(err)
}
hash = crossLink.ChainHeader.Hash()
outgoingReceiptHash = crossLink.ChainHeader.OutgoingReceiptHash
}
// verify the source block hash is from a finalized block
if hash == cxp.MerkleProof.BlockHash && outgoingReceiptHash == cxp.MerkleProof.CXReceiptHash {
return nil
}
return ErrCrosslinkVerificationFail
}
// VerifyCrosslinkHeader verifies the header is valid against the prevHeader.
func (node *Node) VerifyCrosslinkHeader(prevHeader, header *types.Header) error {
// TODO: add fork choice rule
if prevHeader.Hash() != header.ParentHash {
return ctxerror.New("[CrossLink] Invalid cross link header - parent hash mismatch", "shardID", header.ShardID, "blockNum", header.Number)
}
// Verify signature of the new cross link header
shardState, err := node.Blockchain().ReadShardState(prevHeader.Epoch)
committee := shardState.FindCommitteeByID(prevHeader.ShardID)
if err != nil || committee == nil {
return ctxerror.New("[CrossLink] Failed to read shard state for cross link header", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
}
var committerKeys []*bls.PublicKey
parseKeysSuccess := true
for _, member := range committee.NodeList {
committerKey := new(bls.PublicKey)
err = member.BlsPublicKey.ToLibBLSPublicKey(committerKey)
if err != nil {
parseKeysSuccess = false
break
}
committerKeys = append(committerKeys, committerKey)
}
if !parseKeysSuccess {
return ctxerror.New("[CrossLink] cannot convert BLS public key", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
}
if header.Number.Uint64() > 1 { // First block doesn't have last sig
mask, err := bls_cosi.NewMask(committerKeys, nil)
if err != nil {
return ctxerror.New("cannot create group sig mask", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
}
if err := mask.SetMask(header.LastCommitBitmap); err != nil {
return ctxerror.New("cannot set group sig mask bits", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
}
aggSig := bls.Sign{}
err = aggSig.Deserialize(header.LastCommitSignature[:])
if err != nil {
return ctxerror.New("unable to deserialize multi-signature from payload").WithCause(err)
}
blockNumBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(blockNumBytes, header.Number.Uint64()-1)
commitPayload := append(blockNumBytes, header.ParentHash[:]...)
if !aggSig.VerifyHash(mask.AggregatePublic, commitPayload) {
return ctxerror.New("Failed to verify the signature for cross link header ", "shardID", header.ShardID, "blockNum", header.Number)
}
}
return nil
}
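The aggregate signature above is checked against a payload made of the previous block number (8 little-endian bytes) followed by the parent hash. A minimal standalone sketch of that layout, inferred from the code above (commitPayload is a hypothetical helper, not a Harmony API):
package main
import (
    "encoding/binary"
    "fmt"
)
// commitPayload lays out the data the last-commit multi-signature signs:
// the previous block number in little-endian followed by the parent hash.
func commitPayload(prevBlockNum uint64, parentHash [32]byte) []byte {
    buf := make([]byte, 8)
    binary.LittleEndian.PutUint64(buf, prevBlockNum)
    return append(buf, parentHash[:]...)
}
func main() {
    var parent [32]byte
    parent[0] = 0xab
    payload := commitPayload(41, parent)
    fmt.Printf("payload is %d bytes; first 9: %x\n", len(payload), payload[:9])
}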
// ProposeCrossLinkDataForBeaconchain proposes cross links for the new beacon chain block
func (node *Node) ProposeCrossLinkDataForBeaconchain() (types.CrossLinks, error) {
utils.Logger().Info().
Uint64("blockNum", node.Blockchain().CurrentBlock().NumberU64()+1).
Msg("Proposing cross links ...")
curBlock := node.Blockchain().CurrentBlock()
numShards := core.ShardingSchedule.InstanceForEpoch(curBlock.Header().Epoch).NumShards()
shardCrossLinks := make([]types.CrossLinks, numShards)
firstCrossLinkBlock := core.ShardingSchedule.FirstCrossLinkBlock()
for i := 0; i < int(numShards); i++ {
curShardID := uint32(i)
lastLink, err := node.Blockchain().ReadShardLastCrossLink(curShardID)
lastLinkblockNum := big.NewInt(int64(firstCrossLinkBlock))
blockNumoffset := 0
if err == nil && lastLink != nil {
blockNumoffset = 1
lastLinkblockNum = lastLink.BlockNum()
}
for {
link, err := node.Blockchain().ReadCrossLink(curShardID, lastLinkblockNum.Uint64()+uint64(blockNumoffset), true)
if err != nil || link == nil {
break
}
if link.BlockNum().Uint64() > firstCrossLinkBlock {
if lastLink == nil {
utils.Logger().Debug().
Err(err).
Msgf("[CrossLink] Haven't received the first cross link %d", link.BlockNum().Uint64())
} else {
err := node.VerifyCrosslinkHeader(lastLink.Header(), link.Header())
if err != nil {
utils.Logger().Debug().
Err(err).
Msgf("[CrossLink] Failed verifying temp cross link %d", link.BlockNum().Uint64())
break
}
}
}
shardCrossLinks[i] = append(shardCrossLinks[i], *link)
lastLink = link
blockNumoffset++
}
}
crossLinksToPropose := types.CrossLinks{}
for _, crossLinks := range shardCrossLinks {
crossLinksToPropose = append(crossLinksToPropose, crossLinks...)
}
if len(crossLinksToPropose) != 0 {
crossLinksToPropose.Sort()
return crossLinksToPropose, nil
}
return types.CrossLinks{}, errors.New("No cross link to propose")
}
// ProcessReceiptMessage stores the receipts and merkle proof in the local data store
func (node *Node) ProcessReceiptMessage(msgPayload []byte) {
cxp := types.CXReceiptsProof{}
if err := rlp.DecodeBytes(msgPayload, &cxp); err != nil {
utils.Logger().Error().Err(err).Msg("[ProcessReceiptMessage] Unable to Decode message Payload")
return
}
if err := cxp.IsValidCXReceiptsProof(); err != nil {
utils.Logger().Error().Err(err).Msg("[ProcessReceiptMessage] Invalid CXReceiptsProof")
return
}
// TODO: check message signature is from the nodes of source shard.
// TODO: remove in future if not useful
node.Blockchain().WriteCXReceipts(cxp.MerkleProof.ShardID, cxp.MerkleProof.BlockNum.Uint64(), cxp.MerkleProof.BlockHash, cxp.Receipts, true)
node.AddPendingReceipts(&cxp)
}

@ -0,0 +1,10 @@
package node
import (
"errors"
)
var (
// ErrCrosslinkVerificationFail ...
ErrCrosslinkVerificationFail = errors.New("Crosslink Verification Failed")
)

@ -152,10 +152,13 @@ func (node *Node) commitBlockForExplorer(block *types.Block) {
// CommitCommittee commits committee with shard id and epoch to explorer service. // CommitCommittee commits committee with shard id and epoch to explorer service.
func (node *Node) CommitCommittee() { func (node *Node) CommitCommittee() {
events := make(chan core.ChainHeadEvent) events := make(chan core.ChainEvent)
node.Blockchain().SubscribeChainHeadEvent(events) node.Blockchain().SubscribeChainEvent(events)
for event := range events { for event := range events {
curBlock := event.Block curBlock := event.Block
if curBlock == nil {
continue
}
state, err := node.Blockchain().ReadShardState(curBlock.Epoch()) state, err := node.Blockchain().ReadShardState(curBlock.Epoch())
if err != nil { if err != nil {
utils.Logger().Error().Err(err).Msg("[Explorer] Error reading shard state") utils.Logger().Error().Err(err).Msg("[Explorer] Error reading shard state")
@ -166,7 +169,7 @@ func (node *Node) CommitCommittee() {
utils.Logger().Info().Msg("[Explorer] Dumping committee") utils.Logger().Info().Msg("[Explorer] Dumping committee")
err := explorer.GetStorageInstance(node.SelfPeer.IP, node.SelfPeer.Port, false).DumpCommittee(curBlock.ShardID(), curBlock.Epoch().Uint64(), committee) err := explorer.GetStorageInstance(node.SelfPeer.IP, node.SelfPeer.Port, false).DumpCommittee(curBlock.ShardID(), curBlock.Epoch().Uint64(), committee)
if err != nil { if err != nil {
utils.Logger().Warn().Err(err).Msgf("[Explorer] Eror dumping committee for block %d", curBlock.NumberU64()) utils.Logger().Warn().Err(err).Msgf("[Explorer] Error dumping committee for block %d", curBlock.NumberU64())
} }
} }
} }

@ -89,8 +89,8 @@ func (node *Node) SetupGenesisBlock(db ethdb.Database, shardID uint32, myShardSt
} }
// Initialize shard state // Initialize shard state
// TODO: add ShardID into chainconfig and change ChainID to NetworkID // TODO: use 1 for now as mainnet, change to networkID instead
chainConfig.ChainID = big.NewInt(int64(shardID)) // Use ChainID as piggybacked ShardID chainConfig.ChainID = big.NewInt(1)
gspec := core.Genesis{ gspec := core.Genesis{
Config: &chainConfig, Config: &chainConfig,

@ -171,13 +171,15 @@ func (node *Node) messageHandler(content []byte, sender libp2p_peer.ID) {
} else { } else {
// for non-beaconchain node, subscribe to beacon block broadcast // for non-beaconchain node, subscribe to beacon block broadcast
role := node.NodeConfig.Role() role := node.NodeConfig.Role()
if proto_node.BlockMessageType(msgPayload[0]) == proto_node.Sync && role == nodeconfig.Validator { if role == nodeconfig.Validator {
utils.Logger().Info().
Uint64("block", blocks[0].NumberU64()).
Msg("Block being handled by block channel")
for _, block := range blocks { for _, block := range blocks {
node.BeaconBlockChannel <- block if block.ShardID() == 0 {
utils.Logger().Info().
Uint64("block", blocks[0].NumberU64()).
Msgf("Block being handled by block channel %d %d", block.NumberU64(), block.ShardID())
node.BeaconBlockChannel <- block
}
} }
} }
if node.Client != nil && node.Client.UpdateBlocks != nil && blocks != nil { if node.Client != nil && node.Client.UpdateBlocks != nil && blocks != nil {
@ -185,6 +187,19 @@ func (node *Node) messageHandler(content []byte, sender libp2p_peer.ID) {
node.Client.UpdateBlocks(blocks) node.Client.UpdateBlocks(blocks)
} }
} }
case proto_node.Header:
// only the beacon chain accepts headers from other shards
utils.Logger().Debug().Msg("NET: received message: Node/Header")
if node.NodeConfig.ShardID != 0 {
return
}
node.ProcessHeaderMessage(msgPayload[1:]) // skip first byte which is blockMsgType
case proto_node.Receipt:
utils.Logger().Debug().Msg("NET: received message: Node/Receipt")
node.ProcessReceiptMessage(msgPayload[1:]) // skip first byte which is blockMsgType
} }
case proto_node.PING: case proto_node.PING:
node.pingMessageHandler(msgPayload, sender) node.pingMessageHandler(msgPayload, sender)
@ -264,9 +279,59 @@ func (node *Node) transactionMessageHandler(msgPayload []byte) {
// NOTE: For now, just send to the client (basically not broadcasting) // NOTE: For now, just send to the client (basically not broadcasting)
// TODO (lc): broadcast the new blocks to new nodes doing state sync // TODO (lc): broadcast the new blocks to new nodes doing state sync
func (node *Node) BroadcastNewBlock(newBlock *types.Block) { func (node *Node) BroadcastNewBlock(newBlock *types.Block) {
if node.ClientPeer != nil { groups := []p2p.GroupID{node.NodeConfig.GetClientGroupID()}
utils.Logger().Info().Msg("Broadcasting new block to client") utils.Logger().Info().Msgf("broadcasting new block %d, group %s", newBlock.NumberU64(), groups[0])
node.host.SendMessageToGroups([]p2p.GroupID{node.NodeConfig.GetClientGroupID()}, host.ConstructP2pMessage(byte(0), proto_node.ConstructBlocksSyncMessage([]*types.Block{newBlock}))) msg := host.ConstructP2pMessage(byte(0), proto_node.ConstructBlocksSyncMessage([]*types.Block{newBlock}))
if err := node.host.SendMessageToGroups(groups, msg); err != nil {
utils.Logger().Warn().Err(err).Msg("cannot broadcast new block")
}
}
// BroadcastCrossLinkHeader is called by the consensus leader to send the new header as a cross link to the beacon chain.
func (node *Node) BroadcastCrossLinkHeader(newBlock *types.Block) {
utils.Logger().Info().Msgf("Broadcasting new header to beacon chain groupID %s", node.NodeConfig.GetBeaconGroupID())
lastThreeHeaders := []*types.Header{}
block := node.Blockchain().GetBlockByNumber(newBlock.NumberU64() - 2)
if block != nil {
lastThreeHeaders = append(lastThreeHeaders, block.Header())
}
block = node.Blockchain().GetBlockByNumber(newBlock.NumberU64() - 1)
if block != nil {
lastThreeHeaders = append(lastThreeHeaders, block.Header())
}
lastThreeHeaders = append(lastThreeHeaders, newBlock.Header())
node.host.SendMessageToGroups([]p2p.GroupID{node.NodeConfig.GetBeaconGroupID()}, host.ConstructP2pMessage(byte(0), proto_node.ConstructCrossLinkHeadersMessage(lastThreeHeaders)))
}
// BroadcastCXReceipts broadcasts cross shard receipts to the corresponding
// destination shards
func (node *Node) BroadcastCXReceipts(newBlock *types.Block) {
epoch := newBlock.Header().Epoch
shardingConfig := core.ShardingSchedule.InstanceForEpoch(epoch)
shardNum := int(shardingConfig.NumShards())
myShardID := node.Consensus.ShardID
utils.Logger().Info().Int("shardNum", shardNum).Uint32("myShardID", myShardID).Uint64("blockNum", newBlock.NumberU64()).Msg("[BroadcastCXReceipts]")
for i := 0; i < shardNum; i++ {
if i == int(myShardID) {
continue
}
cxReceipts, err := node.Blockchain().ReadCXReceipts(uint32(i), newBlock.NumberU64(), newBlock.Hash(), false)
if err != nil || len(cxReceipts) == 0 {
//utils.Logger().Warn().Err(err).Uint32("ToShardID", uint32(i)).Int("numCXReceipts", len(cxReceipts)).Msg("[BroadcastCXReceipts] No ReadCXReceipts found")
continue
}
merkleProof, err := node.Blockchain().CXMerkleProof(uint32(i), newBlock)
if err != nil {
utils.Logger().Warn().Uint32("ToShardID", uint32(i)).Msg("[BroadcastCXReceipts] Unable to get merkleProof")
continue
}
utils.Logger().Info().Uint32("ToShardID", uint32(i)).Msg("[BroadcastCXReceipts] ReadCXReceipts and MerkleProof Found")
groupID := p2p.ShardID(i)
go node.host.SendMessageToGroups([]p2p.GroupID{p2p.NewGroupIDByShardID(groupID)}, host.ConstructP2pMessage(byte(0), proto_node.ConstructCXReceiptsProof(cxReceipts, merkleProof)))
} }
} }
@ -274,6 +339,13 @@ func (node *Node) BroadcastNewBlock(newBlock *types.Block) {
func (node *Node) VerifyNewBlock(newBlock *types.Block) error { func (node *Node) VerifyNewBlock(newBlock *types.Block) error {
// TODO ek – where do we verify parent-child invariants, // TODO ek – where do we verify parent-child invariants,
// e.g. "child.Number == child.IsGenesis() ? 0 : parent.Number+1"? // e.g. "child.Number == child.IsGenesis() ? 0 : parent.Number+1"?
if newBlock.NumberU64() > 1 {
err := core.VerifyBlockLastCommitSigs(node.Blockchain(), newBlock)
if err != nil {
return err
}
}
if newBlock.ShardID() != node.Blockchain().ShardID() { if newBlock.ShardID() != node.Blockchain().ShardID() {
return ctxerror.New("wrong shard ID", return ctxerror.New("wrong shard ID",
"my shard ID", node.Blockchain().ShardID(), "my shard ID", node.Blockchain().ShardID(),
@ -287,6 +359,20 @@ func (node *Node) VerifyNewBlock(newBlock *types.Block) error {
).WithCause(err) ).WithCause(err)
} }
// Verify cross links
if node.NodeConfig.ShardID == 0 {
err := node.VerifyBlockCrossLinks(newBlock)
if err != nil {
return err
}
}
err = node.verifyIncomingReceipts(newBlock)
if err != nil {
return ctxerror.New("[VerifyNewBlock] Cannot ValidateNewBlock", "blockHash", newBlock.Hash(),
"numIncomingReceipts", len(newBlock.IncomingReceipts())).WithCause(err)
}
// TODO: verify the vrf randomness // TODO: verify the vrf randomness
// _ = newBlock.Header().Vrf // _ = newBlock.Header().Vrf
@ -298,6 +384,70 @@ func (node *Node) VerifyNewBlock(newBlock *types.Block) error {
return nil return nil
} }
// VerifyBlockCrossLinks verifies the cross links of the block
func (node *Node) VerifyBlockCrossLinks(block *types.Block) error {
if len(block.Header().CrossLinks) == 0 {
return nil
}
crossLinks := &types.CrossLinks{}
err := rlp.DecodeBytes(block.Header().CrossLinks, crossLinks)
if err != nil {
return ctxerror.New("[CrossLinkVerification] failed to decode cross links",
"blockHash", block.Hash(),
"crossLinks", len(block.Header().CrossLinks),
).WithCause(err)
}
if !crossLinks.IsSorted() {
return ctxerror.New("[CrossLinkVerification] cross links are not sorted",
"blockHash", block.Hash(),
"crossLinks", len(block.Header().CrossLinks),
)
}
firstCrossLinkBlock := core.ShardingSchedule.FirstCrossLinkBlock()
for i, crossLink := range *crossLinks {
lastLink := &types.CrossLink{}
if i == 0 {
if crossLink.BlockNum().Uint64() > firstCrossLinkBlock {
lastLink, err = node.Blockchain().ReadShardLastCrossLink(crossLink.ShardID())
if err != nil {
return ctxerror.New("[CrossLinkVerification] no last cross link found 1",
"blockHash", block.Hash(),
"crossLink", lastLink,
).WithCause(err)
}
}
} else {
if (*crossLinks)[i-1].Header().ShardID != crossLink.Header().ShardID {
if crossLink.BlockNum().Uint64() > firstCrossLinkBlock {
lastLink, err = node.Blockchain().ReadShardLastCrossLink(crossLink.ShardID())
if err != nil {
return ctxerror.New("[CrossLinkVerification] no last cross link found 2",
"blockHash", block.Hash(),
"crossLink", lastLink,
).WithCause(err)
}
}
} else {
lastLink = &(*crossLinks)[i-1]
}
}
if crossLink.BlockNum().Uint64() > firstCrossLinkBlock { // TODO: verify genesis block
err = node.VerifyCrosslinkHeader(lastLink.Header(), crossLink.Header())
if err != nil {
return ctxerror.New("cannot ValidateNewBlock",
"blockHash", block.Hash(),
"numTx", len(block.Transactions()),
).WithCause(err)
}
}
}
return nil
}
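VerifyBlockCrossLinks unpacks header.CrossLinks with rlp.DecodeBytes. The standalone sketch below shows the same RLP round trip on a simplified, hypothetical stand-in type (miniLink is not the real types.CrossLink):
package main
import (
    "fmt"
    "github.com/ethereum/go-ethereum/rlp"
)
// miniLink is a toy stand-in with just enough fields to demonstrate the
// encode/decode round trip used for the CrossLinks header field.
type miniLink struct {
    ShardID  uint32
    BlockNum uint64
}
func main() {
    links := []miniLink{{ShardID: 1, BlockNum: 42}, {ShardID: 2, BlockNum: 43}}
    encoded, err := rlp.EncodeToBytes(links)
    if err != nil {
        panic(err)
    }
    var decoded []miniLink
    if err := rlp.DecodeBytes(encoded, &decoded); err != nil {
        panic(err)
    }
    fmt.Printf("decoded %d links; first is shard %d, block %d\n",
        len(decoded), decoded[0].ShardID, decoded[0].BlockNum)
}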
// BigMaxUint64 is the maximum possible uint64 value, that is, (2**64)-1. // BigMaxUint64 is the maximum possible uint64 value, that is, (2**64)-1.
var BigMaxUint64 = new(big.Int).SetBytes([]byte{ var BigMaxUint64 = new(big.Int).SetBytes([]byte{
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
@ -408,24 +558,33 @@ func (node *Node) validateNewShardState(block *types.Block, stakeInfo *map[commo
// PostConsensusProcessing is called by consensus participants, after consensus is done, to: // PostConsensusProcessing is called by consensus participants, after consensus is done, to:
// 1. add the new block to blockchain // 1. add the new block to blockchain
// 2. [leader] send new block to the client // 2. [leader] send new block to the client
// 3. [leader] send cross shard tx receipts to destination shard
func (node *Node) PostConsensusProcessing(newBlock *types.Block) { func (node *Node) PostConsensusProcessing(newBlock *types.Block) {
if err := node.AddNewBlock(newBlock); err != nil {
utils.Logger().Error().
Err(err).
Msg("Error when adding new block")
return
} else if core.IsEpochLastBlock(newBlock) {
node.Consensus.UpdateConsensusInformation()
}
// Update last consensus time for metrics // Update last consensus time for metrics
node.lastConsensusTime = time.Now().Unix() node.lastConsensusTime = time.Now().Unix()
if node.Consensus.PubKey.IsEqual(node.Consensus.LeaderPubKey) { if node.Consensus.PubKey.IsEqual(node.Consensus.LeaderPubKey) {
node.BroadcastNewBlock(newBlock) if node.NodeConfig.ShardID == 0 {
node.BroadcastNewBlock(newBlock)
} else {
node.BroadcastCrossLinkHeader(newBlock)
}
node.BroadcastCXReceipts(newBlock)
} else { } else {
utils.Logger().Info(). utils.Logger().Info().
Uint64("ViewID", node.Consensus.GetViewID()). Uint64("ViewID", node.Consensus.GetViewID()).
Msg("BINGO !!! Reached Consensus") Msg("BINGO !!! Reached Consensus")
} }
if err := node.AddNewBlock(newBlock); err != nil { node.Blockchain().CleanCXReceiptsCheckpointsByBlock(newBlock)
utils.Logger().Error().
Err(err).
Msg("Error when adding new block")
} else if core.IsEpochLastBlock(newBlock) {
node.Consensus.UpdateConsensusInformation()
}
if node.NodeConfig.GetNetworkType() != nodeconfig.Mainnet { if node.NodeConfig.GetNetworkType() != nodeconfig.Mainnet {
// Update contract deployer's nonce so default contract like faucet can issue transaction with current nonce // Update contract deployer's nonce so default contract like faucet can issue transaction with current nonce

@ -2,10 +2,12 @@ package node
import ( import (
"math/big" "math/big"
"sort"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node" nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
@ -49,20 +51,23 @@ func (node *Node) WaitForConsensusReadyv2(readySignal chan struct{}, stopChan ch
} }
coinbase := node.Consensus.SelfAddress coinbase := node.Consensus.SelfAddress
if err := node.Worker.UpdateCurrent(coinbase); err != nil {
utils.Logger().Error().
Err(err).
Msg("Failed updating worker's state")
continue
}
// Normal tx block consensus // Normal tx block consensus
selectedTxs := types.Transactions{} // Empty transaction list selectedTxs := types.Transactions{} // Empty transaction list
if node.NodeConfig.GetNetworkType() != nodeconfig.Mainnet { if node.NodeConfig.GetNetworkType() != nodeconfig.Mainnet {
selectedTxs = node.getTransactionsForNewBlock(MaxNumberOfTransactionsPerBlock, coinbase) selectedTxs = node.getTransactionsForNewBlock(MaxNumberOfTransactionsPerBlock, coinbase)
if err := node.Worker.UpdateCurrent(coinbase); err != nil {
utils.Logger().Error().
Err(err).
Msg("Failed updating worker's state")
}
} }
utils.Logger().Info(). utils.Logger().Info().
Uint64("blockNum", node.Blockchain().CurrentBlock().NumberU64()+1). Uint64("blockNum", node.Blockchain().CurrentBlock().NumberU64()+1).
Int("selectedTxs", len(selectedTxs)). Int("selectedTxs", len(selectedTxs)).
Msg("PROPOSING NEW BLOCK ------------------------------------------------") Msg("PROPOSING NEW BLOCK ------------------------------------------------")
if err := node.Worker.CommitTransactions(selectedTxs, coinbase); err != nil { if err := node.Worker.CommitTransactions(selectedTxs, coinbase); err != nil {
ctxerror.Log15(utils.GetLogger().Error, ctxerror.Log15(utils.GetLogger().Error,
ctxerror.New("cannot commit transactions"). ctxerror.New("cannot commit transactions").
@ -75,20 +80,38 @@ func (node *Node) WaitForConsensusReadyv2(readySignal chan struct{}, stopChan ch
WithCause(err)) WithCause(err))
continue continue
} }
// Propose cross shard receipts
receiptsList := node.proposeReceiptsProof()
if len(receiptsList) != 0 {
if err := node.Worker.CommitReceipts(receiptsList); err != nil {
ctxerror.Log15(utils.GetLogger().Error,
ctxerror.New("cannot commit receipts").
WithCause(err))
}
}
viewID := node.Consensus.GetViewID() viewID := node.Consensus.GetViewID()
// add aggregated commit signatures from last block, except for the first two blocks // add aggregated commit signatures from last block, except for the first two blocks
if node.NodeConfig.GetNetworkType() == nodeconfig.Mainnet { if node.NodeConfig.ShardID == 0 {
if err = node.Worker.UpdateCurrent(coinbase); err != nil { crossLinksToPropose, localErr := node.ProposeCrossLinkDataForBeaconchain()
utils.Logger().Debug(). if localErr == nil {
Err(err). data, localErr := rlp.EncodeToBytes(crossLinksToPropose)
Msg("Failed updating worker's state") if localErr == nil {
continue newBlock, err = node.Worker.CommitWithCrossLinks(sig, mask, viewID, coinbase, data)
utils.Logger().Debug().
Uint64("blockNum", newBlock.NumberU64()).
Int("numCrossLinks", len(crossLinksToPropose)).
Msg("Successfully added cross links into new block")
}
} else {
newBlock, err = node.Worker.Commit(sig, mask, viewID, coinbase)
} }
} else {
newBlock, err = node.Worker.Commit(sig, mask, viewID, coinbase)
} }
newBlock, err = node.Worker.Commit(sig, mask, viewID, coinbase)
if err != nil { if err != nil {
ctxerror.Log15(utils.GetLogger().Error, ctxerror.Log15(utils.GetLogger().Error,
ctxerror.New("cannot commit new block"). ctxerror.New("cannot commit new block").
@ -117,7 +140,7 @@ func (node *Node) WaitForConsensusReadyv2(readySignal chan struct{}, stopChan ch
} }
func (node *Node) proposeShardStateWithoutBeaconSync(block *types.Block) error { func (node *Node) proposeShardStateWithoutBeaconSync(block *types.Block) error {
if !core.IsEpochLastBlock(block) { if block == nil || !core.IsEpochLastBlock(block) {
return nil return nil
} }
nextEpoch := new(big.Int).Add(block.Header().Epoch, common.Big1) nextEpoch := new(big.Int).Add(block.Header().Epoch, common.Big1)
@ -182,3 +205,40 @@ func (node *Node) proposeLocalShardState(block *types.Block) {
logger.Error().Err(err).Msg("Failed proposin local shard state") logger.Error().Err(err).Msg("Failed proposin local shard state")
} }
} }
func (node *Node) proposeReceiptsProof() []*types.CXReceiptsProof {
validReceiptsList := []*types.CXReceiptsProof{}
pendingReceiptsList := []*types.CXReceiptsProof{}
node.pendingCXMutex.Lock()
sort.Slice(node.pendingCXReceipts, func(i, j int) bool {
return node.pendingCXReceipts[i].MerkleProof.ShardID < node.pendingCXReceipts[j].MerkleProof.ShardID ||
(node.pendingCXReceipts[i].MerkleProof.ShardID == node.pendingCXReceipts[j].MerkleProof.ShardID &&
node.pendingCXReceipts[i].MerkleProof.BlockNum.Cmp(node.pendingCXReceipts[j].MerkleProof.BlockNum) < 0)
})
m := make(map[common.Hash]bool)
for _, cxp := range node.pendingCXReceipts {
// check double spent
if node.Blockchain().IsSpent(cxp) {
continue
}
hash := cxp.MerkleProof.BlockHash
// ignore duplicated receipts
if _, ok := m[hash]; ok {
continue
} else {
m[hash] = true
}
if err := node.compareCrosslinkWithReceipts(cxp); err != nil {
if err != ErrCrosslinkVerificationFail {
pendingReceiptsList = append(pendingReceiptsList, cxp)
}
} else {
validReceiptsList = append(validReceiptsList, cxp)
}
}
node.pendingCXReceipts = pendingReceiptsList
node.pendingCXMutex.Unlock()
return validReceiptsList
}
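proposeReceiptsProof orders the pending proofs by (ShardID, BlockNum) and skips proofs whose source block hash has already been seen. A standalone sketch of that ordering and de-duplication on a hypothetical stand-in type (proof is not types.CXReceiptsProof):
package main
import (
    "fmt"
    "sort"
)
type proof struct {
    ShardID   uint32
    BlockNum  uint64
    BlockHash [32]byte
}
func main() {
    proofs := []proof{
        {ShardID: 2, BlockNum: 9, BlockHash: [32]byte{2}},
        {ShardID: 1, BlockNum: 7, BlockHash: [32]byte{1}},
        {ShardID: 1, BlockNum: 7, BlockHash: [32]byte{1}}, // duplicate
    }
    // Order by shard ID first, then by block number within the shard.
    sort.Slice(proofs, func(i, j int) bool {
        if proofs[i].ShardID != proofs[j].ShardID {
            return proofs[i].ShardID < proofs[j].ShardID
        }
        return proofs[i].BlockNum < proofs[j].BlockNum
    })
    // Skip proofs whose source block hash has already been seen.
    seen := make(map[[32]byte]bool)
    var unique []proof
    for _, p := range proofs {
        if seen[p.BlockHash] {
            continue
        }
        seen[p.BlockHash] = true
        unique = append(unique, p)
    }
    fmt.Println("kept", len(unique), "of", len(proofs), "proofs")
}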

@ -7,8 +7,8 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/pkg/errors"
"github.com/harmony-one/harmony/api/service/syncing" "github.com/harmony-one/harmony/api/service/syncing"
"github.com/harmony-one/harmony/api/service/syncing/downloader" "github.com/harmony-one/harmony/api/service/syncing/downloader"
@ -16,7 +16,6 @@ import (
"github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node" nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
"github.com/harmony-one/harmony/internal/ctxerror"
"github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/node/worker" "github.com/harmony-one/harmony/node/worker"
"github.com/harmony-one/harmony/p2p" "github.com/harmony-one/harmony/p2p"
@ -32,7 +31,7 @@ const (
// getNeighborPeers is a helper function to return a list of peers // getNeighborPeers is a helper function to return a list of peers
// based on the given neighbor map // based on the given neighbor map
func (node *Node) getNeighborPeers(neighbor *sync.Map) []p2p.Peer { func getNeighborPeers(neighbor *sync.Map) []p2p.Peer {
tmp := []p2p.Peer{} tmp := []p2p.Peer{}
neighbor.Range(func(k, v interface{}) bool { neighbor.Range(func(k, v interface{}) bool {
p := v.(p2p.Peer) p := v.(p2p.Peer)
@ -46,7 +45,7 @@ func (node *Node) getNeighborPeers(neighbor *sync.Map) []p2p.Peer {
// DoSyncWithoutConsensus gets sync-ed to blockchain without joining consensus // DoSyncWithoutConsensus gets sync-ed to blockchain without joining consensus
func (node *Node) DoSyncWithoutConsensus() { func (node *Node) DoSyncWithoutConsensus() {
go node.DoSyncing(node.Blockchain(), node.Worker, node.GetSyncingPeers, false) //Don't join consensus go node.DoSyncing(node.Blockchain(), node.Worker, false) //Don't join consensus
} }
// IsSameHeight tells whether node is at same bc height as a peer // IsSameHeight tells whether node is at same bc height as a peer
@ -57,35 +56,106 @@ func (node *Node) IsSameHeight() (uint64, bool) {
return node.stateSync.IsSameBlockchainHeight(node.Blockchain()) return node.stateSync.IsSameBlockchainHeight(node.Blockchain())
} }
// GetBeaconSyncingPeers returns a list of peers for beaconchain syncing // SyncingPeerProvider is an interface for getting the peers in the given shard.
func (node *Node) GetBeaconSyncingPeers() []p2p.Peer { type SyncingPeerProvider interface {
return node.getNeighborPeers(&node.BeaconNeighbors) SyncingPeers(shardID uint32) (peers []p2p.Peer, err error)
}
// LegacySyncingPeerProvider uses neighbor lists stored in a Node to serve
// syncing peer list query.
type LegacySyncingPeerProvider struct {
node *Node
shardID func() uint32
}
// NewLegacySyncingPeerProvider creates and returns a new node-based syncing
// peer provider.
func NewLegacySyncingPeerProvider(node *Node) *LegacySyncingPeerProvider {
var shardID func() uint32
if node.shardChains != nil {
shardID = node.Blockchain().ShardID
}
return &LegacySyncingPeerProvider{node: node, shardID: shardID}
}
// SyncingPeers returns peers stored in neighbor maps in the node structure.
func (p *LegacySyncingPeerProvider) SyncingPeers(shardID uint32) (peers []p2p.Peer, err error) {
switch shardID {
case p.shardID():
peers = getNeighborPeers(&p.node.Neighbors)
case 0:
peers = getNeighborPeers(&p.node.BeaconNeighbors)
default:
return nil, errors.Errorf("unsupported shard ID %v", shardID)
}
return peers, nil
} }
// GetSyncingPeers returns list of peers for regular shard syncing. // DNSSyncingPeerProvider uses the given DNS zone to resolve syncing peers.
func (node *Node) GetSyncingPeers() []p2p.Peer { type DNSSyncingPeerProvider struct {
return node.getNeighborPeers(&node.Neighbors) zone, port string
lookupHost func(name string) (addrs []string, err error)
} }
// GetPeersFromDNS get peers from our DNS server; TODO: temp fix for resolve node syncing // NewDNSSyncingPeerProvider returns a provider that uses given DNS name and
// the GetSyncingPeers return a bunch of "new" peers, all of them are out of sync // port number to resolve syncing peers.
func (node *Node) GetPeersFromDNS() []p2p.Peer { func NewDNSSyncingPeerProvider(zone, port string) *DNSSyncingPeerProvider {
if node.dnsZone == "" { return &DNSSyncingPeerProvider{
return nil zone: zone,
port: port,
lookupHost: net.LookupHost,
} }
shardID := node.Consensus.ShardID }
dns := fmt.Sprintf("s%d.%s", shardID, node.dnsZone)
addrs, err := net.LookupHost(dns) // SyncingPeers resolves DNS name into peers and returns them.
func (p *DNSSyncingPeerProvider) SyncingPeers(shardID uint32) (peers []p2p.Peer, err error) {
dns := fmt.Sprintf("s%d.%s", shardID, p.zone)
addrs, err := p.lookupHost(dns)
if err != nil { if err != nil {
utils.Logger().Debug().Msg("[SYNC] GetPeersFromDNS cannot find peers") return nil, errors.Wrapf(err,
return nil "[SYNC] cannot find peers using DNS name %#v", dns)
} }
port := syncing.GetSyncingPort(node.SelfPeer.Port)
peers := []p2p.Peer{}
for _, addr := range addrs { for _, addr := range addrs {
peers = append(peers, p2p.Peer{IP: addr, Port: port}) peers = append(peers, p2p.Peer{IP: addr, Port: p.port})
} }
return peers return peers, nil
}
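DNSSyncingPeerProvider derives the lookup name as s&lt;shardID&gt;.&lt;zone&gt; and gives every resolved address the configured port. A tiny standalone sketch of that name construction (the zone and port below are made-up values):
package main
import "fmt"
func main() {
    zone, port := "example.com", "6000" // made-up zone and port
    for _, shardID := range []uint32{0, 1, 2} {
        dns := fmt.Sprintf("s%d.%s", shardID, zone)
        fmt.Printf("shard %d -> lookup %s, peers use port %s\n", shardID, dns, port)
    }
}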
// LocalSyncingPeerProvider uses localnet deployment convention to synthesize
// syncing peers.
type LocalSyncingPeerProvider struct {
basePort, selfPort uint16
numShards, shardSize uint32
}
// NewLocalSyncingPeerProvider returns a provider that synthesizes syncing
// peers given the network configuration
func NewLocalSyncingPeerProvider(
basePort, selfPort uint16, numShards, shardSize uint32,
) *LocalSyncingPeerProvider {
return &LocalSyncingPeerProvider{
basePort: basePort,
selfPort: selfPort,
numShards: numShards,
shardSize: shardSize,
}
}
// SyncingPeers returns local syncing peers using the sharding configuration.
func (p *LocalSyncingPeerProvider) SyncingPeers(shardID uint32) (peers []p2p.Peer, err error) {
if shardID >= p.numShards {
return nil, errors.Errorf(
"shard ID %d out of range 0..%d", shardID, p.numShards-1)
}
firstPort := uint32(p.basePort) + shardID
endPort := uint32(p.basePort) + p.numShards*p.shardSize
for port := firstPort; port < endPort; port += p.numShards {
if port == uint32(p.selfPort) {
continue // do not sync from self
}
peers = append(peers, p2p.Peer{IP: "127.0.0.1", Port: fmt.Sprint(port)})
}
return peers, nil
} }
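LocalSyncingPeerProvider assumes the localnet port layout in which shard s occupies ports basePort+s, basePort+s+numShards, and so on, skipping the node's own port. A standalone sketch using the same numbers as the unit test further below (basePort 6000, 2 shards of 3 nodes, self on 6001):
package main
import "fmt"
func main() {
    const (
        basePort  = 6000
        selfPort  = 6001
        numShards = 2
        shardSize = 3
        shardID   = 1
    )
    endPort := basePort + numShards*shardSize
    for port := basePort + shardID; port < endPort; port += numShards {
        if port == selfPort {
            continue // do not sync from self
        }
        fmt.Printf("peer 127.0.0.1:%d\n", port)
    }
    // Prints 6003 and 6005, matching TestLocalSyncingPeerProvider/Shard1Chain.
}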
// DoBeaconSyncing updates received beaconchain blocks and downloads missing beacon chain blocks // DoBeaconSyncing updates received beaconchain blocks and downloads missing beacon chain blocks
@ -94,12 +164,20 @@ func (node *Node) DoBeaconSyncing() {
select { select {
case beaconBlock := <-node.BeaconBlockChannel: case beaconBlock := <-node.BeaconBlockChannel:
if node.beaconSync == nil { if node.beaconSync == nil {
utils.Logger().Info().Msg("initializing beacon sync")
node.beaconSync = syncing.CreateStateSync(node.SelfPeer.IP, node.SelfPeer.Port, node.GetSyncID()) node.beaconSync = syncing.CreateStateSync(node.SelfPeer.IP, node.SelfPeer.Port, node.GetSyncID())
} }
if node.beaconSync.GetActivePeerNumber() == 0 { if node.beaconSync.GetActivePeerNumber() == 0 {
peers := node.GetBeaconSyncingPeers() utils.Logger().Info().Msg("no peers; bootstrapping beacon sync config")
peers, err := node.SyncingPeerProvider.SyncingPeers(0)
if err != nil {
utils.Logger().Warn().
Err(err).
Msg("cannot retrieve beacon syncing peers")
continue
}
if err := node.beaconSync.CreateSyncConfig(peers, true); err != nil { if err := node.beaconSync.CreateSyncConfig(peers, true); err != nil {
ctxerror.Log15(utils.GetLogInstance().Debug, err) utils.Logger().Warn().Err(err).Msg("cannot create beacon sync config")
continue continue
} }
} }
@ -110,7 +188,7 @@ func (node *Node) DoBeaconSyncing() {
} }
// DoSyncing keeps the node in sync with other peers; willJoinConsensus means the node will try to join consensus after catching up // DoSyncing keeps the node in sync with other peers; willJoinConsensus means the node will try to join consensus after catching up
func (node *Node) DoSyncing(bc *core.BlockChain, worker *worker.Worker, getPeers func() []p2p.Peer, willJoinConsensus bool) { func (node *Node) DoSyncing(bc *core.BlockChain, worker *worker.Worker, willJoinConsensus bool) {
ticker := time.NewTicker(SyncFrequency * time.Second) ticker := time.NewTicker(SyncFrequency * time.Second)
SyncingLoop: SyncingLoop:
@ -123,9 +201,20 @@ SyncingLoop:
utils.Logger().Debug().Msg("[SYNC] initialized state sync") utils.Logger().Debug().Msg("[SYNC] initialized state sync")
} }
if node.stateSync.GetActivePeerNumber() < MinConnectedPeers { if node.stateSync.GetActivePeerNumber() < MinConnectedPeers {
peers := getPeers() shardID := bc.ShardID()
peers, err := node.SyncingPeerProvider.SyncingPeers(shardID)
if err != nil {
utils.Logger().Warn().
Err(err).
Uint32("shard_id", shardID).
Msg("cannot retrieve syncing peers")
continue SyncingLoop
}
if err := node.stateSync.CreateSyncConfig(peers, false); err != nil { if err := node.stateSync.CreateSyncConfig(peers, false); err != nil {
utils.Logger().Debug().Msg("[SYNC] create peers error") utils.Logger().Warn().
Err(err).
Interface("peers", peers).
Msg("[SYNC] create peers error")
continue SyncingLoop continue SyncingLoop
} }
utils.Logger().Debug().Int("len", node.stateSync.GetActivePeerNumber()).Msg("[SYNC] Get Active Peers") utils.Logger().Debug().Int("len", node.stateSync.GetActivePeerNumber()).Msg("[SYNC] Get Active Peers")
@ -170,11 +259,7 @@ func (node *Node) SupportSyncing() {
go node.SendNewBlockToUnsync() go node.SendNewBlockToUnsync()
} }
if node.dnsZone != "" { go node.DoSyncing(node.Blockchain(), node.Worker, !isExplorerNode)
go node.DoSyncing(node.Blockchain(), node.Worker, node.GetPeersFromDNS, !isExplorerNode)
} else {
go node.DoSyncing(node.Blockchain(), node.Worker, node.GetSyncingPeers, !isExplorerNode)
}
} }
// InitSyncingServer starts downloader server. // InitSyncingServer starts downloader server.
@ -271,6 +356,7 @@ func (node *Node) CalculateResponse(request *downloader_pb.DownloaderRequest, in
continue continue
} }
encodedBlock, err := rlp.EncodeToBytes(block) encodedBlock, err := rlp.EncodeToBytes(block)
if err == nil { if err == nil {
response.Payload = append(response.Payload, encodedBlock) response.Payload = append(response.Payload, encodedBlock)
} }

@ -1,11 +1,15 @@
package node package node
import ( import (
"errors"
"fmt" "fmt"
"os" "os"
"sync"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert"
bls2 "github.com/harmony-one/harmony/crypto/bls" bls2 "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/shardchain" "github.com/harmony-one/harmony/internal/shardchain"
@ -50,32 +54,125 @@ func TestNewNode(t *testing.T) {
} }
} }
func TestGetSyncingPeers(t *testing.T) { func TestLegacySyncingPeerProvider(t *testing.T) {
blsKey := bls2.RandPrivateKey() t.Run("ShardChain", func(t *testing.T) {
pubKey := blsKey.GetPublicKey() p := makeLegacySyncingPeerProvider()
leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", ConsensusPubKey: pubKey} expectedPeers := []p2p.Peer{
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902") {IP: "127.0.0.1", Port: "6001"},
host, err := p2pimpl.NewHost(&leader, priKey) {IP: "127.0.0.1", Port: "6003"},
if err != nil { }
t.Fatalf("newhost failure: %v", err) actualPeers, err := p.SyncingPeers(1)
} if assert.NoError(t, err) {
assert.ElementsMatch(t, actualPeers, expectedPeers)
}
})
t.Run("BeaconChain", func(t *testing.T) {
p := makeLegacySyncingPeerProvider()
expectedPeers := []p2p.Peer{
{IP: "127.0.0.1", Port: "6000"},
{IP: "127.0.0.1", Port: "6002"},
}
actualPeers, err := p.SyncingPeers(0)
if assert.NoError(t, err) {
assert.ElementsMatch(t, actualPeers, expectedPeers)
}
})
t.Run("NoMatch", func(t *testing.T) {
p := makeLegacySyncingPeerProvider()
_, err := p.SyncingPeers(999)
assert.Error(t, err)
})
}
consensus, err := consensus.New(host, 0, leader, blsKey) func makeLegacySyncingPeerProvider() *LegacySyncingPeerProvider {
if err != nil { node := makeSyncOnlyNode()
t.Fatalf("Cannot craeate consensus: %v", err) p := NewLegacySyncingPeerProvider(node)
} p.shardID = func() uint32 { return 1 }
node := New(host, consensus, testDBFactory, false) return p
peer := p2p.Peer{IP: "127.0.0.1", Port: "8000"} }
peer2 := p2p.Peer{IP: "127.0.0.1", Port: "8001"}
node.Neighbors.Store("minh", peer) func makeSyncOnlyNode() *Node {
node.Neighbors.Store("mark", peer2) node := &Node{
res := node.GetSyncingPeers() Neighbors: sync.Map{},
if len(res) == 0 || !(res[0].IP == peer.IP || res[0].IP == peer2.IP) { BeaconNeighbors: sync.Map{},
t.Error("GetSyncingPeers should return list of {peer, peer2}")
}
if len(res) == 0 || (res[0].Port != "5000" && res[0].Port != "5001") {
t.Errorf("Syncing ports should be 5000, got %v", res[0].Port)
} }
node.Neighbors.Store(
"127.0.0.1:9001:omg", p2p.Peer{IP: "127.0.0.1", Port: "9001"})
node.Neighbors.Store(
"127.0.0.1:9003:wtf", p2p.Peer{IP: "127.0.0.1", Port: "9003"})
node.BeaconNeighbors.Store(
"127.0.0.1:9000:bbq", p2p.Peer{IP: "127.0.0.1", Port: "9000"})
node.BeaconNeighbors.Store(
"127.0.0.1:9002:cakes", p2p.Peer{IP: "127.0.0.1", Port: "9002"})
return node
}
func TestDNSSyncingPeerProvider(t *testing.T) {
t.Run("Happy", func(t *testing.T) {
p := NewDNSSyncingPeerProvider("example.com", "1234")
lookupCount := 0
lookupName := ""
p.lookupHost = func(name string) (addrs []string, err error) {
lookupCount++
lookupName = name
return []string{"1.2.3.4", "5.6.7.8"}, nil
}
expectedPeers := []p2p.Peer{
{IP: "1.2.3.4", Port: "1234"},
{IP: "5.6.7.8", Port: "1234"},
}
actualPeers, err := p.SyncingPeers( /*shardID*/ 3)
if assert.NoError(t, err) {
assert.Equal(t, actualPeers, expectedPeers)
}
assert.Equal(t, lookupCount, 1)
assert.Equal(t, lookupName, "s3.example.com")
if err != nil {
t.Fatalf("SyncingPeers returned non-nil error %#v", err)
}
})
t.Run("LookupError", func(t *testing.T) {
p := NewDNSSyncingPeerProvider("example.com", "1234")
p.lookupHost = func(_ string) ([]string, error) {
return nil, errors.New("omg")
}
_, actualErr := p.SyncingPeers( /*shardID*/ 3)
assert.Error(t, actualErr)
})
}
func TestLocalSyncingPeerProvider(t *testing.T) {
t.Run("BeaconChain", func(t *testing.T) {
p := makeLocalSyncingPeerProvider()
expectedBeaconPeers := []p2p.Peer{
{IP: "127.0.0.1", Port: "6000"},
{IP: "127.0.0.1", Port: "6002"},
{IP: "127.0.0.1", Port: "6004"},
}
if actualPeers, err := p.SyncingPeers(0); assert.NoError(t, err) {
assert.ElementsMatch(t, actualPeers, expectedBeaconPeers)
}
})
t.Run("Shard1Chain", func(t *testing.T) {
p := makeLocalSyncingPeerProvider()
expectedShard1Peers := []p2p.Peer{
// port 6001 omitted because self
{IP: "127.0.0.1", Port: "6003"},
{IP: "127.0.0.1", Port: "6005"},
}
if actualPeers, err := p.SyncingPeers(1); assert.NoError(t, err) {
assert.ElementsMatch(t, actualPeers, expectedShard1Peers)
}
})
t.Run("InvalidShard", func(t *testing.T) {
p := makeLocalSyncingPeerProvider()
_, err := p.SyncingPeers(999)
assert.Error(t, err)
})
}
func makeLocalSyncingPeerProvider() *LocalSyncingPeerProvider {
return NewLocalSyncingPeerProvider(6000, 6001, 2, 3)
} }
func TestAddPeers(t *testing.T) { func TestAddPeers(t *testing.T) {

@ -1,6 +1,7 @@
package worker package worker
import ( import (
"fmt"
"math/big" "math/big"
"time" "time"
@ -24,6 +25,8 @@ type environment struct {
header *types.Header header *types.Header
txs []*types.Transaction txs []*types.Transaction
receipts []*types.Receipt receipts []*types.Receipt
outcxs []*types.CXReceipt // cross shard transaction receipts (source shard)
incxs []*types.CXReceiptsProof // cross shard receipts and their proofs (destination shard)
} }
// Worker is the main object which takes care of submitting new work to consensus engine // Worker is the main object which takes care of submitting new work to consensus engine
@ -50,8 +53,9 @@ func (w *Worker) SelectTransactionsForNewBlock(txs types.Transactions, maxNumTxs
unselected := types.Transactions{} unselected := types.Transactions{}
invalid := types.Transactions{} invalid := types.Transactions{}
for _, tx := range txs { for _, tx := range txs {
if tx.ShardID() != w.shardID { if tx.ShardID() != w.shardID && tx.ToShardID() != w.shardID {
invalid = append(invalid, tx) invalid = append(invalid, tx)
continue
} }
snap := w.current.state.Snapshot() snap := w.current.state.Snapshot()
_, err := w.commitTransaction(tx, coinbase) _, err := w.commitTransaction(tx, coinbase)
@ -61,7 +65,7 @@ func (w *Worker) SelectTransactionsForNewBlock(txs types.Transactions, maxNumTxs
if err != nil { if err != nil {
w.current.state.RevertToSnapshot(snap) w.current.state.RevertToSnapshot(snap)
invalid = append(invalid, tx) invalid = append(invalid, tx)
utils.GetLogger().Debug("Invalid transaction", "Error", err) utils.Logger().Debug().Err(err).Msg("Invalid transaction")
} else { } else {
selected = append(selected, tx) selected = append(selected, tx)
} }
@ -73,13 +77,20 @@ func (w *Worker) SelectTransactionsForNewBlock(txs types.Transactions, maxNumTxs
func (w *Worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) { func (w *Worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) {
snap := w.current.state.Snapshot() snap := w.current.state.Snapshot()
receipt, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, vm.Config{}) receipt, cx, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, vm.Config{})
if err != nil { if err != nil {
w.current.state.RevertToSnapshot(snap) w.current.state.RevertToSnapshot(snap)
return nil, err return nil, err
} }
if receipt == nil {
utils.Logger().Warn().Interface("tx", tx).Interface("cx", cx).Msg("Receipt is Nil!")
return nil, fmt.Errorf("Receipt is Nil")
}
w.current.txs = append(w.current.txs, tx) w.current.txs = append(w.current.txs, tx)
w.current.receipts = append(w.current.receipts, receipt) w.current.receipts = append(w.current.receipts, receipt)
if cx != nil {
w.current.outcxs = append(w.current.outcxs, cx)
}
return receipt.Logs, nil return receipt.Logs, nil
} }
@ -101,6 +112,28 @@ func (w *Worker) CommitTransactions(txs types.Transactions, coinbase common.Addr
return nil return nil
} }
// CommitReceipts commits a list of already verified incoming cross shard receipts
func (w *Worker) CommitReceipts(receiptsList []*types.CXReceiptsProof) error {
if w.current.gasPool == nil {
w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit)
}
if len(receiptsList) == 0 {
w.current.header.IncomingReceiptHash = types.EmptyRootHash
} else {
w.current.header.IncomingReceiptHash = types.DeriveSha(types.CXReceiptsProofs(receiptsList))
}
for _, cx := range receiptsList {
core.ApplyIncomingReceipt(w.config, w.current.state, w.current.header, cx)
}
for _, cx := range receiptsList {
w.current.incxs = append(w.current.incxs, cx)
}
return nil
}
// UpdateCurrent updates the current environment with the current state and header. // UpdateCurrent updates the current environment with the current state and header.
func (w *Worker) UpdateCurrent(coinbase common.Address) error { func (w *Worker) UpdateCurrent(coinbase common.Address) error {
parent := w.chain.CurrentBlock() parent := w.chain.CurrentBlock()
@ -151,8 +184,18 @@ func (w *Worker) GetCurrentReceipts() []*types.Receipt {
return w.current.receipts return w.current.receipts
} }
// Commit generate a new block for the new txs. // OutgoingReceipts gets the receipts generated starting from the last state.
func (w *Worker) Commit(sig []byte, signers []byte, viewID uint64, coinbase common.Address) (*types.Block, error) { func (w *Worker) OutgoingReceipts() []*types.CXReceipt {
return w.current.outcxs
}
// IncomingReceipts gets the incoming receipts received in the destination shard from the source shard
func (w *Worker) IncomingReceipts() []*types.CXReceiptsProof {
return w.current.incxs
}
// CommitWithCrossLinks generates a new block with cross links for the new txs.
func (w *Worker) CommitWithCrossLinks(sig []byte, signers []byte, viewID uint64, coinbase common.Address, crossLinks []byte) (*types.Block, error) {
if len(sig) > 0 && len(signers) > 0 { if len(sig) > 0 && len(signers) > 0 {
copy(w.current.header.LastCommitSignature[:], sig[:]) copy(w.current.header.LastCommitSignature[:], sig[:])
w.current.header.LastCommitBitmap = append(signers[:0:0], signers...) w.current.header.LastCommitBitmap = append(signers[:0:0], signers...)
@ -160,17 +203,23 @@ func (w *Worker) Commit(sig []byte, signers []byte, viewID uint64, coinbase comm
w.current.header.Coinbase = coinbase w.current.header.Coinbase = coinbase
w.current.header.ViewID = new(big.Int) w.current.header.ViewID = new(big.Int)
w.current.header.ViewID.SetUint64(viewID) w.current.header.ViewID.SetUint64(viewID)
w.current.header.CrossLinks = crossLinks
s := w.current.state.Copy() s := w.current.state.Copy()
copyHeader := types.CopyHeader(w.current.header) copyHeader := types.CopyHeader(w.current.header)
block, err := w.engine.Finalize(w.chain, copyHeader, s, w.current.txs, w.current.receipts) block, err := w.engine.Finalize(w.chain, copyHeader, s, w.current.txs, w.current.receipts, w.current.outcxs, w.current.incxs)
if err != nil { if err != nil {
return nil, ctxerror.New("cannot finalize block").WithCause(err) return nil, ctxerror.New("cannot finalize block").WithCause(err)
} }
return block, nil return block, nil
} }
// Commit generates a new block for the new txs.
func (w *Worker) Commit(sig []byte, signers []byte, viewID uint64, coinbase common.Address) (*types.Block, error) {
return w.CommitWithCrossLinks(sig, signers, viewID, coinbase, []byte{})
}
// New create a new worker object. // New create a new worker object.
func New(config *params.ChainConfig, chain *core.BlockChain, engine consensus_engine.Engine, shardID uint32) *Worker { func New(config *params.ChainConfig, chain *core.BlockChain, engine consensus_engine.Engine, shardID uint32) *Worker {
worker := &Worker{ worker := &Worker{

@ -5,11 +5,12 @@ import (
"math/rand" "math/rand"
"testing" "testing"
chain2 "github.com/harmony-one/harmony/internal/chain"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/common/denominations" "github.com/harmony-one/harmony/common/denominations"
"github.com/harmony-one/harmony/consensus"
"github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/core/vm" "github.com/harmony-one/harmony/core/vm"
@ -37,10 +38,10 @@ func TestNewWorker(t *testing.T) {
genesis := gspec.MustCommit(database) genesis := gspec.MustCommit(database)
_ = genesis _ = genesis
chain, _ := core.NewBlockChain(database, nil, gspec.Config, consensus.NewFaker(), vm.Config{}, nil) chain, _ := core.NewBlockChain(database, nil, gspec.Config, chain2.Engine, vm.Config{}, nil)
// Create a new worker // Create a new worker
worker := New(params.TestChainConfig, chain, consensus.NewFaker(), 0) worker := New(params.TestChainConfig, chain, chain2.Engine, 0)
if worker.GetCurrentState().GetBalance(crypto.PubkeyToAddress(testBankKey.PublicKey)).Cmp(testBankFunds) != 0 { if worker.GetCurrentState().GetBalance(crypto.PubkeyToAddress(testBankKey.PublicKey)).Cmp(testBankFunds) != 0 {
t.Error("Worker state is not setup correctly") t.Error("Worker state is not setup correctly")
@ -54,15 +55,15 @@ func TestCommitTransactions(t *testing.T) {
gspec = core.Genesis{ gspec = core.Genesis{
Config: chainConfig, Config: chainConfig,
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}, Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
ShardID: 10, ShardID: 0,
} }
) )
gspec.MustCommit(database) gspec.MustCommit(database)
chain, _ := core.NewBlockChain(database, nil, gspec.Config, consensus.NewFaker(), vm.Config{}, nil) chain, _ := core.NewBlockChain(database, nil, gspec.Config, chain2.Engine, vm.Config{}, nil)
// Create a new worker // Create a new worker
worker := New(params.TestChainConfig, chain, consensus.NewFaker(), 0) worker := New(params.TestChainConfig, chain, chain2.Engine, 0)
// Generate a test tx // Generate a test tx
baseNonce := worker.GetCurrentState().GetNonce(crypto.PubkeyToAddress(testBankKey.PublicKey)) baseNonce := worker.GetCurrentState().GetNonce(crypto.PubkeyToAddress(testBankKey.PublicKey))

@ -177,14 +177,12 @@ beta)
;; ;;
pangaea) pangaea)
bootnodes=( bootnodes=(
/ip4/100.26.90.187/tcp/9867/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv /ip4/54.86.126.90/tcp/9867/p2p/Qmdfjtk6hPoyrH1zVD9PEH4zfWLo38dP2mDvvKXfh3tnEv
/ip4/54.213.43.194/tcp/9867/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9 /ip4/52.40.84.2/tcp/9867/p2p/QmZJJx6AdaoEkGLrYG4JeLCKeCKDjnFz2wfHNHxAqFSGA9
/ip4/13.113.101.219/tcp/9867/p2p/QmQayinFSgMMw5cSpDUiD9pQ2WeP6WNmGxpZ6ou3mdVFJX
/ip4/99.81.170.167/tcp/9867/p2p/QmRVbTpEYup8dSaURZfF6ByrMTSKa4UyUzJhSjahFzRqNj
) )
REL=master REL=master
network_type=pangaea network_type=pangaea
dns_zone=p.hmny.io dns_zone=pga.hmny.io
;; ;;
*) *)
err 64 "${network}: invalid network" err 64 "${network}: invalid network"

@ -13,7 +13,6 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/consensus"
"github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core"
core_state "github.com/harmony-one/harmony/core/state" core_state "github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
@ -105,7 +104,7 @@ func fundFaucetContract(chain *core.BlockChain) {
fmt.Println("--------- Funding addresses for Faucet Contract Call ---------") fmt.Println("--------- Funding addresses for Faucet Contract Call ---------")
fmt.Println() fmt.Println()
contractworker = pkgworker.New(params.TestChainConfig, chain, consensus.NewFaker(), 0) contractworker = pkgworker.New(params.TestChainConfig, chain, chain.Engine(), 0)
nonce = contractworker.GetCurrentState().GetNonce(crypto.PubkeyToAddress(FaucetPriKey.PublicKey)) nonce = contractworker.GetCurrentState().GetNonce(crypto.PubkeyToAddress(FaucetPriKey.PublicKey))
dataEnc = common.FromHex(FaucetContractBinary) dataEnc = common.FromHex(FaucetContractBinary)
ftx, _ := types.SignTx(types.NewContractCreation(nonce, 0, big.NewInt(7000000000000000000), params.TxGasContractCreation*10, nil, dataEnc), types.HomesteadSigner{}, FaucetPriKey) ftx, _ := types.SignTx(types.NewContractCreation(nonce, 0, big.NewInt(7000000000000000000), params.TxGasContractCreation*10, nil, dataEnc), types.HomesteadSigner{}, FaucetPriKey)
@ -331,7 +330,7 @@ func playStakingContract(chain *core.BlockChain) {
func main() { func main() {
genesis := gspec.MustCommit(database) genesis := gspec.MustCommit(database)
chain, _ := core.NewBlockChain(database, nil, gspec.Config, consensus.NewFaker(), vm.Config{}, nil) chain, _ := core.NewBlockChain(database, nil, gspec.Config, chain.Engine(), vm.Config{}, nil)
txpool := core.NewTxPool(core.DefaultTxPoolConfig, chainConfig, chain) txpool := core.NewTxPool(core.DefaultTxPoolConfig, chainConfig, chain)
@ -345,7 +344,7 @@ func main() {
//// Generate a small n-block chain and an uncle block for it //// Generate a small n-block chain and an uncle block for it
n := 3 n := 3
if n > 0 { if n > 0 {
blocks, _ := core.GenerateChain(chainConfig, genesis, consensus.NewFaker(), database, n, func(i int, gen *core.BlockGen) { blocks, _ := core.GenerateChain(chainConfig, genesis, chain.Engine(), database, n, func(i int, gen *core.BlockGen) {
gen.SetCoinbase(FaucetAddress) gen.SetCoinbase(FaucetAddress)
gen.SetShardID(0) gen.SetShardID(0)
gen.AddTx(pendingTxs[i]) gen.AddTx(pendingTxs[i])

@ -12,6 +12,7 @@
127.0.0.1 9011 validator one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg a547a9bf6fdde4f4934cde21473748861a3cc0fe8bbb5e57225a29f483b05b72531f002f8187675743d819c955a86100 127.0.0.1 9011 validator one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg a547a9bf6fdde4f4934cde21473748861a3cc0fe8bbb5e57225a29f483b05b72531f002f8187675743d819c955a86100
127.0.0.1 9012 validator one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7 678ec9670899bf6af85b877058bea4fc1301a5a3a376987e826e3ca150b80e3eaadffedad0fedfa111576fa76ded980c 127.0.0.1 9012 validator one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7 678ec9670899bf6af85b877058bea4fc1301a5a3a376987e826e3ca150b80e3eaadffedad0fedfa111576fa76ded980c
127.0.0.1 9013 validator one129r9pj3sk0re76f7zs3qz92rggmdgjhtwge62k 63f479f249c59f0486fda8caa2ffb247209489dae009dfde6144ff38c370230963d360dffd318cfb26c213320e89a512 127.0.0.1 9013 validator one129r9pj3sk0re76f7zs3qz92rggmdgjhtwge62k 63f479f249c59f0486fda8caa2ffb247209489dae009dfde6144ff38c370230963d360dffd318cfb26c213320e89a512
127.0.0.1 9099 explorer
127.0.0.1 9100 validator one1ghkz3frhske7emk79p7v2afmj4a5t0kmjyt4s5 eca09c1808b729ca56f1b5a6a287c6e1c3ae09e29ccf7efa35453471fcab07d9f73cee249e2b91f5ee44eb9618be3904 127.0.0.1 9100 validator one1ghkz3frhske7emk79p7v2afmj4a5t0kmjyt4s5 eca09c1808b729ca56f1b5a6a287c6e1c3ae09e29ccf7efa35453471fcab07d9f73cee249e2b91f5ee44eb9618be3904
127.0.0.1 9101 validator one1d7jfnr6yraxnrycgaemyktkmhmajhp8kl0yahv f47238daef97d60deedbde5302d05dea5de67608f11f406576e363661f7dcbc4a1385948549b31a6c70f6fde8a391486 127.0.0.1 9101 validator one1d7jfnr6yraxnrycgaemyktkmhmajhp8kl0yahv f47238daef97d60deedbde5302d05dea5de67608f11f406576e363661f7dcbc4a1385948549b31a6c70f6fde8a391486

@@ -6,9 +6,10 @@
127.0.0.1 9005 validator one1est2gxcvavmtnzc7mhd73gzadm3xxcv5zczdtw 776f3b8704f4e1092a302a60e84f81e476c212d6f458092b696df420ea19ff84a6179e8e23d090b9297dc041600bc100 127.0.0.1 9005 validator one1est2gxcvavmtnzc7mhd73gzadm3xxcv5zczdtw 776f3b8704f4e1092a302a60e84f81e476c212d6f458092b696df420ea19ff84a6179e8e23d090b9297dc041600bc100
127.0.0.1 9006 validator one1spshr72utf6rwxseaz339j09ed8p6f8ke370zj 2d61379e44a772e5757e27ee2b3874254f56073e6bd226eb8b160371cc3c18b8c4977bd3dcb71fd57dc62bf0e143fd08 127.0.0.1 9006 validator one1spshr72utf6rwxseaz339j09ed8p6f8ke370zj 2d61379e44a772e5757e27ee2b3874254f56073e6bd226eb8b160371cc3c18b8c4977bd3dcb71fd57dc62bf0e143fd08
127.0.0.1 9007 validator one1a0x3d6xpmr6f8wsyaxd9v36pytvp48zckswvv9 c4e4708b6cf2a2ceeb59981677e9821eebafc5cf483fb5364a28fa604cc0ce69beeed40f3f03815c9e196fdaec5f1097 127.0.0.1 9007 validator one1a0x3d6xpmr6f8wsyaxd9v36pytvp48zckswvv9 c4e4708b6cf2a2ceeb59981677e9821eebafc5cf483fb5364a28fa604cc0ce69beeed40f3f03815c9e196fdaec5f1097
127.0.0.1 9008 validator one1d2rngmem4x2c6zxsjjz29dlah0jzkr0k2n88wc 6dc2fdc2ceec18f6923b99fd86a68405c132e1005cf1df72dca75db0adfaeb53d201d66af37916d61f079f34f21fb96 127.0.0.1 9008 validator one1d2rngmem4x2c6zxsjjz29dlah0jzkr0k2n88wc 86dc2fdc2ceec18f6923b99fd86a68405c132e1005cf1df72dca75db0adfaeb53d201d66af37916d61f079f34f21fb96
127.0.0.1 9009 validator one1658znfwf40epvy7e46cqrmzyy54h4n0qa73nep 49d15743b36334399f9985feb0753430a2b287b2d68b84495bbb15381854cbf01bca9d1d9f4c9c8f18509b2bfa6bd40f 127.0.0.1 9009 validator one1658znfwf40epvy7e46cqrmzyy54h4n0qa73nep 49d15743b36334399f9985feb0753430a2b287b2d68b84495bbb15381854cbf01bca9d1d9f4c9c8f18509b2bfa6bd40f
127.0.0.1 9010 validator one1a50tun737ulcvwy0yvve0pvu5skq0kjargvhwe 52ecce5f64db21cbe374c9268188f5d2cdd5bec1a3112276a350349860e35fb81f8cfe447a311e0550d961cf25cb988d 127.0.0.1 9010 validator one1a50tun737ulcvwy0yvve0pvu5skq0kjargvhwe 52ecce5f64db21cbe374c9268188f5d2cdd5bec1a3112276a350349860e35fb81f8cfe447a311e0550d961cf25cb988d
127.0.0.1 9011 validator one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg a547a9bf6fdde4f4934cde21473748861a3cc0fe8bbb5e57225a29f483b05b72531f002f8187675743d819c955a86100 127.0.0.1 9011 validator one1uyshu2jgv8w465yc8kkny36thlt2wvel89tcmg a547a9bf6fdde4f4934cde21473748861a3cc0fe8bbb5e57225a29f483b05b72531f002f8187675743d819c955a86100
127.0.0.1 9012 validator one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7 678ec9670899bf6af85b877058bea4fc1301a5a3a376987e826e3ca150b80e3eaadffedad0fedfa111576fa76ded980c 127.0.0.1 9012 validator one103q7qe5t2505lypvltkqtddaef5tzfxwsse4z7 678ec9670899bf6af85b877058bea4fc1301a5a3a376987e826e3ca150b80e3eaadffedad0fedfa111576fa76ded980c
127.0.0.1 9013 validator one129r9pj3sk0re76f7zs3qz92rggmdgjhtwge62k 63f479f249c59f0486fda8caa2ffb247209489dae009dfde6144ff38c370230963d360dffd318cfb26c213320e89a512 127.0.0.1 9013 validator one129r9pj3sk0re76f7zs3qz92rggmdgjhtwge62k 63f479f249c59f0486fda8caa2ffb247209489dae009dfde6144ff38c370230963d360dffd318cfb26c213320e89a512
127.0.0.1 9099 explorer

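Both local config hunks above add an explorer entry on port 9099 alongside the validator lines. As a quick sanity check (a sketch only; it assumes the one-node-per-line "IP PORT role ..." layout shown above and the test/configs/local-resharding.txt path referenced by debug.sh below):

#!/bin/bash
# Count explorer entries in a local node config; the third field is the role.
cfg=test/configs/local-resharding.txt
n=$(awk '$3 == "explorer"' "$cfg" | wc -l)
echo "$cfg: $n explorer node(s) (expected: 1, on port 9099)"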
@@ -1,3 +1,3 @@
./test/kill_node.sh ./test/kill_node.sh
rm -rf tmp_log* rm -rf tmp_log*
./test/deploy.sh -D 600 ./test/configs/local-resharding.txt ./test/deploy.sh -D 60000 ./test/configs/local-resharding.txt

@@ -1,6 +1,12 @@
#!/bin/bash #!/bin/bash
ROOT=$(dirname $0)/.. unset -v progdir
case "${0}" in
*/*) progdir="${0%/*}" ;;
*) progdir=. ;;
esac
ROOT="${progdir}/.."
USER=$(whoami) USER=$(whoami)
. "${ROOT}/scripts/setup_bls_build_flags.sh" . "${ROOT}/scripts/setup_bls_build_flags.sh"
@@ -30,24 +36,7 @@ function check_result() {
} }
function cleanup() { function cleanup() {
for pid in `/bin/ps -fu $USER| grep "harmony\|txgen\|soldier\|commander\|profiler\|bootnode" | grep -v "grep" | grep -v "vi" | awk '{print $2}'`; "${progdir}/kill_node.sh"
do
echo 'Killed process: '$pid
$DRYRUN kill -9 $pid 2> /dev/null
done
rm -rf ./db/harmony_*
rm -rf ./db-127.0.0.1-*
}
function killnode() {
local port=$1
if [ -n "port" ]; then
pid=$(/bin/ps -fu $USER | grep "harmony" | grep "$port" | awk '{print $2}')
echo "killing node with port: $port"
$DRYRUN kill -9 $pid 2> /dev/null
echo "node with port: $port is killed"
fi
} }
trap cleanup SIGINT SIGTERM trap cleanup SIGINT SIGTERM

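The deploy.sh hunk above drops the script's private ps/grep/kill loop (and the now-unused killnode helper) in favor of a single call into kill_node.sh, and derives the script directory from "$0" instead of dirname. A minimal sketch of that pattern, assuming only a sibling kill_node.sh as in this repo layout:

#!/bin/bash
# Sketch of the cleanup wiring introduced in deploy.sh (not the full script):
# resolve the directory this script lives in without relying on dirname,
# then let a SIGINT/SIGTERM trap delegate process cleanup to kill_node.sh.
unset -v progdir
case "${0}" in
*/*) progdir="${0%/*}" ;;
*) progdir=. ;;
esac

function cleanup() {
   "${progdir}/kill_node.sh"
}
trap cleanup SIGINT SIGTERM

echo "running from ${progdir}; interrupt with Ctrl-C to invoke cleanup"
sleep 600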
@@ -1,9 +1,3 @@
#!/bin/bash #!/bin/bash
pkill -9 '^(harmony|txgen|soldier|commander|profiler|beacon|bootnode)$' | sed 's/^/Killed process: /'
for pid in `/bin/ps -fu $USER| grep "harmony\|txgen\|soldier\|commander\|profiler\|beacon\|bootnode" | grep -v "grep" | grep -v "vi" | awk '{print $2}'`;
do
echo 'Killed process: '$pid
kill -9 $pid
done
rm -rf db-127.0.0.1-* rm -rf db-127.0.0.1-*

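kill_node.sh itself shrinks to a single anchored pkill: the regexp only matches processes whose name is exactly one of the listed binaries, unlike the old ps | grep pipeline, which also matched any command line merely containing those strings (hence the grep -v "grep" / grep -v "vi" exclusions it needed). A small sketch of the same matching, using pgrep -l to preview what the pattern would kill:

#!/bin/bash
# Preview which processes the anchored pattern from kill_node.sh would match.
# pgrep -l prints "PID name" for each match; pkill -9 with the same pattern
# would kill them.
pattern='^(harmony|txgen|soldier|commander|profiler|beacon|bootnode)$'
pgrep -l "$pattern" || echo "no matching processes"
# To actually kill them (mirrors kill_node.sh):
# pkill -9 "$pattern"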