Merge branch 'master' of https://github.com/harmony-one/harmony into crash_fix
commit a026d3d6a3
@@ -0,0 +1,83 @@
package types

import (
    "math/big"
    "sort"

    "github.com/ethereum/go-ethereum/rlp"

    "github.com/ethereum/go-ethereum/common"
)

// CrossLink is only used on beacon chain to store the hash links from other shards
type CrossLink struct {
    ChainHeader *Header
}

// NewCrossLink returns a new cross link object
func NewCrossLink(header *Header) CrossLink {
    return CrossLink{header}
}

// Header returns header
func (cl CrossLink) Header() *Header {
    return cl.ChainHeader
}

// ShardID returns shardID
func (cl CrossLink) ShardID() uint32 {
    return cl.ChainHeader.ShardID
}

// BlockNum returns blockNum
func (cl CrossLink) BlockNum() *big.Int {
    return cl.ChainHeader.Number
}

// Hash returns hash
func (cl CrossLink) Hash() common.Hash {
    return cl.ChainHeader.Hash()
}

// StateRoot returns hash of state root
func (cl CrossLink) StateRoot() common.Hash {
    return cl.ChainHeader.Root
}

// OutgoingReceiptsRoot returns hash of cross shard receipts
func (cl CrossLink) OutgoingReceiptsRoot() common.Hash {
    return cl.ChainHeader.OutgoingReceiptHash
}

// Serialize returns bytes of cross link rlp-encoded content
func (cl CrossLink) Serialize() []byte {
    bytes, _ := rlp.EncodeToBytes(cl)
    return bytes
}

// DeserializeCrossLink rlp-decodes the bytes into a cross link object.
func DeserializeCrossLink(bytes []byte) (*CrossLink, error) {
    cl := &CrossLink{}
    err := rlp.DecodeBytes(bytes, cl)
    if err != nil {
        return nil, err
    }
    return cl, err
}

// CrossLinks is a collection of cross links
type CrossLinks []CrossLink

// Sort crosslinks by shardID and then by blockNum
func (cls CrossLinks) Sort() {
    sort.Slice(cls, func(i, j int) bool {
        return cls[i].ShardID() < cls[j].ShardID() || (cls[i].ShardID() == cls[j].ShardID() && cls[i].BlockNum().Cmp(cls[j].BlockNum()) < 0)
    })
}

// IsSorted checks whether the cross links are sorted
func (cls CrossLinks) IsSorted() bool {
    return sort.SliceIsSorted(cls, func(i, j int) bool {
        return cls[i].ShardID() < cls[j].ShardID() || (cls[i].ShardID() == cls[j].ShardID() && cls[i].BlockNum().Cmp(cls[j].BlockNum()) < 0)
    })
}
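For reference, the comparator used by Sort and IsSorted orders cross links first by shard ID, then by block number within a shard. A standalone sketch of the same two-key ordering, independent of the harmony types and using made-up values, not part of this diff:

package main

import (
    "fmt"
    "sort"
)

// link is a stand-in for CrossLink carrying only the two sort keys.
type link struct {
    shardID  uint32
    blockNum uint64
}

func main() {
    links := []link{{2, 5}, {1, 9}, {2, 4}, {1, 3}}

    // Same ordering rule as CrossLinks.Sort: by shardID, then by blockNum.
    sort.Slice(links, func(i, j int) bool {
        if links[i].shardID != links[j].shardID {
            return links[i].shardID < links[j].shardID
        }
        return links[i].blockNum < links[j].blockNum
    })

    fmt.Println(links) // [{1 3} {1 9} {2 4} {2 5}]
}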
@@ -0,0 +1,180 @@
package types

import (
    "bytes"
    "encoding/binary"
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/rlp"

    "github.com/harmony-one/harmony/internal/ctxerror"
)

// CXReceipt represents a receipt for a cross-shard transaction
type CXReceipt struct {
    TxHash    common.Hash // hash of the cross shard transaction in source shard
    From      common.Address
    To        *common.Address
    ShardID   uint32
    ToShardID uint32
    Amount    *big.Int
}

// CXReceipts is a list of CXReceipt
type CXReceipts []*CXReceipt

// Len returns the length of s.
func (cs CXReceipts) Len() int { return len(cs) }

// Swap swaps the i'th and the j'th element in s.
func (cs CXReceipts) Swap(i, j int) { cs[i], cs[j] = cs[j], cs[i] }

// GetRlp implements Rlpable and returns the i'th element of s in rlp.
func (cs CXReceipts) GetRlp(i int) []byte {
    if len(cs) == 0 {
        return []byte{}
    }
    enc, _ := rlp.EncodeToBytes(cs[i])
    return enc
}

// ToShardID returns the destination shardID of the cxReceipt
func (cs CXReceipts) ToShardID(i int) uint32 {
    if len(cs) == 0 {
        return 0
    }
    return cs[i].ToShardID
}

// MaxToShardID returns the maximum destination shardID of cxReceipts
func (cs CXReceipts) MaxToShardID() uint32 {
    maxShardID := uint32(0)
    if len(cs) == 0 {
        return maxShardID
    }
    for i := 0; i < len(cs); i++ {
        if maxShardID < cs[i].ToShardID {
            maxShardID = cs[i].ToShardID
        }
    }
    return maxShardID
}

// NewCrossShardReceipt creates a cross shard receipt
func NewCrossShardReceipt(txHash common.Hash, from common.Address, to *common.Address, shardID uint32, toShardID uint32, amount *big.Int) *CXReceipt {
    return &CXReceipt{TxHash: txHash, From: from, To: to, ShardID: shardID, ToShardID: toShardID, Amount: amount}
}

// CXMerkleProof represents the merkle proof of a collection of ordered cross shard transactions
type CXMerkleProof struct {
    BlockNum      *big.Int      // blockNumber of source shard
    BlockHash     common.Hash   // blockHash of source shard
    ShardID       uint32        // shardID of source shard
    CXReceiptHash common.Hash   // root hash of the cross shard receipts in a given block
    ShardIDs      []uint32      // ordered list, records destination shardID
    CXShardHashes []common.Hash // ordered hash list, each hash corresponds to one destination shard's receipts root hash
}

// CXReceiptsProof carries the cross shard receipts and merkle proof
type CXReceiptsProof struct {
    Receipts    CXReceipts
    MerkleProof *CXMerkleProof
}

// CXReceiptsProofs is a list of CXReceiptsProof
type CXReceiptsProofs []*CXReceiptsProof

// Len returns the length of s.
func (cs CXReceiptsProofs) Len() int { return len(cs) }

// Swap swaps the i'th and the j'th element in s.
func (cs CXReceiptsProofs) Swap(i, j int) { cs[i], cs[j] = cs[j], cs[i] }

// GetRlp implements Rlpable and returns the i'th element of s in rlp.
func (cs CXReceiptsProofs) GetRlp(i int) []byte {
    if len(cs) == 0 {
        return []byte{}
    }
    enc, _ := rlp.EncodeToBytes(cs[i])
    return enc
}

// ToShardID returns the destination shardID of the cxReceipt
// Not used
func (cs CXReceiptsProofs) ToShardID(i int) uint32 {
    return 0
}

// MaxToShardID returns the maximum destination shardID of cxReceipts
// Not used
func (cs CXReceiptsProofs) MaxToShardID() uint32 {
    return 0
}

// GetToShardID gets the destination shardID; it returns an error if there is more than one unique shardID
func (cxp *CXReceiptsProof) GetToShardID() (uint32, error) {
    var shardID uint32
    if cxp == nil || len(cxp.Receipts) == 0 {
        return uint32(0), ctxerror.New("[GetShardID] CXReceiptsProof or its receipts is NIL")
    }
    for i, cx := range cxp.Receipts {
        if i == 0 {
            shardID = cx.ToShardID
        } else if shardID == cx.ToShardID {
            continue
        } else {
            return shardID, ctxerror.New("[GetShardID] CXReceiptsProof contains distinct ToShardID")
        }
    }
    return shardID, nil
}

// IsValidCXReceiptsProof checks whether the given CXReceiptsProof is consistent with itself
func (cxp *CXReceiptsProof) IsValidCXReceiptsProof() error {
    toShardID, err := cxp.GetToShardID()
    if err != nil {
        return ctxerror.New("[IsValidCXReceiptsProof] invalid shardID").WithCause(err)
    }

    merkleProof := cxp.MerkleProof
    shardRoot := common.Hash{}
    foundMatchingShardID := false
    byteBuffer := bytes.NewBuffer([]byte{})

    // prepare to calculate source shard outgoing cxreceipts root hash
    for j := 0; j < len(merkleProof.ShardIDs); j++ {
        sKey := make([]byte, 4)
        binary.BigEndian.PutUint32(sKey, merkleProof.ShardIDs[j])
        byteBuffer.Write(sKey)
        byteBuffer.Write(merkleProof.CXShardHashes[j][:])
        if merkleProof.ShardIDs[j] == toShardID {
            shardRoot = merkleProof.CXShardHashes[j]
            foundMatchingShardID = true
        }
    }

    if !foundMatchingShardID {
        return ctxerror.New("[IsValidCXReceiptsProof] Didn't find matching shardID")
    }

    sourceShardID := merkleProof.ShardID
    sourceBlockNum := merkleProof.BlockNum
    sourceOutgoingCXReceiptsHash := merkleProof.CXReceiptHash

    sha := DeriveSha(cxp.Receipts)

    // (1) verify the CXReceipts trie root matches
    if sha != shardRoot {
        return ctxerror.New("[IsValidCXReceiptsProof] Trie Root of ReadCXReceipts Not Match", "sourceShardID", sourceShardID, "sourceBlockNum", sourceBlockNum, "calculated", sha, "got", shardRoot)
    }

    // (2) verify the outgoingCXReceiptsHash matches
    outgoingHashFromSourceShard := crypto.Keccak256Hash(byteBuffer.Bytes())
    if outgoingHashFromSourceShard != sourceOutgoingCXReceiptsHash {
        return ctxerror.New("[IsValidCXReceiptsProof] IncomingReceiptRootHash from source shard not match", "sourceShardID", sourceShardID, "sourceBlockNum", sourceBlockNum, "calculated", outgoingHashFromSourceShard, "got", sourceOutgoingCXReceiptsHash)
    }

    return nil
}
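IsValidCXReceiptsProof recomputes the source shard's outgoing receipts hash by concatenating, for each destination shard, the 4-byte big-endian shard ID followed by that shard's receipts root, then hashing the whole buffer with Keccak-256. A self-contained sketch of that construction, not part of this diff, using made-up root hashes and the legacy Keccak from golang.org/x/crypto/sha3 in place of the go-ethereum wrapper:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"

    "golang.org/x/crypto/sha3"
)

func main() {
    // Hypothetical per-destination-shard receipt roots (32 bytes each).
    shardIDs := []uint32{0, 1, 3}
    shardRoots := [][32]byte{{0x01}, {0x02}, {0x03}}

    buf := bytes.NewBuffer([]byte{})
    for i, id := range shardIDs {
        sKey := make([]byte, 4)
        binary.BigEndian.PutUint32(sKey, id) // 4-byte big-endian shard ID
        buf.Write(sKey)
        buf.Write(shardRoots[i][:]) // followed by that shard's receipts root
    }

    hasher := sha3.NewLegacyKeccak256()
    hasher.Write(buf.Bytes())
    fmt.Printf("outgoing receipts hash: %x\n", hasher.Sum(nil))
}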
@@ -0,0 +1,187 @@
package chain

import (
    "encoding/binary"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/harmony-one/bls/ffi/go/bls"
    "github.com/pkg/errors"
    "golang.org/x/crypto/sha3"

    "github.com/harmony-one/harmony/consensus/engine"
    "github.com/harmony-one/harmony/core/state"
    "github.com/harmony-one/harmony/core/types"
    "github.com/harmony-one/harmony/internal/ctxerror"
    "github.com/harmony-one/harmony/internal/utils"
)

type engineImpl struct{}

// Engine is an algorithm-agnostic consensus engine.
var Engine = &engineImpl{}

// SealHash returns the hash of a block prior to it being sealed.
func (e *engineImpl) SealHash(header *types.Header) (hash common.Hash) {
    hasher := sha3.NewLegacyKeccak256()
    // TODO: update with new fields
    if err := rlp.Encode(hasher, []interface{}{
        header.ParentHash,
        header.Coinbase,
        header.Root,
        header.TxHash,
        header.ReceiptHash,
        header.Bloom,
        header.Number,
        header.GasLimit,
        header.GasUsed,
        header.Time,
        header.Extra,
    }); err != nil {
        utils.Logger().Warn().Err(err).Msg("rlp.Encode failed")
    }
    hasher.Sum(hash[:0])
    return hash
}

// Seal seals the final block.
func (e *engineImpl) Seal(chain engine.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
    // TODO: implement final block sealing
    return nil
}

// Author returns the author of the block header.
func (e *engineImpl) Author(header *types.Header) (common.Address, error) {
    // TODO: implement this
    return common.Address{}, nil
}

// Prepare prepares the header for sealing.
// TODO(RJ): fix it.
func (e *engineImpl) Prepare(chain engine.ChainReader, header *types.Header) error {
    // TODO: implement prepare method
    return nil
}

// VerifyHeader checks whether a header conforms to the consensus rules of the bft engine.
func (e *engineImpl) VerifyHeader(chain engine.ChainReader, header *types.Header, seal bool) error {
    parentHeader := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
    if parentHeader == nil {
        return engine.ErrUnknownAncestor
    }
    if seal {
        if err := e.VerifySeal(chain, header); err != nil {
            return err
        }
    }
    return nil
}

// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications.
func (e *engineImpl) VerifyHeaders(chain engine.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
    abort, results := make(chan struct{}), make(chan error, len(headers))
    for i := 0; i < len(headers); i++ {
        results <- nil
    }
    return abort, results
}

// retrievePublicKeysFromLastBlock finds the public keys of last block's committee
func retrievePublicKeysFromLastBlock(bc engine.ChainReader, header *types.Header) ([]*bls.PublicKey, error) {
    parentHeader := bc.GetHeaderByHash(header.ParentHash)
    if parentHeader == nil {
        return nil, ctxerror.New("cannot find parent block header in DB",
            "parentHash", header.ParentHash)
    }
    parentShardState, err := bc.ReadShardState(parentHeader.Epoch)
    if err != nil {
        return nil, ctxerror.New("cannot read shard state",
            "epoch", parentHeader.Epoch,
        ).WithCause(err)
    }
    parentCommittee := parentShardState.FindCommitteeByID(parentHeader.ShardID)
    if parentCommittee == nil {
        return nil, ctxerror.New("cannot find shard in the shard state",
            "parentBlockNumber", parentHeader.Number,
            "shardID", parentHeader.ShardID,
        )
    }
    var committerKeys []*bls.PublicKey
    for _, member := range parentCommittee.NodeList {
        committerKey := new(bls.PublicKey)
        err := member.BlsPublicKey.ToLibBLSPublicKey(committerKey)
        if err != nil {
            return nil, ctxerror.New("cannot convert BLS public key",
                "blsPublicKey", member.BlsPublicKey).WithCause(err)
        }
        committerKeys = append(committerKeys, committerKey)
    }
    return committerKeys, nil
}

// VerifySeal implements Engine, checking whether the given block satisfies
// the PoS difficulty requirements, i.e. >= 2f+1 valid signatures from the committee
func (e *engineImpl) VerifySeal(chain engine.ChainReader, header *types.Header) error {
    if chain.CurrentHeader().Number.Uint64() <= uint64(1) {
        return nil
    }
    publicKeys, err := retrievePublicKeysFromLastBlock(chain, header)
    if err != nil {
        return ctxerror.New("[VerifySeal] Cannot retrieve publickeys from last block").WithCause(err)
    }
    payload := append(header.LastCommitSignature[:], header.LastCommitBitmap...)
    aggSig, mask, err := ReadSignatureBitmapByPublicKeys(payload, publicKeys)
    if err != nil {
        return ctxerror.New("[VerifySeal] Unable to deserialize the LastCommitSignature and LastCommitBitmap in Block Header").WithCause(err)
    }
    parentHeader := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
    parentQuorum, err := QuorumForBlock(chain, parentHeader)
    if err != nil {
        return errors.Wrapf(err,
            "cannot calculate quorum for block %s", header.Number)
    }
    if count := utils.CountOneBits(mask.Bitmap); count < parentQuorum {
        return ctxerror.New("[VerifySeal] Not enough signature in LastCommitSignature from Block Header",
            "need", parentQuorum, "got", count)
    }

    blockNumHash := make([]byte, 8)
    binary.LittleEndian.PutUint64(blockNumHash, header.Number.Uint64()-1)
    lastCommitPayload := append(blockNumHash, header.ParentHash[:]...)

    if !aggSig.VerifyHash(mask.AggregatePublic, lastCommitPayload) {
        return ctxerror.New("[VerifySeal] Unable to verify aggregated signature from last block", "lastBlockNum", header.Number.Uint64()-1, "lastBlockHash", header.ParentHash)
    }
    return nil
}

// Finalize implements Engine, accumulating the block rewards,
// setting the final state and assembling the block.
func (e *engineImpl) Finalize(chain engine.ChainReader, header *types.Header, state *state.DB, txs []*types.Transaction, receipts []*types.Receipt, outcxs []*types.CXReceipt, incxs []*types.CXReceiptsProof) (*types.Block, error) {
    // Accumulate any block and uncle rewards and commit the final state root
    // Header seems complete, assemble into a block and return
    if err := AccumulateRewards(chain, state, header); err != nil {
        return nil, ctxerror.New("cannot pay block reward").WithCause(err)
    }
    header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
    return types.NewBlock(header, txs, receipts, outcxs, incxs), nil
}

// QuorumForBlock returns the quorum for the given block header.
func QuorumForBlock(
    chain engine.ChainReader, h *types.Header,
) (quorum int, err error) {
    ss, err := chain.ReadShardState(h.Epoch)
    if err != nil {
        return 0, errors.Wrapf(err,
            "cannot read shard state for epoch %s", h.Epoch)
    }
    c := ss.FindCommitteeByID(h.ShardID)
    if c == nil {
        return 0, errors.Errorf(
            "cannot find shard %d in shard state", h.ShardID)
    }
    return (len(c.NodeList))*2/3 + 1, nil
}
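QuorumForBlock derives the commit quorum as len(NodeList)*2/3 + 1, the usual at-least-2f+1 threshold under integer division. A tiny standalone illustration of that arithmetic, not part of this diff, with made-up committee sizes:

package main

import "fmt"

// quorum mirrors the arithmetic in QuorumForBlock.
func quorum(committeeSize int) int {
    return committeeSize*2/3 + 1
}

func main() {
    for _, n := range []int{4, 10, 100} {
        fmt.Printf("committee of %d -> quorum %d\n", n, quorum(n))
    }
    // committee of 4 -> quorum 3
    // committee of 10 -> quorum 7
    // committee of 100 -> quorum 67
}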
@@ -0,0 +1,105 @@
package chain

import (
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/harmony-one/bls/ffi/go/bls"

    "github.com/harmony-one/harmony/common/denominations"
    "github.com/harmony-one/harmony/consensus/engine"
    "github.com/harmony-one/harmony/core/state"
    "github.com/harmony-one/harmony/core/types"
    bls2 "github.com/harmony-one/harmony/crypto/bls"
    common2 "github.com/harmony-one/harmony/internal/common"
    "github.com/harmony-one/harmony/internal/ctxerror"
    "github.com/harmony-one/harmony/internal/utils"
)

// BlockReward is the block reward, to be split evenly among block signers.
var BlockReward = new(big.Int).Mul(big.NewInt(24), big.NewInt(denominations.One))

// AccumulateRewards credits the coinbase of the given block with the mining
// reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded.
func AccumulateRewards(
    bc engine.ChainReader, state *state.DB, header *types.Header,
) error {
    blockNum := header.Number.Uint64()
    if blockNum == 0 {
        // Epoch block has no parent to reward.
        return nil
    }
    // TODO ek – retrieving by parent number (blockNum - 1) doesn't work,
    // while it is okay with hash. Sounds like DB inconsistency.
    // Figure out why.
    parentHeader := bc.GetHeaderByHash(header.ParentHash)
    if parentHeader == nil {
        return ctxerror.New("cannot find parent block header in DB",
            "parentHash", header.ParentHash)
    }
    if parentHeader.Number.Cmp(common.Big0) == 0 {
        // Parent is an epoch block,
        // which is not signed in the usual manner therefore rewards nothing.
        return nil
    }
    parentShardState, err := bc.ReadShardState(parentHeader.Epoch)
    if err != nil {
        return ctxerror.New("cannot read shard state",
            "epoch", parentHeader.Epoch,
        ).WithCause(err)
    }
    parentCommittee := parentShardState.FindCommitteeByID(parentHeader.ShardID)
    if parentCommittee == nil {
        return ctxerror.New("cannot find shard in the shard state",
            "parentBlockNumber", parentHeader.Number,
            "shardID", parentHeader.ShardID,
        )
    }
    var committerKeys []*bls.PublicKey
    for _, member := range parentCommittee.NodeList {
        committerKey := new(bls.PublicKey)
        err := member.BlsPublicKey.ToLibBLSPublicKey(committerKey)
        if err != nil {
            return ctxerror.New("cannot convert BLS public key",
                "blsPublicKey", member.BlsPublicKey).WithCause(err)
        }
        committerKeys = append(committerKeys, committerKey)
    }
    mask, err := bls2.NewMask(committerKeys, nil)
    if err != nil {
        return ctxerror.New("cannot create group sig mask").WithCause(err)
    }
    if err := mask.SetMask(header.LastCommitBitmap); err != nil {
        return ctxerror.New("cannot set group sig mask bits").WithCause(err)
    }
    totalAmount := big.NewInt(0)
    var accounts []common.Address
    signers := []string{}
    for idx, member := range parentCommittee.NodeList {
        if signed, err := mask.IndexEnabled(idx); err != nil {
            return ctxerror.New("cannot check for committer bit",
                "committerIndex", idx,
            ).WithCause(err)
        } else if signed {
            accounts = append(accounts, member.EcdsaAddress)
        }
    }
    numAccounts := big.NewInt(int64(len(accounts)))
    last := new(big.Int)
    for i, account := range accounts {
        cur := new(big.Int)
        cur.Mul(BlockReward, big.NewInt(int64(i+1))).Div(cur, numAccounts)
        diff := new(big.Int).Sub(cur, last)
        signers = append(signers, common2.MustAddressToBech32(account))
        state.AddBalance(account, diff)
        totalAmount = new(big.Int).Add(totalAmount, diff)
        last = cur
    }
    header.Logger(utils.Logger()).Debug().
        Str("NumAccounts", numAccounts.String()).
        Str("TotalAmount", totalAmount.String()).
        Strs("Signers", signers).
        Msg("[Block Reward] Successfully paid out block reward")
    return nil
}
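AccumulateRewards splits BlockReward among the signers by tracking a running cumulative share, cur = BlockReward*(i+1)/numAccounts, and paying each signer the difference from the previous cumulative value. Because each payout is a difference of cumulative amounts, the payouts always sum to exactly BlockReward even when the integer division truncates. A standalone illustration, not part of this diff, with a small made-up reward that does not divide evenly:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    blockReward := big.NewInt(100) // made-up reward, not the real 24*One
    numAccounts := big.NewInt(3)

    total := big.NewInt(0)
    last := new(big.Int)
    for i := 0; i < 3; i++ {
        cur := new(big.Int)
        cur.Mul(blockReward, big.NewInt(int64(i+1))).Div(cur, numAccounts)
        diff := new(big.Int).Sub(cur, last) // this signer's payout
        total.Add(total, diff)
        fmt.Printf("signer %d gets %s\n", i, diff) // 33, 33, 34
        last = cur
    }
    fmt.Printf("total paid: %s\n", total) // exactly 100 despite truncation
}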
@@ -0,0 +1,41 @@
package chain

import (
    "errors"

    "github.com/harmony-one/bls/ffi/go/bls"

    bls2 "github.com/harmony-one/harmony/crypto/bls"
    "github.com/harmony-one/harmony/internal/utils"
)

// ReadSignatureBitmapByPublicKeys reads the payload of signature and bitmap based on public keys
func ReadSignatureBitmapByPublicKeys(recvPayload []byte, publicKeys []*bls.PublicKey) (*bls.Sign, *bls2.Mask, error) {
    if len(recvPayload) < 96 {
        return nil, nil, errors.New("payload not have enough length")
    }
    payload := append(recvPayload[:0:0], recvPayload...)
    //#### Read payload data
    // 96 bytes of multi-sig
    offset := 0
    multiSig := payload[offset : offset+96]
    offset += 96
    // bitmap
    bitmap := payload[offset:]
    //#### END Read payload data

    aggSig := bls.Sign{}
    err := aggSig.Deserialize(multiSig)
    if err != nil {
        return nil, nil, errors.New("unable to deserialize multi-signature from payload")
    }
    mask, err := bls2.NewMask(publicKeys, nil)
    if err != nil {
        utils.Logger().Warn().Err(err).Msg("onNewView unable to setup mask for prepared message")
        return nil, nil, errors.New("unable to setup mask from payload")
    }
    if err := mask.SetMask(bitmap); err != nil {
        utils.Logger().Warn().Err(err).Msg("mask.SetMask failed")
    }
    return &aggSig, mask, nil
}
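ReadSignatureBitmapByPublicKeys expects a payload whose first 96 bytes are the serialized aggregate BLS signature, with the remaining bytes forming the signer bitmap. A minimal sketch of that layout, not part of this diff, including the full-copy trick append(p[:0:0], p...) used to avoid aliasing the caller's slice; the bytes are made up and carry no real signature:

package main

import "fmt"

func main() {
    // Hypothetical received payload: 96 signature bytes followed by a 2-byte bitmap.
    recvPayload := make([]byte, 96+2)
    recvPayload[96] = 0xFF // first bitmap byte
    recvPayload[97] = 0x03 // second bitmap byte

    // Full copy so later slicing cannot alias or mutate the caller's buffer.
    payload := append(recvPayload[:0:0], recvPayload...)

    multiSig := payload[:96] // 96-byte multi-signature
    bitmap := payload[96:]   // remaining bytes are the signer bitmap

    fmt.Printf("sig bytes: %d, bitmap: %x\n", len(multiSig), bitmap)
    // sig bytes: 96, bitmap: ff03
}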
@@ -0,0 +1,64 @@
package main

import (
    "encoding/gob"
    "flag"
    "fmt"
    "os"
    "path"
    "reflect"

    "github.com/golang/mock/mockgen/model"

    pkg_ "github.com/ethereum/go-ethereum/log"
)

var output = flag.String("output", "", "The output file name, or empty to use stdout.")

func main() {
    flag.Parse()

    its := []struct {
        sym string
        typ reflect.Type
    }{

        {"Handler", reflect.TypeOf((*pkg_.Handler)(nil)).Elem()},
    }
    pkg := &model.Package{
        // NOTE: This behaves contrary to documented behaviour if the
        // package name is not the final component of the import path.
        // The reflect package doesn't expose the package name, though.
        Name: path.Base("github.com/ethereum/go-ethereum/log"),
    }

    for _, it := range its {
        intf, err := model.InterfaceFromInterfaceType(it.typ)
        if err != nil {
            fmt.Fprintf(os.Stderr, "Reflection: %v\n", err)
            os.Exit(1)
        }
        intf.Name = it.sym
        pkg.Interfaces = append(pkg.Interfaces, intf)
    }

    outfile := os.Stdout
    if len(*output) != 0 {
        var err error
        outfile, err = os.Create(*output)
        if err != nil {
            fmt.Fprintf(os.Stderr, "failed to open output file %q", *output)
        }
        defer func() {
            if err := outfile.Close(); err != nil {
                fmt.Fprintf(os.Stderr, "failed to close output file %q", *output)
                os.Exit(1)
            }
        }()
    }

    if err := gob.NewEncoder(outfile).Encode(pkg); err != nil {
        fmt.Fprintf(os.Stderr, "gob encode: %v\n", err)
        os.Exit(1)
    }
}
@@ -0,0 +1,278 @@
package node

import (
    "encoding/binary"
    "errors"
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/rlp"

    "github.com/harmony-one/bls/ffi/go/bls"
    "github.com/harmony-one/harmony/core"
    "github.com/harmony-one/harmony/core/types"
    bls_cosi "github.com/harmony-one/harmony/crypto/bls"
    "github.com/harmony-one/harmony/internal/ctxerror"
    "github.com/harmony-one/harmony/internal/utils"
)

// ProcessHeaderMessage verifies and processes a Node/Header message into a crosslink when it is valid
func (node *Node) ProcessHeaderMessage(msgPayload []byte) {
    if node.NodeConfig.ShardID == 0 {

        var headers []*types.Header
        err := rlp.DecodeBytes(msgPayload, &headers)
        if err != nil {
            utils.Logger().Error().
                Err(err).
                Msg("[ProcessingHeader] Crosslink Headers Broadcast Unable to Decode")
            return
        }

        // Try to reprocess all the pending cross links
        node.pendingClMutex.Lock()
        crossLinkHeadersToProcess := node.pendingCrossLinks
        node.pendingCrossLinks = []*types.Header{}
        node.pendingClMutex.Unlock()

        firstCrossLinkBlock := core.ShardingSchedule.FirstCrossLinkBlock()
        for _, header := range headers {
            if header.Number.Uint64() >= firstCrossLinkBlock {
                // Only process cross links starting from FirstCrossLinkBlock
                crossLinkHeadersToProcess = append(crossLinkHeadersToProcess, header)
            }
        }

        headersToQueue := []*types.Header{}

        for _, header := range crossLinkHeadersToProcess {
            exist, err := node.Blockchain().ReadCrossLink(header.ShardID, header.Number.Uint64(), false)
            if err == nil && exist != nil {
                utils.Logger().Debug().
                    Msgf("[ProcessingHeader] Cross Link already exists, pass. Block num: %d", header.Number)
                continue
            }

            if header.Number.Uint64() > firstCrossLinkBlock { // Directly trust the first cross-link
                // Sanity check on the previous link with the new link
                previousLink, err := node.Blockchain().ReadCrossLink(header.ShardID, header.Number.Uint64()-1, false)
                if err != nil {
                    previousLink, err = node.Blockchain().ReadCrossLink(header.ShardID, header.Number.Uint64()-1, true)
                    if err != nil {
                        headersToQueue = append(headersToQueue, header)
                        continue
                    }
                }

                err = node.VerifyCrosslinkHeader(previousLink.Header(), header)
                if err != nil {
                    utils.Logger().Warn().
                        Err(err).
                        Msgf("[ProcessingHeader] Failed to verify new cross link header for shardID %d, blockNum %d", header.ShardID, header.Number)
                    continue
                }
            }

            crossLink := types.NewCrossLink(header)
            utils.Logger().Debug().
                Msgf("[ProcessingHeader] committing for shardID %d, blockNum %d", header.ShardID, header.Number.Uint64())
            node.Blockchain().WriteCrossLinks(types.CrossLinks{crossLink}, true)
        }

        // Queue up the cross links that are in the future
        node.pendingClMutex.Lock()
        node.pendingCrossLinks = append(node.pendingCrossLinks, headersToQueue...)
        node.pendingClMutex.Unlock()
    }
}

func (node *Node) verifyIncomingReceipts(block *types.Block) error {
    m := make(map[common.Hash]bool)
    cxps := block.IncomingReceipts()
    for _, cxp := range cxps {
        if err := cxp.IsValidCXReceiptsProof(); err != nil {
            return ctxerror.New("[verifyIncomingReceipts] verification failed").WithCause(err)
        }
        if node.Blockchain().IsSpent(cxp) {
            return ctxerror.New("[verifyIncomingReceipts] Double Spent!")
        }
        hash := cxp.MerkleProof.BlockHash
        // ignore duplicated receipts
        if _, ok := m[hash]; ok {
            return ctxerror.New("[verifyIncomingReceipts] Double Spent!")
        }
        m[hash] = true

        if err := node.compareCrosslinkWithReceipts(cxp); err != nil {
            return err
        }
    }
    return nil
}

func (node *Node) compareCrosslinkWithReceipts(cxp *types.CXReceiptsProof) error {
    var hash, outgoingReceiptHash common.Hash

    shardID := cxp.MerkleProof.ShardID
    blockNum := cxp.MerkleProof.BlockNum.Uint64()
    beaconChain := node.Beaconchain()
    if shardID == 0 {
        block := beaconChain.GetBlockByNumber(blockNum)
        if block == nil {
            return ctxerror.New("[compareCrosslinkWithReceipts] Cannot get beaconchain header", "blockNum", blockNum, "shardID", shardID)
        }
        hash = block.Hash()
        outgoingReceiptHash = block.OutgoingReceiptHash()
    } else {
        crossLink, err := beaconChain.ReadCrossLink(shardID, blockNum, false)
        if err != nil {
            return ctxerror.New("[compareCrosslinkWithReceipts] Cannot get crosslink", "blockNum", blockNum, "shardID", shardID).WithCause(err)
        }
        hash = crossLink.ChainHeader.Hash()
        outgoingReceiptHash = crossLink.ChainHeader.OutgoingReceiptHash
    }
    // verify the source block hash is from a finalized block
    if hash == cxp.MerkleProof.BlockHash && outgoingReceiptHash == cxp.MerkleProof.CXReceiptHash {
        return nil
    }
    return ErrCrosslinkVerificationFail
}

// VerifyCrosslinkHeader verifies the header is valid against the prevHeader.
func (node *Node) VerifyCrosslinkHeader(prevHeader, header *types.Header) error {

    // TODO: add fork choice rule
    if prevHeader.Hash() != header.ParentHash {
        return ctxerror.New("[CrossLink] Invalid cross link header - parent hash mismatch", "shardID", header.ShardID, "blockNum", header.Number)
    }

    // Verify signature of the new cross link header
    shardState, err := node.Blockchain().ReadShardState(prevHeader.Epoch)
    committee := shardState.FindCommitteeByID(prevHeader.ShardID)

    if err != nil || committee == nil {
        return ctxerror.New("[CrossLink] Failed to read shard state for cross link header", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
    }
    var committerKeys []*bls.PublicKey

    parseKeysSuccess := true
    for _, member := range committee.NodeList {
        committerKey := new(bls.PublicKey)
        err = member.BlsPublicKey.ToLibBLSPublicKey(committerKey)
        if err != nil {
            parseKeysSuccess = false
            break
        }
        committerKeys = append(committerKeys, committerKey)
    }
    if !parseKeysSuccess {
        return ctxerror.New("[CrossLink] cannot convert BLS public key", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
    }

    if header.Number.Uint64() > 1 { // First block doesn't have last sig
        mask, err := bls_cosi.NewMask(committerKeys, nil)
        if err != nil {
            return ctxerror.New("cannot create group sig mask", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
        }
        if err := mask.SetMask(header.LastCommitBitmap); err != nil {
            return ctxerror.New("cannot set group sig mask bits", "shardID", header.ShardID, "blockNum", header.Number).WithCause(err)
        }

        aggSig := bls.Sign{}
        err = aggSig.Deserialize(header.LastCommitSignature[:])
        if err != nil {
            return ctxerror.New("unable to deserialize multi-signature from payload").WithCause(err)
        }

        blockNumBytes := make([]byte, 8)
        binary.LittleEndian.PutUint64(blockNumBytes, header.Number.Uint64()-1)
        commitPayload := append(blockNumBytes, header.ParentHash[:]...)
        if !aggSig.VerifyHash(mask.AggregatePublic, commitPayload) {
            return ctxerror.New("Failed to verify the signature for cross link header ", "shardID", header.ShardID, "blockNum", header.Number)
        }
    }
    return nil
}

// ProposeCrossLinkDataForBeaconchain proposes cross links for the new beacon chain block
func (node *Node) ProposeCrossLinkDataForBeaconchain() (types.CrossLinks, error) {
    utils.Logger().Info().
        Uint64("blockNum", node.Blockchain().CurrentBlock().NumberU64()+1).
        Msg("Proposing cross links ...")
    curBlock := node.Blockchain().CurrentBlock()
    numShards := core.ShardingSchedule.InstanceForEpoch(curBlock.Header().Epoch).NumShards()

    shardCrossLinks := make([]types.CrossLinks, numShards)

    firstCrossLinkBlock := core.ShardingSchedule.FirstCrossLinkBlock()

    for i := 0; i < int(numShards); i++ {
        curShardID := uint32(i)
        lastLink, err := node.Blockchain().ReadShardLastCrossLink(curShardID)

        lastLinkblockNum := big.NewInt(int64(firstCrossLinkBlock))
        blockNumoffset := 0
        if err == nil && lastLink != nil {
            blockNumoffset = 1
            lastLinkblockNum = lastLink.BlockNum()
        }

        for {
            link, err := node.Blockchain().ReadCrossLink(curShardID, lastLinkblockNum.Uint64()+uint64(blockNumoffset), true)
            if err != nil || link == nil {
                break
            }

            if link.BlockNum().Uint64() > firstCrossLinkBlock {
                if lastLink == nil {
                    utils.Logger().Debug().
                        Err(err).
                        Msgf("[CrossLink] Haven't received the first cross link %d", link.BlockNum().Uint64())
                } else {
                    err := node.VerifyCrosslinkHeader(lastLink.Header(), link.Header())
                    if err != nil {
                        utils.Logger().Debug().
                            Err(err).
                            Msgf("[CrossLink] Failed verifying temp cross link %d", link.BlockNum().Uint64())
                        break
                    }
                }
            }
            shardCrossLinks[i] = append(shardCrossLinks[i], *link)
            lastLink = link
            blockNumoffset++
        }
    }

    crossLinksToPropose := types.CrossLinks{}
    for _, crossLinks := range shardCrossLinks {
        crossLinksToPropose = append(crossLinksToPropose, crossLinks...)
    }
    if len(crossLinksToPropose) != 0 {
        crossLinksToPropose.Sort()

        return crossLinksToPropose, nil
    }
    return types.CrossLinks{}, errors.New("No cross link to propose")
}

// ProcessReceiptMessage stores the receipts and merkle proof in the local data store
func (node *Node) ProcessReceiptMessage(msgPayload []byte) {
    cxp := types.CXReceiptsProof{}
    if err := rlp.DecodeBytes(msgPayload, &cxp); err != nil {
        utils.Logger().Error().Err(err).Msg("[ProcessReceiptMessage] Unable to Decode message Payload")
        return
    }

    if err := cxp.IsValidCXReceiptsProof(); err != nil {
        utils.Logger().Error().Err(err).Msg("[ProcessReceiptMessage] Invalid CXReceiptsProof")
        return
    }

    // TODO: check message signature is from the nodes of source shard.

    // TODO: remove in future if not useful
    node.Blockchain().WriteCXReceipts(cxp.MerkleProof.ShardID, cxp.MerkleProof.BlockNum.Uint64(), cxp.MerkleProof.BlockHash, cxp.Receipts, true)

    node.AddPendingReceipts(&cxp)
}
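Both VerifySeal in the chain engine above and VerifyCrosslinkHeader verify the aggregate signature over the same message layout: the 8-byte little-endian number of the signed block (header.Number - 1) followed by that block's hash, which is the current header's ParentHash. A standalone sketch of how that message is assembled, not part of this diff, with made-up values:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    // Hypothetical values: the header being verified is block 42, so the
    // committee signed over block 41 and its hash (the parent hash here).
    headerNumber := uint64(42)
    var parentHash [32]byte
    parentHash[0] = 0xAB

    blockNumBytes := make([]byte, 8)
    binary.LittleEndian.PutUint64(blockNumBytes, headerNumber-1)
    commitPayload := append(blockNumBytes, parentHash[:]...)

    fmt.Printf("payload (%d bytes): %x\n", len(commitPayload), commitPayload)
    // payload (40 bytes): 2900000000000000ab00...
}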
@@ -0,0 +1,10 @@
package node

import (
    "errors"
)

var (
    // ErrCrosslinkVerificationFail ...
    ErrCrosslinkVerificationFail = errors.New("Crosslink Verification Failed")
)
@@ -1,3 +1,3 @@
 ./test/kill_node.sh
 rm -rf tmp_log*
-./test/deploy.sh -D 600 ./test/configs/local-resharding.txt
+./test/deploy.sh -D 60000 ./test/configs/local-resharding.txt
@@ -1,9 +1,3 @@
 #!/bin/bash
-
-for pid in `/bin/ps -fu $USER| grep "harmony\|txgen\|soldier\|commander\|profiler\|beacon\|bootnode" | grep -v "grep" | grep -v "vi" | awk '{print $2}'`;
-do
-    echo 'Killed process: '$pid
-    kill -9 $pid
-done
-
+pkill -9 '^(harmony|txgen|soldier|commander|profiler|beacon|bootnode)$' | sed 's/^/Killed process: /'
 rm -rf db-127.0.0.1-*