commit
309a2aef46
@@ -1,97 +0,0 @@
package resharding

import (
	"time"

	"github.com/ethereum/go-ethereum/rpc"
	msg_pb "github.com/harmony-one/harmony/api/proto/message"
	"github.com/harmony-one/harmony/core"
	"github.com/harmony-one/harmony/internal/utils"
)

// Constants for resharding service.
const (
	ReshardingCheckTime = time.Second
)

// Service is the role conversion service.
type Service struct {
	stopChan    chan struct{}
	stoppedChan chan struct{}
	messageChan chan *msg_pb.Message
	beaconChain *core.BlockChain
}

// New returns the role conversion service.
func New(beaconChain *core.BlockChain) *Service {
	return &Service{beaconChain: beaconChain}
}

// StartService starts the role conversion service.
func (s *Service) StartService() {
	s.stopChan = make(chan struct{})
	s.stoppedChan = make(chan struct{})

	s.Init()
	s.Run(s.stopChan, s.stoppedChan)
}

// Init initializes the role conversion service.
func (s *Service) Init() {
}

// Run runs the role conversion.
func (s *Service) Run(stopChan chan struct{}, stoppedChan chan struct{}) {
	go func() {
		defer close(stoppedChan)
		for {
			select {
			default:
				utils.Logger().Info().Msg("Running role conversion")
				// TODO: Write some logic here.
				s.DoService()
			case <-stopChan:
				return
			}
		}
	}()
}

// DoService does the role conversion.
func (s *Service) DoService() {
	tick := time.NewTicker(ReshardingCheckTime)
	// Get the current shard state hash.
	currentShardStateHash := s.beaconChain.CurrentBlock().Header().ShardStateHash()
	for {
		select {
		case <-tick.C:
			latestShardStateHash := s.beaconChain.CurrentBlock().Header().ShardStateHash()
			if currentShardStateHash != latestShardStateHash {
				// TODO(minhdoan): Add resharding logic later, after modifying the resharding
				// func, as it currently doesn't calculate the role (leader/validator).
			}
		}
	}
}

// StopService stops the role conversion service.
func (s *Service) StopService() {
	utils.Logger().Info().Msg("Stopping role conversion service")
	s.stopChan <- struct{}{}
	<-s.stoppedChan
	utils.Logger().Info().Msg("Role conversion stopped")
}

// NotifyService notifies the service.
func (s *Service) NotifyService(params map[string]interface{}) {
}

// SetMessageChan sets up the message channel to the service.
func (s *Service) SetMessageChan(messageChan chan *msg_pb.Message) {
	s.messageChan = messageChan
}

// APIs returns the RPC APIs of the service.
func (s *Service) APIs() []rpc.API {
	return nil
}
@@ -1,16 +1,421 @@
package v3

import (
	"io"
	"math/big"
	"unsafe"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	ethtypes "github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/rs/zerolog"

	blockif "github.com/harmony-one/harmony/block/interface"
	"github.com/harmony-one/harmony/crypto/hash"
	"github.com/harmony-one/harmony/shard"
)

// Header is the V3 block header. It has the same fields as the v2 header and
// is used to identify the v3 body, which includes staking transactions.
// The field definitions are copied instead of embedding the v2 header: with an
// embedded header, the type check in NewBodyForMatchingHeader would report the
// v2 header type instead of the v3 type.
type Header struct {
	fields headerFields
}

// EncodeRLP encodes the header fields into RLP format.
func (h *Header) EncodeRLP(w io.Writer) error {
	return rlp.Encode(w, &h.fields)
}

// DecodeRLP decodes the given RLP decode stream into the header fields.
func (h *Header) DecodeRLP(s *rlp.Stream) error {
	return s.Decode(&h.fields)
}

// NewHeader creates a new header object.
func NewHeader() *Header {
	return &Header{headerFields{
		Number: new(big.Int),
		Time:   new(big.Int),
		ViewID: new(big.Int),
		Epoch:  new(big.Int),
	}}
}
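// Example (editor's sketch, not part of this commit): because EncodeRLP and
// DecodeRLP operate on the copied field struct, a v3 header round-trips through
// RLP with go-ethereum's helpers, and Hash() is the keccak256 of that encoding.
//
//	h := NewHeader()
//	h.SetNumber(big.NewInt(42))
//	enc, _ := rlp.EncodeToBytes(h) // calls h.EncodeRLP
//	h2 := NewHeader()
//	_ = rlp.DecodeBytes(enc, h2)   // calls h2.DecodeRLP
//	// h2.Number().Uint64() == 42 && h2.Hash() == h.Hash()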

type headerFields struct {
	ParentHash          common.Hash    `json:"parentHash"       gencodec:"required"`
	Coinbase            common.Address `json:"miner"            gencodec:"required"`
	Root                common.Hash    `json:"stateRoot"        gencodec:"required"`
	TxHash              common.Hash    `json:"transactionsRoot" gencodec:"required"`
	ReceiptHash         common.Hash    `json:"receiptsRoot"     gencodec:"required"`
	OutgoingReceiptHash common.Hash    `json:"outgoingReceiptsRoot" gencodec:"required"`
	IncomingReceiptHash common.Hash    `json:"incomingReceiptsRoot" gencodec:"required"`
	Bloom               ethtypes.Bloom `json:"logsBloom"        gencodec:"required"`
	Number              *big.Int       `json:"number"           gencodec:"required"`
	GasLimit            uint64         `json:"gasLimit"         gencodec:"required"`
	GasUsed             uint64         `json:"gasUsed"          gencodec:"required"`
	Time                *big.Int       `json:"timestamp"        gencodec:"required"`
	Extra               []byte         `json:"extraData"        gencodec:"required"`
	MixDigest           common.Hash    `json:"mixHash"          gencodec:"required"`
	// Additional Fields
	ViewID              *big.Int    `json:"viewID"  gencodec:"required"`
	Epoch               *big.Int    `json:"epoch"   gencodec:"required"`
	ShardID             uint32      `json:"shardID" gencodec:"required"`
	LastCommitSignature [96]byte    `json:"lastCommitSignature" gencodec:"required"`
	LastCommitBitmap    []byte      `json:"lastCommitBitmap"    gencodec:"required"` // Contains which validator signed
	ShardStateHash      common.Hash `json:"shardStateRoot"`
	Vrf                 []byte      `json:"vrf"`
	Vdf                 []byte      `json:"vdf"`
	ShardState          []byte      `json:"shardState"`
	CrossLinks          []byte      `json:"crossLink"`
}

// ParentHash is the header hash of the parent block. For the genesis block
// which has no parent by definition, this field is zeroed out.
func (h *Header) ParentHash() common.Hash {
	return h.fields.ParentHash
}

// SetParentHash sets the parent hash field.
func (h *Header) SetParentHash(newParentHash common.Hash) {
	h.fields.ParentHash = newParentHash
}

// Coinbase is the address of the node that proposed this block and all
// transactions in it.
func (h *Header) Coinbase() common.Address {
	return h.fields.Coinbase
}

// SetCoinbase sets the coinbase address field.
func (h *Header) SetCoinbase(newCoinbase common.Address) {
	h.fields.Coinbase = newCoinbase
}

// Root is the state (account) trie root hash.
func (h *Header) Root() common.Hash {
	return h.fields.Root
}

// SetRoot sets the state trie root hash field.
func (h *Header) SetRoot(newRoot common.Hash) {
	h.fields.Root = newRoot
}

// TxHash is the transaction trie root hash.
func (h *Header) TxHash() common.Hash {
	return h.fields.TxHash
}

// SetTxHash sets the transaction trie root hash field.
func (h *Header) SetTxHash(newTxHash common.Hash) {
	h.fields.TxHash = newTxHash
}

// ReceiptHash is the same-shard transaction receipt trie hash.
func (h *Header) ReceiptHash() common.Hash {
	return h.fields.ReceiptHash
}

// SetReceiptHash sets the same-shard transaction receipt trie hash.
func (h *Header) SetReceiptHash(newReceiptHash common.Hash) {
	h.fields.ReceiptHash = newReceiptHash
}

// OutgoingReceiptHash is the egress transaction receipt trie hash.
func (h *Header) OutgoingReceiptHash() common.Hash {
	return h.fields.OutgoingReceiptHash
}

// SetOutgoingReceiptHash sets the egress transaction receipt trie hash.
func (h *Header) SetOutgoingReceiptHash(newOutgoingReceiptHash common.Hash) {
	h.fields.OutgoingReceiptHash = newOutgoingReceiptHash
}

// IncomingReceiptHash is the ingress transaction receipt trie hash.
func (h *Header) IncomingReceiptHash() common.Hash {
	return h.fields.IncomingReceiptHash
}

// SetIncomingReceiptHash sets the ingress transaction receipt trie hash.
func (h *Header) SetIncomingReceiptHash(newIncomingReceiptHash common.Hash) {
	h.fields.IncomingReceiptHash = newIncomingReceiptHash
}

// Bloom is the Bloom filter that indexes accounts and topics logged by smart
// contract transactions (executions) in this block.
func (h *Header) Bloom() ethtypes.Bloom {
	return h.fields.Bloom
}

// SetBloom sets the smart contract log Bloom filter for this block.
func (h *Header) SetBloom(newBloom ethtypes.Bloom) {
	h.fields.Bloom = newBloom
}

// Number is the block number.
//
// The returned instance is a copy; the caller may do anything with it.
func (h *Header) Number() *big.Int {
	return new(big.Int).Set(h.fields.Number)
}

// SetNumber sets the block number.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetNumber(newNumber *big.Int) {
	h.fields.Number = new(big.Int).Set(newNumber)
}

// GasLimit is the gas limit for transactions in this block.
func (h *Header) GasLimit() uint64 {
	return h.fields.GasLimit
}

// SetGasLimit sets the gas limit for transactions in this block.
func (h *Header) SetGasLimit(newGasLimit uint64) {
	h.fields.GasLimit = newGasLimit
}

// GasUsed is the amount of gas used by transactions in this block.
func (h *Header) GasUsed() uint64 {
	return h.fields.GasUsed
}

// SetGasUsed sets the amount of gas used by transactions in this block.
func (h *Header) SetGasUsed(newGasUsed uint64) {
	h.fields.GasUsed = newGasUsed
}

// Time is the UNIX timestamp of this block.
//
// The returned instance is a copy; the caller may do anything with it.
func (h *Header) Time() *big.Int {
	return new(big.Int).Set(h.fields.Time)
}

// SetTime sets the UNIX timestamp of this block.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetTime(newTime *big.Int) {
	h.fields.Time = new(big.Int).Set(newTime)
}

// Extra is the extra data field of this block.
//
// The returned slice is a copy; the caller may do anything with it.
func (h *Header) Extra() []byte {
	return append(h.fields.Extra[:0:0], h.fields.Extra...)
}

// SetExtra sets the extra data field of this block.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetExtra(newExtra []byte) {
	h.fields.Extra = append(newExtra[:0:0], newExtra...)
}

// MixDigest is the mixhash.
//
// This field is a remnant from Ethereum; Harmony does not use it and always
// zeroes it out.
func (h *Header) MixDigest() common.Hash {
	return h.fields.MixDigest
}

// SetMixDigest sets the mixhash of this block.
func (h *Header) SetMixDigest(newMixDigest common.Hash) {
	h.fields.MixDigest = newMixDigest
}

// ViewID is the ID of the view in which this block was originally proposed.
//
// It normally increases by one for each subsequent block, or by more than one
// if one or more PBFT/FBFT view changes have occurred.
//
// The returned instance is a copy; the caller may do anything with it.
func (h *Header) ViewID() *big.Int {
	return new(big.Int).Set(h.fields.ViewID)
}

// SetViewID sets the view ID in which the block was originally proposed.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetViewID(newViewID *big.Int) {
	h.fields.ViewID = new(big.Int).Set(newViewID)
}

// Epoch is the epoch number of this block.
//
// The returned instance is a copy; the caller may do anything with it.
func (h *Header) Epoch() *big.Int {
	return new(big.Int).Set(h.fields.Epoch)
}

// SetEpoch sets the epoch number of this block.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetEpoch(newEpoch *big.Int) {
	h.fields.Epoch = new(big.Int).Set(newEpoch)
}

// ShardID is the shard ID to which this block belongs.
func (h *Header) ShardID() uint32 {
	return h.fields.ShardID
}

// SetShardID sets the shard ID to which this block belongs.
func (h *Header) SetShardID(newShardID uint32) {
	h.fields.ShardID = newShardID
}

// LastCommitSignature is the FBFT commit group signature for the last block.
func (h *Header) LastCommitSignature() [96]byte {
	return h.fields.LastCommitSignature
}

// SetLastCommitSignature sets the FBFT commit group signature for the last
// block.
func (h *Header) SetLastCommitSignature(newLastCommitSignature [96]byte) {
	h.fields.LastCommitSignature = newLastCommitSignature
}

// LastCommitBitmap is the signatory bitmap of the previous block. Bit
// positions index into committee member array.
//
// The returned slice is a copy; the caller may do anything with it.
func (h *Header) LastCommitBitmap() []byte {
	return append(h.fields.LastCommitBitmap[:0:0], h.fields.LastCommitBitmap...)
}

// SetLastCommitBitmap sets the signatory bitmap of the previous block.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetLastCommitBitmap(newLastCommitBitmap []byte) {
	h.fields.LastCommitBitmap = append(newLastCommitBitmap[:0:0], newLastCommitBitmap...)
}

// ShardStateHash is the shard state hash.
func (h *Header) ShardStateHash() common.Hash {
	return h.fields.ShardStateHash
}

// SetShardStateHash sets the shard state hash.
func (h *Header) SetShardStateHash(newShardStateHash common.Hash) {
	h.fields.ShardStateHash = newShardStateHash
}

// Vrf is the output of the VRF for the epoch.
//
// The returned slice is a copy; the caller may do anything with it.
func (h *Header) Vrf() []byte {
	return append(h.fields.Vrf[:0:0], h.fields.Vrf...)
}

// SetVrf sets the output of the VRF for the epoch.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetVrf(newVrf []byte) {
	h.fields.Vrf = append(newVrf[:0:0], newVrf...)
}

// Vdf is the output of the VDF for the epoch.
//
// The returned slice is a copy; the caller may do anything with it.
func (h *Header) Vdf() []byte {
	return append(h.fields.Vdf[:0:0], h.fields.Vdf...)
}

// SetVdf sets the output of the VDF for the epoch.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetVdf(newVdf []byte) {
	h.fields.Vdf = append(newVdf[:0:0], newVdf...)
}

// ShardState is the RLP-encoded form of shard state (list of committees) for
// the next epoch.
//
// The returned slice is a copy; the caller may do anything with it.
func (h *Header) ShardState() []byte {
	return append(h.fields.ShardState[:0:0], h.fields.ShardState...)
}

// SetShardState sets the RLP-encoded form of shard state.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetShardState(newShardState []byte) {
	h.fields.ShardState = append(newShardState[:0:0], newShardState...)
}

// CrossLinks is the RLP-encoded form of non-beacon block headers chosen to be
// canonical by the beacon committee. This field is present only on beacon
// chain block headers.
//
// The returned slice is a copy; the caller may do anything with it.
func (h *Header) CrossLinks() []byte {
	return append(h.fields.CrossLinks[:0:0], h.fields.CrossLinks...)
}

// SetCrossLinks sets the RLP-encoded form of non-beacon block headers chosen to
// be canonical by the beacon committee.
//
// It stores a copy; the caller may freely modify the original.
func (h *Header) SetCrossLinks(newCrossLinks []byte) {
	h.fields.CrossLinks = append(newCrossLinks[:0:0], newCrossLinks...)
}

// field type overrides for gencodec
type headerMarshaling struct {
	Difficulty *hexutil.Big
	Number     *hexutil.Big
	GasLimit   hexutil.Uint64
	GasUsed    hexutil.Uint64
	Time       *hexutil.Big
	Extra      hexutil.Bytes
	Hash       common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON
}

// Hash returns the block hash of the header, which is simply the keccak256 hash of its
// RLP encoding.
func (h *Header) Hash() common.Hash {
	return hash.FromRLP(h)
}

// Size returns the approximate memory used by all internal contents. It is used
// to approximate and limit the memory consumption of various caches.
func (h *Header) Size() common.StorageSize {
	// TODO: update with new fields
	return common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra())+(h.Number().BitLen()+h.Time().BitLen())/8)
}

// Logger returns a sub-logger with block contexts added.
func (h *Header) Logger(logger *zerolog.Logger) *zerolog.Logger {
	nlogger := logger.
		With().
		Str("blockHash", h.Hash().Hex()).
		Uint32("blockShard", h.ShardID()).
		Uint64("blockEpoch", h.Epoch().Uint64()).
		Uint64("blockNumber", h.Number().Uint64()).
		Logger()
	return &nlogger
}

// GetShardState returns the deserialized shard state object.
func (h *Header) GetShardState() (shard.State, error) {
	shardState := shard.State{}
	err := rlp.DecodeBytes(h.ShardState(), &shardState)
	if err != nil {
		return nil, err
	}
	return shardState, nil
}

// Copy returns a copy of the given header.
func (h *Header) Copy() blockif.Header {
	cpy := *h
	return &cpy
}
@@ -0,0 +1,73 @@
package quorum

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/bls/ffi/go/bls"
	"github.com/harmony-one/harmony/internal/utils"
	// "github.com/harmony-one/harmony/staking/effective"
)

type uniformVoteWeight struct {
	SignatureReader
	DependencyInjectionWriter
}

// Policy ..
func (v *uniformVoteWeight) Policy() Policy {
	return SuperMajorityVote
}

// IsQuorumAchieved ..
func (v *uniformVoteWeight) IsQuorumAchieved(p Phase) bool {
	r := v.SignersCount(p) >= v.QuorumThreshold().Int64()
	utils.Logger().Info().Str("phase", p.String()).
		Int64("signers-count", v.SignersCount(p)).
		Int64("threshold", v.QuorumThreshold().Int64()).
		Int64("participants", v.ParticipantsCount()).
		Msg("Quorum details")
	return r
}

// QuorumThreshold ..
func (v *uniformVoteWeight) QuorumThreshold() *big.Int {
	return big.NewInt(v.ParticipantsCount()*2/3 + 1)
}
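// Worked example (editor's note): with 10 participants the integer threshold is
// 10*2/3 + 1 = 7 signatures, i.e. strictly more than 2/3 of the committee under
// one-node-one-vote.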

// IsRewardThresholdAchieved ..
func (v *uniformVoteWeight) IsRewardThresholdAchieved() bool {
	return v.SignersCount(Commit) >= (v.ParticipantsCount() * 9 / 10)
}

// func (v *uniformVoteWeight) UpdateVotingPower(effective.StakeKeeper) {
//	NO-OP do not add anything here
// }

// ToggleActive for uniform vote is a no-op; it always reports that the voter is active.
func (v *uniformVoteWeight) ToggleActive(*bls.PublicKey) bool {
	// NO-OP do not add anything here
	return true
}

// Award ..
func (v *uniformVoteWeight) Award(
	// Here hook is the callback which receives the amount the earner is due in just reward;
	// it is up to the hook to perform side effects such as writing to the statedb.
	Pie *big.Int, earners []common.Address, hook func(earner common.Address, due *big.Int),
) *big.Int {
	payout := big.NewInt(0)
	last := big.NewInt(0)
	count := big.NewInt(int64(len(earners)))

	for i, account := range earners {
		cur := big.NewInt(0)
		cur.Mul(Pie, big.NewInt(int64(i+1))).Div(cur, count)
		diff := big.NewInt(0).Sub(cur, last)
		hook(common.Address(account), diff)
		payout = big.NewInt(0).Add(payout, diff)
		last = cur
	}

	return payout
}
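// Worked example (editor's sketch, not part of this commit): Award splits Pie
// pro rata with no rounding dust, because each earner receives the difference
// of successive cumulative shares.
//
//	// Pie = 100, three earners: cumulative shares are 33, 66, 100,
//	// so hook is called with 33, 33 and 34, and payout == 100.
//	total := v.Award(big.NewInt(100), earners, func(a common.Address, due *big.Int) {
//		// credit `due` to `a`, e.g. in the statedb
//	})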
@@ -0,0 +1,75 @@
package quorum

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/bls/ffi/go/bls"
	"github.com/harmony-one/harmony/numeric"
	"github.com/harmony-one/harmony/shard"
)

var (
	twoThirds = numeric.NewDec(2).QuoInt64(3).Int
)

type stakedVoter struct {
	isActive, isHarmonyNode bool
	effective               numeric.Dec
}

type stakedVoteWeight struct {
	SignatureReader
	DependencyInjectionWriter
	// EPOS based staking
	validatorStakes            map[[shard.PublicKeySizeInBytes]byte]stakedVoter
	totalEffectiveStakedAmount *big.Int
}

// Policy ..
func (v *stakedVoteWeight) Policy() Policy {
	return SuperMajorityStake
}

// We must maintain a 2/3 quorum, so the threshold is 2/3 of the total
// effective staked amount.
// IsQuorumAchieved ..
func (v *stakedVoteWeight) IsQuorumAchieved(p Phase) bool {
	// TODO Implement this logic
	return true
}

// QuorumThreshold ..
func (v *stakedVoteWeight) QuorumThreshold() *big.Int {
	return new(big.Int).Mul(v.totalEffectiveStakedAmount, twoThirds)
}

// IsRewardThresholdAchieved ..
func (v *stakedVoteWeight) IsRewardThresholdAchieved() bool {
	// TODO Implement
	return false
}

// HACK
var (
	hSentinel          = big.NewInt(0)
	hEffectiveSentinel = numeric.ZeroDec()
)

// Award ..
func (v *stakedVoteWeight) Award(
	Pie *big.Int, earners []common.Address, hook func(earner common.Address, due *big.Int),
) *big.Int {
	// TODO Implement
	return nil
}

// UpdateVotingPower is called only at epoch change; it probably needs to move to CalculateShardState.
// func (v *stakedVoteWeight) UpdateVotingPower(keeper effective.StakeKeeper) {
//	TODO Implement
// }

func (v *stakedVoteWeight) ToggleActive(*bls.PublicKey) bool {
	// TODO Implement
	return true
}
@@ -0,0 +1,16 @@
package reward

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

// Distributor ..
type Distributor interface {
	Award(
		Pie *big.Int,
		earners []common.Address,
		hook func(earner common.Address, due *big.Int),
	) (payout *big.Int)
}
@@ -1,259 +0,0 @@
package core

import (
	"encoding/hex"
	"errors"
	"math/big"
	"math/rand"
	"sort"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/bls/ffi/go/bls"
	common2 "github.com/harmony-one/harmony/internal/common"
	shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
	"github.com/harmony-one/harmony/internal/ctxerror"
	"github.com/harmony-one/harmony/internal/utils"
	"github.com/harmony-one/harmony/shard"
)

const (
	// GenesisEpoch is the number of the genesis epoch.
	GenesisEpoch = 0
	// CuckooRate is the percentage of nodes getting reshuffled in the second step of cuckoo resharding.
	CuckooRate = 0.1
)

// ShardingState is the data structure that holds the sharding state.
type ShardingState struct {
	epoch      uint64 // current epoch
	rnd        uint64 // random seed for resharding
	numShards  int    // TODO ek – equal to len(shardState); remove this
	shardState shard.State
}

// sortCommitteeBySize sorts shards by size.
// Suppose there are N shards; the first N/2 larger shards are called active committees and
// the remaining N/2 smaller committees are called inactive committees.
// Despite the names, they are all just normal shards with the same function.
// TODO: sort the committees weighted by total staking instead of shard size
func (ss *ShardingState) sortCommitteeBySize() {
	sort.Slice(ss.shardState, func(i, j int) bool {
		return len(ss.shardState[i].NodeList) > len(ss.shardState[j].NodeList)
	})
}

// assignNewNodes adds new nodes evenly into the N/2 active committees.
func (ss *ShardingState) assignNewNodes(newNodeList []shard.NodeID) {
	ss.sortCommitteeBySize()
	numActiveShards := ss.numShards / 2
	Shuffle(newNodeList)
	for i, nid := range newNodeList {
		id := 0
		if numActiveShards > 0 {
			id = i % numActiveShards
		}
		if id < len(ss.shardState) {
			ss.shardState[id].NodeList = append(ss.shardState[id].NodeList, nid)
		} else {
			utils.Logger().Error().Int("id", id).Int("shardState Count", len(ss.shardState)).Msg("assignNewNodes index out of range")
		}
	}
}

// cuckooResharding uses the cuckoo rule to reshard X% of the active committees (shards) into the inactive committees (shards).
func (ss *ShardingState) cuckooResharding(percent float64) {
	numActiveShards := ss.numShards / 2
	kickedNodes := []shard.NodeID{}
	for i := range ss.shardState {
		if i >= numActiveShards {
			break
		}
		numKicked := int(percent * float64(len(ss.shardState[i].NodeList)))
		if numKicked == 0 {
			numKicked++ // At least kick one node out
		}
		length := len(ss.shardState[i].NodeList)
		if length-numKicked <= 0 {
			continue // Never empty a shard
		}
		tmp := ss.shardState[i].NodeList[length-numKicked:]
		kickedNodes = append(kickedNodes, tmp...)
		ss.shardState[i].NodeList = ss.shardState[i].NodeList[:length-numKicked]
	}

	Shuffle(kickedNodes)
	numInactiveShards := ss.numShards - numActiveShards
	for i, nid := range kickedNodes {
		id := numActiveShards
		if numInactiveShards > 0 {
			id += i % numInactiveShards
		}
		ss.shardState[id].NodeList = append(ss.shardState[id].NodeList, nid)
	}
}

// Reshard first adds new nodes into shards, then uses the cuckoo rule to reshard and obtain the new shard state.
func (ss *ShardingState) Reshard(newNodeList []shard.NodeID, percent float64) {
	rand.Seed(int64(ss.rnd))
	ss.sortCommitteeBySize()

	// Take out and preserve leaders
	leaders := []shard.NodeID{}
	for i := 0; i < ss.numShards; i++ {
		if len(ss.shardState[i].NodeList) > 0 {
			leaders = append(leaders, ss.shardState[i].NodeList[0])
			ss.shardState[i].NodeList = ss.shardState[i].NodeList[1:]
			// Also shuffle the rest of the nodes
			Shuffle(ss.shardState[i].NodeList)
		}
	}

	ss.assignNewNodes(newNodeList)
	ss.cuckooResharding(percent)

	// Put the leaders back
	if len(leaders) < ss.numShards {
		utils.Logger().Error().Msg("Not enough leaders to assign to shards")
	}
	for i := 0; i < ss.numShards; i++ {
		ss.shardState[i].NodeList = append([]shard.NodeID{leaders[i]}, ss.shardState[i].NodeList...)
	}
}

// Shuffle shuffles the list with a result uniquely determined by the seed, assuming there are no repeated items in the list.
func Shuffle(list []shard.NodeID) {
	// Sort to make sure everyone will generate the same list with the same rand seed.
	sort.Slice(list, func(i, j int) bool {
		return shard.CompareNodeIDByBLSKey(list[i], list[j]) == -1
	})
	rand.Shuffle(len(list), func(i, j int) {
		list[i], list[j] = list[j], list[i]
	})
}

// GetEpochFromBlockNumber calculates the epoch number the block belongs to.
func GetEpochFromBlockNumber(blockNumber uint64) uint64 {
	return ShardingSchedule.CalcEpochNumber(blockNumber).Uint64()
}

// GetShardingStateFromBlockChain retrieves the random seed and shard map from the beacon chain for a given epoch.
func GetShardingStateFromBlockChain(bc *BlockChain, epoch *big.Int) (*ShardingState, error) {
	if bc == nil {
		return nil, errors.New("no blockchain is supplied to get shard state")
	}
	shardState, err := bc.ReadShardState(epoch)
	if err != nil {
		return nil, err
	}
	shardState = shardState.DeepCopy()

	// TODO(RJ,HB): use real randomness for resharding
	//blockNumber := GetBlockNumberFromEpoch(epoch.Uint64())
	//rndSeedBytes := bc.GetVdfByNumber(blockNumber)
	rndSeed := uint64(0)

	return &ShardingState{epoch: epoch.Uint64(), rnd: rndSeed, shardState: shardState, numShards: len(shardState)}, nil
}

// CalculateNewShardState gets the sharding state from the previous epoch and calculates the sharding state for the new epoch.
func CalculateNewShardState(bc *BlockChain, epoch *big.Int) (shard.State, error) {
	if epoch.Cmp(big.NewInt(GenesisEpoch)) == 0 {
		return CalculateInitShardState(), nil
	}
	prevEpoch := new(big.Int).Sub(epoch, common.Big1)
	ss, err := GetShardingStateFromBlockChain(bc, prevEpoch)
	if err != nil {
		return nil, ctxerror.New("cannot retrieve previous sharding state").
			WithCause(err)
	}
	utils.Logger().Info().Float64("percentage", CuckooRate).Msg("Cuckoo Rate")
	return ss.shardState, nil
}

// TODO ek – shardingSchedule should really be part of a general-purpose network
// configuration. We are OK for the time being,
// until the day we should let one node process join multiple networks.

// ShardingSchedule is the sharding configuration schedule.
// Depends on the type of the network. Defaults to the mainnet schedule.
var ShardingSchedule shardingconfig.Schedule = shardingconfig.MainnetSchedule

// CalculateInitShardState returns the initial shard state at genesis.
func CalculateInitShardState() shard.State {
	return CalculateShardState(big.NewInt(GenesisEpoch))
}

// CalculateShardState returns the shard state based on the epoch number.
// This API should be used to get the shard state regardless of the current chain
// dependency (e.g. getting the shard state from a block header received during a cross-shard transaction).
func CalculateShardState(epoch *big.Int) shard.State {
	utils.Logger().Info().Int64("epoch", epoch.Int64()).Msg("Get Shard State of Epoch.")
	shardingConfig := ShardingSchedule.InstanceForEpoch(epoch)
	shardNum := int(shardingConfig.NumShards())
	shardHarmonyNodes := shardingConfig.NumHarmonyOperatedNodesPerShard()
	shardSize := shardingConfig.NumNodesPerShard()
	hmyAccounts := shardingConfig.HmyAccounts()
	fnAccounts := shardingConfig.FnAccounts()

	shardState := shard.State{}
	for i := 0; i < shardNum; i++ {
		com := shard.Committee{ShardID: uint32(i)}
		for j := 0; j < shardHarmonyNodes; j++ {
			index := i + j*shardNum // The initial account to use for genesis nodes

			pub := &bls.PublicKey{}
			pub.DeserializeHexStr(hmyAccounts[index].BlsPublicKey)
			pubKey := shard.BlsPublicKey{}
			pubKey.FromLibBLSPublicKey(pub)
			// TODO: directly read address for bls too
			curNodeID := shard.NodeID{
				EcdsaAddress: common2.ParseAddr(hmyAccounts[index].Address),
				BlsPublicKey: pubKey,
			}
			com.NodeList = append(com.NodeList, curNodeID)
		}

		// add FN runner's key
		for j := shardHarmonyNodes; j < shardSize; j++ {
			index := i + (j-shardHarmonyNodes)*shardNum

			pub := &bls.PublicKey{}
			pub.DeserializeHexStr(fnAccounts[index].BlsPublicKey)

			pubKey := shard.BlsPublicKey{}
			pubKey.FromLibBLSPublicKey(pub)
			// TODO: directly read address for bls too
			curNodeID := shard.NodeID{
				EcdsaAddress: common2.ParseAddr(fnAccounts[index].Address),
				BlsPublicKey: pubKey,
			}
			com.NodeList = append(com.NodeList, curNodeID)
		}
		shardState = append(shardState, com)
	}
	return shardState
}

// CalculatePublicKeys returns the public keys given the epoch and shardID.
func CalculatePublicKeys(epoch *big.Int, shardID uint32) []*bls.PublicKey {
	shardState := CalculateShardState(epoch)

	// Update validator public keys
	committee := shardState.FindCommitteeByID(shardID)
	if committee == nil {
		utils.Logger().Warn().Uint32("shardID", shardID).Uint64("epoch", epoch.Uint64()).Msg("Cannot find committee")
		return nil
	}
	pubKeys := []*bls.PublicKey{}
	for _, node := range committee.NodeList {
		pubKey := &bls.PublicKey{}
		pubKeyBytes := node.BlsPublicKey[:]
		err := pubKey.Deserialize(pubKeyBytes)
		if err != nil {
			utils.Logger().Warn().Str("pubKeyBytes", hex.EncodeToString(pubKeyBytes)).Msg("Cannot Deserialize pubKey")
			return nil
		}
		pubKeys = append(pubKeys, pubKey)
	}
	return pubKeys
}
@@ -1,12 +0,0 @@
## Resharding

In the current design, each epoch has a fixed length; the epoch length is a constant parameter, BlocksPerEpoch. In the future it will be dynamically adjustable according to a security parameter. During the epoch transition, suppose there are N shards; we sort the shards according to the number of active nodes (those that staked for the next epoch). The first N/2 larger shards are called active committees, and the last N/2 smaller shards are called inactive committees. Don't be confused by the names: they are all normal shards with the same function.

All the information about sharding is stored in the BeaconChain. A sharding state is defined as a map from each NodeID to the ShardID the node belongs to. Every node has a unique NodeID and is mapped to one ShardID. At the beginning of a new epoch, the BeaconChain leader proposes a new block containing the new sharding state; the new sharding state is uniquely determined by the randomness generated by the distributed randomness protocol. During the consensus process, all validators perform the same calculation and verify that the proposed sharding state is valid. After consensus is reached, each node writes the new sharding state into the block. This block is called the epoch block. In the current code, it is the first block of each epoch in the BeaconChain.

The main function of resharding is CalculateNewShardState. It takes 3 inputs (newNodeList, oldShardState, randomSeed) and outputs newShardState. The newNodeList is retrieved from BeaconChain staking transactions during the previous epoch. The randomSeed and oldShardState are stored in the previous epoch block. Note that the randomSeed generation is currently mocked; once the distributed randomness protocol (drand) is ready, the drand service will generate the random seed for resharding.

The resharding process is as follows: we first get the newNodeList from the staking transactions of the previous epoch and assign the new nodes evenly into the N/2 active committees. Then we kick out X% of the nodes from each active committee and distribute these kicked-out nodes evenly into the inactive committees. The percentage X roughly equals the percentage of new nodes joining the active committees, in order to balance the committee sizes. A minimal sketch of how these pieces fit together is shown below.
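The sketch below is an editor's illustration (not part of the original document) of how the functions described above fit together; it assumes a `*core.BlockChain` for the beacon chain is available, the helper name `reshardForEpoch` is hypothetical, and error handling is elided.

```go
package main

import (
	"math/big"

	"github.com/harmony-one/harmony/core"
	"github.com/harmony-one/harmony/shard"
)

// reshardForEpoch sketches one epoch transition: load the previous epoch's
// sharding state (with its mocked random seed), then assign new nodes and
// apply the cuckoo rule to roughly CuckooRate (10%) of each active committee.
func reshardForEpoch(beaconChain *core.BlockChain, epoch *big.Int, newNodeList []shard.NodeID) {
	prevEpoch := new(big.Int).Sub(epoch, big.NewInt(1))
	ss, _ := core.GetShardingStateFromBlockChain(beaconChain, prevEpoch)
	ss.Reshard(newNodeList, core.CuckooRate)
}
```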
@@ -1,149 +0,0 @@
package core

import (
	"fmt"
	"math/rand"
	"strconv"
	"testing"

	"github.com/ethereum/go-ethereum/common"

	"github.com/harmony-one/harmony/shard"

	"github.com/stretchr/testify/assert"
)

var (
	blsPubKey1  = [48]byte{}
	blsPubKey2  = [48]byte{}
	blsPubKey3  = [48]byte{}
	blsPubKey4  = [48]byte{}
	blsPubKey5  = [48]byte{}
	blsPubKey6  = [48]byte{}
	blsPubKey7  = [48]byte{}
	blsPubKey8  = [48]byte{}
	blsPubKey9  = [48]byte{}
	blsPubKey10 = [48]byte{}
)

func init() {
	copy(blsPubKey1[:], []byte("random key 1"))
	copy(blsPubKey2[:], []byte("random key 2"))
	copy(blsPubKey3[:], []byte("random key 3"))
	copy(blsPubKey4[:], []byte("random key 4"))
	copy(blsPubKey5[:], []byte("random key 5"))
	copy(blsPubKey6[:], []byte("random key 6"))
	copy(blsPubKey7[:], []byte("random key 7"))
	copy(blsPubKey8[:], []byte("random key 8"))
	copy(blsPubKey9[:], []byte("random key 9"))
	copy(blsPubKey10[:], []byte("random key 10"))
}

func fakeGetInitShardState(numberOfShards, numOfNodes int) shard.State {
	rand.Seed(int64(42))
	shardState := shard.State{}
	for i := 0; i < numberOfShards; i++ {
		sid := uint32(i)
		com := shard.Committee{ShardID: sid}
		for j := 0; j < numOfNodes; j++ {
			nid := strconv.Itoa(int(rand.Int63()))
			blsPubKey := [48]byte{}
			copy(blsPubKey[:], []byte(nid)) // copy into the local key, not blsPubKey1
			com.NodeList = append(com.NodeList, shard.NodeID{
				EcdsaAddress: common.BytesToAddress([]byte(nid)),
				BlsPublicKey: blsPubKey,
			})
		}
		shardState = append(shardState, com)
	}
	return shardState
}

func fakeNewNodeList(seed int64) []shard.NodeID {
	rand.Seed(seed)
	numNewNodes := rand.Intn(10)
	nodeList := []shard.NodeID{}
	for i := 0; i < numNewNodes; i++ {
		nid := strconv.Itoa(int(rand.Int63()))
		blsPubKey := [48]byte{}
		copy(blsPubKey[:], []byte(nid)) // copy into the local key, not blsPubKey1
		nodeList = append(nodeList, shard.NodeID{
			EcdsaAddress: common.BytesToAddress([]byte(nid)),
			BlsPublicKey: blsPubKey,
		})
	}
	return nodeList
}

func TestFakeNewNodeList(t *testing.T) {
	nodeList := fakeNewNodeList(42)
	fmt.Println("newNodeList: ", nodeList)
}

func TestShuffle(t *testing.T) {
	nodeList := []shard.NodeID{
		{EcdsaAddress: common.Address{0x12}, BlsPublicKey: blsPubKey1},
		{EcdsaAddress: common.Address{0x22}, BlsPublicKey: blsPubKey2},
		{EcdsaAddress: common.Address{0x32}, BlsPublicKey: blsPubKey3},
		{EcdsaAddress: common.Address{0x42}, BlsPublicKey: blsPubKey4},
		{EcdsaAddress: common.Address{0x52}, BlsPublicKey: blsPubKey5},
		{EcdsaAddress: common.Address{0x62}, BlsPublicKey: blsPubKey6},
		{EcdsaAddress: common.Address{0x72}, BlsPublicKey: blsPubKey7},
		{EcdsaAddress: common.Address{0x82}, BlsPublicKey: blsPubKey8},
		{EcdsaAddress: common.Address{0x92}, BlsPublicKey: blsPubKey9},
		{EcdsaAddress: common.Address{0x02}, BlsPublicKey: blsPubKey10},
	}

	cpList := []shard.NodeID{}
	cpList = append(cpList, nodeList...)
	Shuffle(nodeList)
	cnt := 0
	for i := 0; i < 10; i++ {
		if cpList[i] == nodeList[i] {
			cnt++
		}
	}
	if cnt == 10 {
		t.Error("Shuffled list is the same as the original list")
	}
}

func TestSortCommitteeBySize(t *testing.T) {
	shardState := fakeGetInitShardState(6, 10)
	ss := &ShardingState{epoch: 1, rnd: 42, shardState: shardState, numShards: len(shardState)}
	ss.sortCommitteeBySize()
	for i := 0; i < ss.numShards-1; i++ {
		assert.Equal(t, true, len(ss.shardState[i].NodeList) >= len(ss.shardState[i+1].NodeList))
	}
}

func TestUpdateShardState(t *testing.T) {
	shardState := fakeGetInitShardState(6, 10)
	ss := &ShardingState{epoch: 1, rnd: 42, shardState: shardState, numShards: len(shardState)}
	newNodeList := []shard.NodeID{
		{EcdsaAddress: common.Address{0x12}, BlsPublicKey: blsPubKey1},
		{EcdsaAddress: common.Address{0x22}, BlsPublicKey: blsPubKey2},
		{EcdsaAddress: common.Address{0x32}, BlsPublicKey: blsPubKey3},
		{EcdsaAddress: common.Address{0x42}, BlsPublicKey: blsPubKey4},
		{EcdsaAddress: common.Address{0x52}, BlsPublicKey: blsPubKey5},
		{EcdsaAddress: common.Address{0x62}, BlsPublicKey: blsPubKey6},
	}

	ss.Reshard(newNodeList, 0.2)
	assert.Equal(t, 6, ss.numShards)
}

func TestAssignNewNodes(t *testing.T) {
	shardState := fakeGetInitShardState(2, 2)
	ss := &ShardingState{epoch: 1, rnd: 42, shardState: shardState, numShards: len(shardState)}
	newNodes := []shard.NodeID{
		{EcdsaAddress: common.Address{0x12}, BlsPublicKey: blsPubKey1},
		{EcdsaAddress: common.Address{0x22}, BlsPublicKey: blsPubKey2},
		{EcdsaAddress: common.Address{0x32}, BlsPublicKey: blsPubKey3},
	}

	ss.assignNewNodes(newNodes)
	assert.Equal(t, 2, ss.numShards)
	assert.Equal(t, 5, len(ss.shardState[0].NodeList))
}
@@ -1,10 +0,0 @@
package values

const (
	// BeaconChainShardID is the ShardID of the BeaconChain
	BeaconChainShardID = 0
	// VotingPowerReduceBlockThreshold roughly corresponds to 3 hours
	VotingPowerReduceBlockThreshold = 1350
	// VotingPowerFullReduce roughly corresponds to 12 hours
	VotingPowerFullReduce = 4 * VotingPowerReduceBlockThreshold
)
@@ -0,0 +1,261 @@
package committee

import (
	"math/big"
	"sort"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/bls/ffi/go/bls"
	"github.com/harmony-one/harmony/block"
	common2 "github.com/harmony-one/harmony/internal/common"
	shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
	"github.com/harmony-one/harmony/internal/ctxerror"
	"github.com/harmony-one/harmony/internal/params"
	"github.com/harmony-one/harmony/internal/utils"
	"github.com/harmony-one/harmony/numeric"
	"github.com/harmony-one/harmony/shard"
	staking "github.com/harmony-one/harmony/staking/types"
)

// StateID means reading off the whole network when using calls that accept
// a shardID parameter
const StateID = -1

// ValidatorList ..
type ValidatorList interface {
	Compute(
		epoch *big.Int, config params.ChainConfig, reader StakingCandidatesReader,
	) (shard.State, error)
	ReadFromDB(epoch *big.Int, reader ChainReader) (shard.State, error)
}

// PublicKeys per epoch
type PublicKeys interface {
	// If called with StateID as the shardID then only superCommittee is non-nil;
	// otherwise the shardSpecific slice is returned as well.
	ComputePublicKeys(
		epoch *big.Int, reader ChainReader, shardID int,
	) (superCommittee, shardSpecific []*bls.PublicKey)

	ReadPublicKeysFromDB(
		hash common.Hash, reader ChainReader,
	) ([]*bls.PublicKey, error)
}

// Reader ..
type Reader interface {
	PublicKeys
	ValidatorList
}

// StakingCandidatesReader ..
type StakingCandidatesReader interface {
	ValidatorInformation(addr common.Address) (*staking.Validator, error)
	ValidatorStakingWithDelegation(addr common.Address) numeric.Dec
	ValidatorCandidates() []common.Address
}

// ChainReader is a subset of Engine.ChainReader, just enough to do assignment
type ChainReader interface {
	// ReadShardState retrieves sharding state given the epoch number.
	// This api reads the shard state cached or saved on the chaindb.
	// Thus, it should only be used to read the shard state of the current chain.
	ReadShardState(epoch *big.Int) (shard.State, error)
	// GetHeaderByHash retrieves a block header from the database by hash.
	GetHeaderByHash(common.Hash) *block.Header
	// Config retrieves the blockchain's chain configuration.
	Config() *params.ChainConfig
}

type partialStakingEnabled struct{}

var (
	// WithStakingEnabled ..
	WithStakingEnabled Reader = partialStakingEnabled{}
)

func preStakingEnabledCommittee(s shardingconfig.Instance) shard.State {
	shardNum := int(s.NumShards())
	shardHarmonyNodes := s.NumHarmonyOperatedNodesPerShard()
	shardSize := s.NumNodesPerShard()
	hmyAccounts := s.HmyAccounts()
	fnAccounts := s.FnAccounts()
	shardState := shard.State{}
	for i := 0; i < shardNum; i++ {
		com := shard.Committee{ShardID: uint32(i)}
		for j := 0; j < shardHarmonyNodes; j++ {
			index := i + j*shardNum // The initial account to use for genesis nodes
			pub := &bls.PublicKey{}
			pub.DeserializeHexStr(hmyAccounts[index].BlsPublicKey)
			pubKey := shard.BlsPublicKey{}
			pubKey.FromLibBLSPublicKey(pub)
			// TODO: directly read address for bls too
			curNodeID := shard.NodeID{
				common2.ParseAddr(hmyAccounts[index].Address),
				pubKey,
				nil,
			}
			com.NodeList = append(com.NodeList, curNodeID)
		}
		// add FN runner's key
		for j := shardHarmonyNodes; j < shardSize; j++ {
			index := i + (j-shardHarmonyNodes)*shardNum
			pub := &bls.PublicKey{}
			pub.DeserializeHexStr(fnAccounts[index].BlsPublicKey)
			pubKey := shard.BlsPublicKey{}
			pubKey.FromLibBLSPublicKey(pub)
			// TODO: directly read address for bls too
			curNodeID := shard.NodeID{
				common2.ParseAddr(fnAccounts[index].Address),
				pubKey,
				nil,
			}
			com.NodeList = append(com.NodeList, curNodeID)
		}
		shardState = append(shardState, com)
	}
	return shardState
}

func with400Stakers(
	s shardingconfig.Instance, stakerReader StakingCandidatesReader,
) (shard.State, error) {
	// TODO Nervous about this because over time the list will become quite large
	candidates := stakerReader.ValidatorCandidates()
	stakers := make([]*staking.Validator, len(candidates))
	for i := range candidates {
		// TODO Should be using .ValidatorStakingWithDelegation, not implemented yet
		validator, err := stakerReader.ValidatorInformation(candidates[i])
		if err != nil {
			return nil, err
		}
		stakers[i] = validator
	}

	sort.SliceStable(
		stakers,
		func(i, j int) bool { return stakers[i].Stake.Cmp(stakers[j].Stake) >= 0 },
	)
	const sCount = 401
	top := stakers[:sCount]
	shardCount := int(s.NumShards())
	superComm := make(shard.State, shardCount)
	fillCount := make([]int, shardCount)
	// TODO Finish this logic, not correct, need to operate EPoS on slot level,
	// not validator level

	for i := 0; i < shardCount; i++ {
		superComm[i] = shard.Committee{}
		superComm[i].NodeList = make(shard.NodeIDList, s.NumNodesPerShard())
	}

	scratchPad := &bls.PublicKey{}

	for i := range top {
		spot := int(top[i].Address.Big().Int64()) % shardCount
		fillCount[spot]++
		// scratchPad.DeserializeHexStr()
		pubKey := shard.BlsPublicKey{}
		pubKey.FromLibBLSPublicKey(scratchPad)
		superComm[spot].NodeList = append(
			superComm[spot].NodeList,
			shard.NodeID{
				top[i].Address,
				pubKey,
				&shard.StakedMember{big.NewInt(0)},
			},
		)
	}

	utils.Logger().Info().Ints("distribution of Stakers in Shards", fillCount)
	return superComm, nil
}

func (def partialStakingEnabled) ReadPublicKeysFromDB(
	h common.Hash, reader ChainReader,
) ([]*bls.PublicKey, error) {
	header := reader.GetHeaderByHash(h)
	shardID := header.ShardID()
	superCommittee, err := reader.ReadShardState(header.Epoch())
	if err != nil {
		return nil, err
	}
	subCommittee := superCommittee.FindCommitteeByID(shardID)
	if subCommittee == nil {
		return nil, ctxerror.New("cannot find shard in the shard state",
			"blockNumber", header.Number(),
			"shardID", header.ShardID(),
		)
	}
	committerKeys := []*bls.PublicKey{}

	for i := range subCommittee.NodeList {
		committerKey := new(bls.PublicKey)
		err := subCommittee.NodeList[i].BlsPublicKey.ToLibBLSPublicKey(committerKey)
		if err != nil {
			return nil, ctxerror.New("cannot convert BLS public key",
				"blsPublicKey", subCommittee.NodeList[i].BlsPublicKey).WithCause(err)
		}
		committerKeys = append(committerKeys, committerKey)
	}
	return committerKeys, nil
}

// ComputePublicKeys produces the public keys of the entire supercommittee per epoch,
// optionally providing a shard-specific subcommittee
func (def partialStakingEnabled) ComputePublicKeys(
	epoch *big.Int, reader ChainReader, shardID int,
) ([]*bls.PublicKey, []*bls.PublicKey) {
	config := reader.Config()
	instance := shard.Schedule.InstanceForEpoch(epoch)
	if !config.IsStaking(epoch) {
		superComm := preStakingEnabledCommittee(instance)
		spot := 0
		allIdentities := make([]*bls.PublicKey, int(instance.NumShards())*instance.NumNodesPerShard())
		for i := range superComm {
			for j := range superComm[i].NodeList {
				identity := &bls.PublicKey{}
				superComm[i].NodeList[j].BlsPublicKey.ToLibBLSPublicKey(identity)
				allIdentities[spot] = identity
				spot++
			}
		}

		if shardID == StateID {
			return allIdentities, nil
		}

		subCommittee := superComm.FindCommitteeByID(uint32(shardID))
		subCommitteeIdentities := make([]*bls.PublicKey, len(subCommittee.NodeList))
		spot = 0
		for i := range subCommittee.NodeList {
			identity := &bls.PublicKey{}
			subCommittee.NodeList[i].BlsPublicKey.ToLibBLSPublicKey(identity)
			subCommitteeIdentities[spot] = identity
			spot++
		}

		return allIdentities, subCommitteeIdentities
	}
	// TODO Implement for the staked case
	return nil, nil
}
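// Example (editor's sketch, not part of this commit): in the pre-staking case a
// caller can read the whole-network key set or a single shard's keys through the
// package-level Reader; `chain` is assumed to satisfy ChainReader.
//
//	all, _ := WithStakingEnabled.ComputePublicKeys(epoch, chain, StateID) // supercommittee only
//	all, shard0 := WithStakingEnabled.ComputePublicKeys(epoch, chain, 0)  // plus shard 0's keys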

func (def partialStakingEnabled) ReadFromDB(
	epoch *big.Int, reader ChainReader,
) (newSuperComm shard.State, err error) {
	return reader.ReadShardState(epoch)
}

// Compute is the single entry point for computing the State of the network
func (def partialStakingEnabled) Compute(
	epoch *big.Int, config params.ChainConfig, stakerReader StakingCandidatesReader,
) (newSuperComm shard.State, err error) {
	instance := shard.Schedule.InstanceForEpoch(epoch)
	if !config.IsStaking(epoch) {
		return preStakingEnabledCommittee(instance), nil
	}
	return with400Stakers(instance, stakerReader)
}
@@ -0,0 +1,19 @@
package shard

import (
	shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
)

const (
	// BeaconChainShardID is the ShardID of the BeaconChain
	BeaconChainShardID = 0
)

// TODO ek – Schedule should really be part of a general-purpose network
// configuration. We are OK for the time being,
// until the day we should let one node process join multiple networks.
var (
	// Schedule is the sharding configuration schedule.
	// Depends on the type of the network. Defaults to the mainnet schedule.
	Schedule shardingconfig.Schedule = shardingconfig.MainnetSchedule
)