[committee] Factor out committee membership, provide entry for alternative committee membership (#1818)
* [committee] Move core.ShardingSchedule to shard.Schedule
* [consensus] Remove redundant PublicKeys field of Consensus as Decider maintains that
* [committee] Use committee package to pick PublicKeys
* [committee] Use committee in place of CalculateShardState
* [committee] Remove core/resharding.go, complete usage of committee as implementation replacement
* [committee] Address PR comments

pull/1821/head
parent 4e628224e6
commit e6a4fbea4f
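In practice this means callers (consensus included) obtain committee public keys through the new committee package instead of core.CalculatePublicKeys / core.CalculateShardState. A minimal sketch of the new call path follows; the import path of the committee package and the helper name are illustrative assumptions, and the caller is assumed to already hold a committee.ChainReader.

package example

import (
    "math/big"

    "github.com/harmony-one/bls/ffi/go/bls"

    "github.com/harmony-one/harmony/shard"
    "github.com/harmony-one/harmony/shard/committee" // assumed location of the new package
)

// beaconCommitteeKeys is a hypothetical helper showing the new call path:
// the committee.Reader entry point picks the membership source, and the
// Decider (not a Consensus-level PublicKeys field) consumes the result.
func beaconCommitteeKeys(epoch *big.Int, reader committee.ChainReader) []*bls.PublicKey {
    // Passing a concrete shard ID also yields the shard-specific subset;
    // committee.StateID would return only the whole super-committee.
    _, shardKeys := committee.WithStakingEnabled.ComputePublicKeys(
        epoch, reader, int(shard.BeaconChainShardID),
    )
    return shardKeys
}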
@@ -1,97 +0,0 @@
package resharding

import (
    "time"

    "github.com/ethereum/go-ethereum/rpc"
    msg_pb "github.com/harmony-one/harmony/api/proto/message"
    "github.com/harmony-one/harmony/core"
    "github.com/harmony-one/harmony/internal/utils"
)

// Constants for resharding service.
const (
    ReshardingCheckTime = time.Second
)

// Service is the role conversion service.
type Service struct {
    stopChan    chan struct{}
    stoppedChan chan struct{}
    messageChan chan *msg_pb.Message
    beaconChain *core.BlockChain
}

// New returns the role conversion service.
func New(beaconChain *core.BlockChain) *Service {
    return &Service{beaconChain: beaconChain}
}

// StartService starts the role conversion service.
func (s *Service) StartService() {
    s.stopChan = make(chan struct{})
    s.stoppedChan = make(chan struct{})

    s.Init()
    s.Run(s.stopChan, s.stoppedChan)
}

// Init initializes the role conversion service.
func (s *Service) Init() {
}

// Run runs role conversion.
func (s *Service) Run(stopChan chan struct{}, stoppedChan chan struct{}) {
    go func() {
        defer close(stoppedChan)
        for {
            select {
            default:
                utils.Logger().Info().Msg("Running role conversion")
                // TODO: Write some logic here.
                s.DoService()
            case <-stopChan:
                return
            }
        }
    }()
}

// DoService does role conversion.
func (s *Service) DoService() {
    tick := time.NewTicker(ReshardingCheckTime)
    // Get current shard state hash.
    currentShardStateHash := s.beaconChain.CurrentBlock().Header().ShardStateHash()
    for {
        select {
        case <-tick.C:
            LatestShardStateHash := s.beaconChain.CurrentBlock().Header().ShardStateHash()
            if currentShardStateHash != LatestShardStateHash {
                // TODO(minhdoan): Add resharding logic later after modifying the resharding func,
                // as it currently doesn't calculate the role (leader/validator).
            }
        }
    }
}

// StopService stops the role conversion service.
func (s *Service) StopService() {
    utils.Logger().Info().Msg("Stopping role conversion service")
    s.stopChan <- struct{}{}
    <-s.stoppedChan
    utils.Logger().Info().Msg("Role conversion stopped")
}

// NotifyService notifies the service.
func (s *Service) NotifyService(params map[string]interface{}) {
    return
}

// SetMessageChan sets up the message channel to the service.
func (s *Service) SetMessageChan(messageChan chan *msg_pb.Message) {
    s.messageChan = messageChan
}

// APIs returns the RPC APIs of the service.
func (s *Service) APIs() []rpc.API {
    return nil
}
@@ -1,259 +0,0 @@
package core

import (
    "encoding/hex"
    "errors"
    "math/big"
    "math/rand"
    "sort"

    "github.com/ethereum/go-ethereum/common"
    "github.com/harmony-one/bls/ffi/go/bls"
    common2 "github.com/harmony-one/harmony/internal/common"
    shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
    "github.com/harmony-one/harmony/internal/ctxerror"
    "github.com/harmony-one/harmony/internal/utils"
    "github.com/harmony-one/harmony/shard"
)

const (
    // GenesisEpoch is the number of the genesis epoch.
    GenesisEpoch = 0
    // CuckooRate is the percentage of nodes getting reshuffled in the second step of cuckoo resharding.
    CuckooRate = 0.1
)

// ShardingState is a data structure holding the sharding state.
type ShardingState struct {
    epoch      uint64 // current epoch
    rnd        uint64 // random seed for resharding
    numShards  int    // TODO ek – equal to len(shardState); remove this
    shardState shard.State
}

// sortCommitteeBySize sorts shards by size.
// Suppose there are N shards: the N/2 larger shards are called active committees and
// the N/2 smaller ones are called inactive committees. Despite the names, they are
// all just normal shards.
// TODO: sort the committees weighted by total stake instead of shard size.
func (ss *ShardingState) sortCommitteeBySize() {
    sort.Slice(ss.shardState, func(i, j int) bool {
        return len(ss.shardState[i].NodeList) > len(ss.shardState[j].NodeList)
    })
}

// assignNewNodes adds new nodes evenly into the N/2 active committees.
func (ss *ShardingState) assignNewNodes(newNodeList []shard.NodeID) {
    ss.sortCommitteeBySize()
    numActiveShards := ss.numShards / 2
    Shuffle(newNodeList)
    for i, nid := range newNodeList {
        id := 0
        if numActiveShards > 0 {
            id = i % numActiveShards
        }
        if id < len(ss.shardState) {
            ss.shardState[id].NodeList = append(ss.shardState[id].NodeList, nid)
        } else {
            utils.Logger().Error().Int("id", id).Int("shardState Count", len(ss.shardState)).Msg("assignNewNodes index out of range")
        }
    }
}

// cuckooResharding uses the cuckoo rule to reshard X% of the active committees (shards) into the inactive committees (shards).
func (ss *ShardingState) cuckooResharding(percent float64) {
    numActiveShards := ss.numShards / 2
    kickedNodes := []shard.NodeID{}
    for i := range ss.shardState {
        if i >= numActiveShards {
            break
        }
        numKicked := int(percent * float64(len(ss.shardState[i].NodeList)))
        if numKicked == 0 {
            numKicked++ // At least kick one node out
        }
        length := len(ss.shardState[i].NodeList)
        if length-numKicked <= 0 {
            continue // Never empty a shard
        }
        tmp := ss.shardState[i].NodeList[length-numKicked:]
        kickedNodes = append(kickedNodes, tmp...)
        ss.shardState[i].NodeList = ss.shardState[i].NodeList[:length-numKicked]
    }

    Shuffle(kickedNodes)
    numInactiveShards := ss.numShards - numActiveShards
    for i, nid := range kickedNodes {
        id := numActiveShards
        if numInactiveShards > 0 {
            id += i % numInactiveShards
        }
        ss.shardState[id].NodeList = append(ss.shardState[id].NodeList, nid)
    }
}

// Reshard first adds the new nodes into shards, then uses the cuckoo rule to compute the new shard state.
func (ss *ShardingState) Reshard(newNodeList []shard.NodeID, percent float64) {
    rand.Seed(int64(ss.rnd))
    ss.sortCommitteeBySize()

    // Take out and preserve leaders
    leaders := []shard.NodeID{}
    for i := 0; i < ss.numShards; i++ {
        if len(ss.shardState[i].NodeList) > 0 {
            leaders = append(leaders, ss.shardState[i].NodeList[0])
            ss.shardState[i].NodeList = ss.shardState[i].NodeList[1:]
            // Also shuffle the rest of the nodes
            Shuffle(ss.shardState[i].NodeList)
        }
    }

    ss.assignNewNodes(newNodeList)
    ss.cuckooResharding(percent)

    // Put leaders back
    if len(leaders) < ss.numShards {
        utils.Logger().Error().Msg("Not enough leaders to assign to shards")
    }
    for i := 0; i < ss.numShards; i++ {
        ss.shardState[i].NodeList = append([]shard.NodeID{leaders[i]}, ss.shardState[i].NodeList...)
    }
}

// Shuffle shuffles the list with a result uniquely determined by the seed, assuming there are no repeated items in the list.
func Shuffle(list []shard.NodeID) {
    // Sort to make sure everyone will generate the same result with the same rand seed.
    sort.Slice(list, func(i, j int) bool {
        return shard.CompareNodeIDByBLSKey(list[i], list[j]) == -1
    })
    rand.Shuffle(len(list), func(i, j int) {
        list[i], list[j] = list[j], list[i]
    })
}

// GetEpochFromBlockNumber calculates the epoch number the block belongs to.
func GetEpochFromBlockNumber(blockNumber uint64) uint64 {
    return ShardingSchedule.CalcEpochNumber(blockNumber).Uint64()
}

// GetShardingStateFromBlockChain retrieves the random seed and shard map from the beacon chain for a given epoch.
func GetShardingStateFromBlockChain(bc *BlockChain, epoch *big.Int) (*ShardingState, error) {
    if bc == nil {
        return nil, errors.New("no blockchain is supplied to get shard state")
    }
    shardState, err := bc.ReadShardState(epoch)
    if err != nil {
        return nil, err
    }
    shardState = shardState.DeepCopy()

    // TODO(RJ,HB): use real randomness for resharding
    //blockNumber := GetBlockNumberFromEpoch(epoch.Uint64())
    //rndSeedBytes := bc.GetVdfByNumber(blockNumber)
    rndSeed := uint64(0)

    return &ShardingState{epoch: epoch.Uint64(), rnd: rndSeed, shardState: shardState, numShards: len(shardState)}, nil
}

// CalculateNewShardState gets the sharding state from the previous epoch and calculates the sharding state for the new epoch.
func CalculateNewShardState(bc *BlockChain, epoch *big.Int) (shard.State, error) {
    if epoch.Cmp(big.NewInt(GenesisEpoch)) == 0 {
        return CalculateInitShardState(), nil
    }
    prevEpoch := new(big.Int).Sub(epoch, common.Big1)
    ss, err := GetShardingStateFromBlockChain(bc, prevEpoch)
    if err != nil {
        return nil, ctxerror.New("cannot retrieve previous sharding state").
            WithCause(err)
    }
    utils.Logger().Info().Float64("percentage", CuckooRate).Msg("Cuckoo Rate")
    return ss.shardState, nil
}

// TODO ek – shardingSchedule should really be part of a general-purpose network
// configuration. We are OK for the time being,
// until the day we should let one node process join multiple networks.

// ShardingSchedule is the sharding configuration schedule.
// Depends on the type of the network. Defaults to the mainnet schedule.
var ShardingSchedule shardingconfig.Schedule = shardingconfig.MainnetSchedule

// CalculateInitShardState returns the initial shard state at genesis.
func CalculateInitShardState() shard.State {
    return CalculateShardState(big.NewInt(GenesisEpoch))
}

// CalculateShardState returns the shard state based on epoch number.
// This API is what should be used to get the shard state regardless of
// the current chain dependency (e.g. getting the shard state from a block header received during a cross-shard transaction).
func CalculateShardState(epoch *big.Int) shard.State {
    utils.Logger().Info().Int64("epoch", epoch.Int64()).Msg("Get Shard State of Epoch.")
    shardingConfig := ShardingSchedule.InstanceForEpoch(epoch)
    shardNum := int(shardingConfig.NumShards())
    shardHarmonyNodes := shardingConfig.NumHarmonyOperatedNodesPerShard()
    shardSize := shardingConfig.NumNodesPerShard()
    hmyAccounts := shardingConfig.HmyAccounts()
    fnAccounts := shardingConfig.FnAccounts()

    shardState := shard.State{}
    for i := 0; i < shardNum; i++ {
        com := shard.Committee{ShardID: uint32(i)}
        for j := 0; j < shardHarmonyNodes; j++ {
            index := i + j*shardNum // The initial account to use for genesis nodes

            pub := &bls.PublicKey{}
            pub.DeserializeHexStr(hmyAccounts[index].BlsPublicKey)
            pubKey := shard.BlsPublicKey{}
            pubKey.FromLibBLSPublicKey(pub)
            // TODO: directly read address for bls too
            curNodeID := shard.NodeID{
                EcdsaAddress: common2.ParseAddr(hmyAccounts[index].Address),
                BlsPublicKey: pubKey,
            }
            com.NodeList = append(com.NodeList, curNodeID)
        }

        // add FN runner's key
        for j := shardHarmonyNodes; j < shardSize; j++ {
            index := i + (j-shardHarmonyNodes)*shardNum

            pub := &bls.PublicKey{}
            pub.DeserializeHexStr(fnAccounts[index].BlsPublicKey)

            pubKey := shard.BlsPublicKey{}
            pubKey.FromLibBLSPublicKey(pub)
            // TODO: directly read address for bls too
            curNodeID := shard.NodeID{
                EcdsaAddress: common2.ParseAddr(fnAccounts[index].Address),
                BlsPublicKey: pubKey,
            }
            com.NodeList = append(com.NodeList, curNodeID)
        }
        shardState = append(shardState, com)
    }
    return shardState
}

// CalculatePublicKeys returns the public keys for the given epoch and shardID.
func CalculatePublicKeys(epoch *big.Int, shardID uint32) []*bls.PublicKey {
    shardState := CalculateShardState(epoch)

    // Update validator public keys
    committee := shardState.FindCommitteeByID(shardID)
    if committee == nil {
        utils.Logger().Warn().Uint32("shardID", shardID).Uint64("epoch", epoch.Uint64()).Msg("Cannot find committee")
        return nil
    }
    pubKeys := []*bls.PublicKey{}
    for _, node := range committee.NodeList {
        pubKey := &bls.PublicKey{}
        pubKeyBytes := node.BlsPublicKey[:]
        err := pubKey.Deserialize(pubKeyBytes)
        if err != nil {
            utils.Logger().Warn().Str("pubKeyBytes", hex.EncodeToString(pubKeyBytes)).Msg("Cannot Deserialize pubKey")
            return nil
        }
        pubKeys = append(pubKeys, pubKey)
    }
    return pubKeys
}
@@ -1,12 +0,0 @@
## Resharding

In the current design, an epoch has a fixed length given by the constant parameter BlocksPerEpoch. In the future it will be dynamically adjustable according to a security parameter. During the epoch transition, suppose there are N shards; we sort the shards by the number of active nodes (those that staked for the next epoch). The N/2 larger shards are called active committees and the N/2 smaller shards are called inactive committees. Don't be confused by the names: they are all normal shards with the same function.

All information about sharding is stored in the BeaconChain. A sharding state is defined as a map from each NodeID to the ShardID the node belongs to. Every node has a unique NodeID and is mapped to one ShardID. At the beginning of a new epoch, the BeaconChain leader proposes a new block containing the new sharding state; the new sharding state is uniquely determined by the randomness generated by the distributed randomness protocol. During consensus, all validators perform the same calculation and verify that the proposed sharding state is valid. After consensus is reached, each node writes the new sharding state into the block. This block is called the epoch block. In the current code, it is the first block of each epoch in the BeaconChain.

The main function of resharding is CalculateNewShardState. It takes 3 inputs (newNodeList, oldShardState, randomSeed) and outputs newShardState.
The newNodeList is retrieved from BeaconChain staking transactions during the previous epoch. The randomSeed and oldShardState are stored in the previous epoch block. Note that randomSeed generation is currently mocked. Once the distributed randomness protocol (drand) is ready, the drand service will generate the random seed for resharding.

The resharding process is as follows: we first get the newNodeList from the previous epoch's staking transactions and assign the new nodes evenly into the N/2 active committees. Then we kick out X% of the nodes from each active committee and put these kicked-out nodes evenly into the inactive committees. The percentage X roughly equals the percentage of new nodes joining the active committees, in order to balance the committee sizes.
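Putting the pieces above together, the flow this document describes maps onto the removed core API roughly as in the sketch below. It is written as if it lived inside package core (so it can reach the unexported shardState field) and is only illustrative; newNodeList stands in for the staking-derived node list and the random seed is still the mocked zero value.

package core

import (
    "math/big"

    "github.com/ethereum/go-ethereum/common"

    "github.com/harmony-one/harmony/shard"
)

// nextShardStateSketch combines the previous epoch's state and seed with the
// newly joined nodes, then applies the cuckoo rule at CuckooRate.
func nextShardStateSketch(
    bc *BlockChain, epoch *big.Int, newNodeList []shard.NodeID,
) (shard.State, error) {
    if epoch.Cmp(big.NewInt(GenesisEpoch)) == 0 {
        return CalculateInitShardState(), nil // genesis layout, nothing to reshuffle
    }
    prevEpoch := new(big.Int).Sub(epoch, common.Big1)
    ss, err := GetShardingStateFromBlockChain(bc, prevEpoch) // old state + (mocked) seed
    if err != nil {
        return nil, err
    }
    ss.Reshard(newNodeList, CuckooRate) // add new nodes evenly, then kick ~10% into inactive shards
    return ss.shardState, nil
}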
@@ -1,149 +0,0 @@
package core

import (
    "fmt"
    "math/rand"
    "strconv"
    "testing"

    "github.com/ethereum/go-ethereum/common"

    "github.com/harmony-one/harmony/shard"

    "github.com/stretchr/testify/assert"
)

var (
    blsPubKey1  = [48]byte{}
    blsPubKey2  = [48]byte{}
    blsPubKey3  = [48]byte{}
    blsPubKey4  = [48]byte{}
    blsPubKey5  = [48]byte{}
    blsPubKey6  = [48]byte{}
    blsPubKey7  = [48]byte{}
    blsPubKey8  = [48]byte{}
    blsPubKey9  = [48]byte{}
    blsPubKey10 = [48]byte{}
)

func init() {
    copy(blsPubKey1[:], []byte("random key 1"))
    copy(blsPubKey2[:], []byte("random key 2"))
    copy(blsPubKey3[:], []byte("random key 3"))
    copy(blsPubKey4[:], []byte("random key 4"))
    copy(blsPubKey5[:], []byte("random key 5"))
    copy(blsPubKey6[:], []byte("random key 6"))
    copy(blsPubKey7[:], []byte("random key 7"))
    copy(blsPubKey8[:], []byte("random key 8"))
    copy(blsPubKey9[:], []byte("random key 9"))
    copy(blsPubKey10[:], []byte("random key 10"))
}

func fakeGetInitShardState(numberOfShards, numOfNodes int) shard.State {
    rand.Seed(int64(42))
    shardState := shard.State{}
    for i := 0; i < numberOfShards; i++ {
        sid := uint32(i)
        com := shard.Committee{ShardID: sid}
        for j := 0; j < numOfNodes; j++ {
            nid := strconv.Itoa(int(rand.Int63()))
            blsPubKey := [48]byte{}
            copy(blsPubKey[:], []byte(nid)) // fill the per-node key, not the shared blsPubKey1
            com.NodeList = append(com.NodeList, shard.NodeID{
                EcdsaAddress: common.BytesToAddress([]byte(nid)),
                BlsPublicKey: blsPubKey,
            })
        }
        shardState = append(shardState, com)
    }
    return shardState
}

func fakeNewNodeList(seed int64) []shard.NodeID {
    rand.Seed(seed)
    numNewNodes := rand.Intn(10)
    nodeList := []shard.NodeID{}
    for i := 0; i < numNewNodes; i++ {
        nid := strconv.Itoa(int(rand.Int63()))
        blsPubKey := [48]byte{}
        copy(blsPubKey[:], []byte(nid)) // fill the per-node key, not the shared blsPubKey1
        nodeList = append(nodeList, shard.NodeID{
            EcdsaAddress: common.BytesToAddress([]byte(nid)),
            BlsPublicKey: blsPubKey,
        })
    }
    return nodeList
}

func TestFakeNewNodeList(t *testing.T) {
    nodeList := fakeNewNodeList(42)
    fmt.Println("newNodeList: ", nodeList)
}

func TestShuffle(t *testing.T) {
    nodeList := []shard.NodeID{
        {EcdsaAddress: common.Address{0x12}, BlsPublicKey: blsPubKey1},
        {EcdsaAddress: common.Address{0x22}, BlsPublicKey: blsPubKey2},
        {EcdsaAddress: common.Address{0x32}, BlsPublicKey: blsPubKey3},
        {EcdsaAddress: common.Address{0x42}, BlsPublicKey: blsPubKey4},
        {EcdsaAddress: common.Address{0x52}, BlsPublicKey: blsPubKey5},
        {EcdsaAddress: common.Address{0x62}, BlsPublicKey: blsPubKey6},
        {EcdsaAddress: common.Address{0x72}, BlsPublicKey: blsPubKey7},
        {EcdsaAddress: common.Address{0x82}, BlsPublicKey: blsPubKey8},
        {EcdsaAddress: common.Address{0x92}, BlsPublicKey: blsPubKey9},
        {EcdsaAddress: common.Address{0x02}, BlsPublicKey: blsPubKey10},
    }

    cpList := []shard.NodeID{}
    cpList = append(cpList, nodeList...)
    Shuffle(nodeList)
    cnt := 0
    for i := 0; i < 10; i++ {
        if cpList[i] == nodeList[i] {
            cnt++
        }
    }
    if cnt == 10 {
        t.Error("Shuffled list is the same as the original list")
    }
}

func TestSortCommitteeBySize(t *testing.T) {
    shardState := fakeGetInitShardState(6, 10)
    ss := &ShardingState{epoch: 1, rnd: 42, shardState: shardState, numShards: len(shardState)}
    ss.sortCommitteeBySize()
    for i := 0; i < ss.numShards-1; i++ {
        assert.Equal(t, true, len(ss.shardState[i].NodeList) >= len(ss.shardState[i+1].NodeList))
    }
}

func TestUpdateShardState(t *testing.T) {
    shardState := fakeGetInitShardState(6, 10)
    ss := &ShardingState{epoch: 1, rnd: 42, shardState: shardState, numShards: len(shardState)}
    newNodeList := []shard.NodeID{
        {EcdsaAddress: common.Address{0x12}, BlsPublicKey: blsPubKey1},
        {EcdsaAddress: common.Address{0x22}, BlsPublicKey: blsPubKey2},
        {EcdsaAddress: common.Address{0x32}, BlsPublicKey: blsPubKey3},
        {EcdsaAddress: common.Address{0x42}, BlsPublicKey: blsPubKey4},
        {EcdsaAddress: common.Address{0x52}, BlsPublicKey: blsPubKey5},
        {EcdsaAddress: common.Address{0x62}, BlsPublicKey: blsPubKey6},
    }

    ss.Reshard(newNodeList, 0.2)
    assert.Equal(t, 6, ss.numShards)
}

func TestAssignNewNodes(t *testing.T) {
    shardState := fakeGetInitShardState(2, 2)
    ss := &ShardingState{epoch: 1, rnd: 42, shardState: shardState, numShards: len(shardState)}
    newNodes := []shard.NodeID{
        {EcdsaAddress: common.Address{0x12}, BlsPublicKey: blsPubKey1},
        {EcdsaAddress: common.Address{0x22}, BlsPublicKey: blsPubKey2},
        {EcdsaAddress: common.Address{0x32}, BlsPublicKey: blsPubKey3},
    }

    ss.assignNewNodes(newNodes)
    assert.Equal(t, 2, ss.numShards)
    assert.Equal(t, 5, len(ss.shardState[0].NodeList))
}
@@ -1,10 +0,0 @@
package values

const (
    // BeaconChainShardID is the ShardID of the BeaconChain
    BeaconChainShardID = 0
    // VotingPowerReduceBlockThreshold roughly corresponds to 3 hours
    VotingPowerReduceBlockThreshold = 1350
    // VotingPowerFullReduce roughly corresponds to 12 hours
    VotingPowerFullReduce = 4 * VotingPowerReduceBlockThreshold
)
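(For reference, the "roughly 3 hours" comment works out arithmetically at the roughly 8-second block time these constants imply: 1350 blocks x 8 s = 10,800 s = 3 h, and 4 x 1350 = 5400 blocks is about 12 h.)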
@@ -0,0 +1,261 @@
package committee

import (
    "math/big"
    "sort"

    "github.com/ethereum/go-ethereum/common"
    "github.com/harmony-one/bls/ffi/go/bls"
    "github.com/harmony-one/harmony/block"
    common2 "github.com/harmony-one/harmony/internal/common"
    shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
    "github.com/harmony-one/harmony/internal/ctxerror"
    "github.com/harmony-one/harmony/internal/params"
    "github.com/harmony-one/harmony/internal/utils"
    "github.com/harmony-one/harmony/numeric"
    "github.com/harmony-one/harmony/shard"
    staking "github.com/harmony-one/harmony/staking/types"
)

// StateID means reading off the whole network when used with calls that accept
// a shardID parameter
const StateID = -1

// MembershipList ..
type MembershipList interface {
    ReadFromComputation(
        epoch *big.Int, config params.ChainConfig, reader StakingCandidatesReader,
    ) (shard.State, error)
    ReadFromChain(epoch *big.Int, reader ChainReader) (shard.State, error)
}

// PublicKeys per epoch
type PublicKeys interface {
    // If called with StateID as the shardID then only superCommittee is non-nil,
    // otherwise the shardSpecific slice is returned as well.
    ComputePublicKeys(
        epoch *big.Int, reader ChainReader, shardID int,
    ) (superCommittee, shardSpecific []*bls.PublicKey)

    ReadPublicKeysFromDB(
        hash common.Hash, reader ChainReader,
    ) ([]*bls.PublicKey, error)
}

// Reader ..
type Reader interface {
    PublicKeys
    MembershipList
}

// StakingCandidatesReader ..
type StakingCandidatesReader interface {
    ValidatorInformation(addr common.Address) (*staking.Validator, error)
    ValidatorStakingWithDelegation(addr common.Address) numeric.Dec
    ValidatorCandidates() []common.Address
}

// ChainReader is a subset of Engine.ChainReader, just enough to do assignment
type ChainReader interface {
    // ReadShardState retrieves sharding state given the epoch number.
    // This api reads the shard state cached or saved on the chaindb.
    // Thus, it should only be used to read the shard state of the current chain.
    ReadShardState(epoch *big.Int) (shard.State, error)
    // GetHeaderByHash retrieves a block header from the database by hash.
    GetHeaderByHash(common.Hash) *block.Header
    // Config retrieves the blockchain's chain configuration.
    Config() *params.ChainConfig
}

type partialStakingEnabled struct{}

var (
    // WithStakingEnabled ..
    WithStakingEnabled Reader = partialStakingEnabled{}
)

func preStakingEnabledCommittee(s shardingconfig.Instance) shard.State {
    shardNum := int(s.NumShards())
    shardHarmonyNodes := s.NumHarmonyOperatedNodesPerShard()
    shardSize := s.NumNodesPerShard()
    hmyAccounts := s.HmyAccounts()
    fnAccounts := s.FnAccounts()
    shardState := shard.State{}
    for i := 0; i < shardNum; i++ {
        com := shard.Committee{ShardID: uint32(i)}
        for j := 0; j < shardHarmonyNodes; j++ {
            index := i + j*shardNum // The initial account to use for genesis nodes
            pub := &bls.PublicKey{}
            pub.DeserializeHexStr(hmyAccounts[index].BlsPublicKey)
            pubKey := shard.BlsPublicKey{}
            pubKey.FromLibBLSPublicKey(pub)
            // TODO: directly read address for bls too
            curNodeID := shard.NodeID{
                common2.ParseAddr(hmyAccounts[index].Address),
                pubKey,
                nil,
            }
            com.NodeList = append(com.NodeList, curNodeID)
        }
        // add FN runner's key
        for j := shardHarmonyNodes; j < shardSize; j++ {
            index := i + (j-shardHarmonyNodes)*shardNum
            pub := &bls.PublicKey{}
            pub.DeserializeHexStr(fnAccounts[index].BlsPublicKey)
            pubKey := shard.BlsPublicKey{}
            pubKey.FromLibBLSPublicKey(pub)
            // TODO: directly read address for bls too
            curNodeID := shard.NodeID{
                common2.ParseAddr(fnAccounts[index].Address),
                pubKey,
                nil,
            }
            com.NodeList = append(com.NodeList, curNodeID)
        }
        shardState = append(shardState, com)
    }
    return shardState
}

func with400Stakers(
    s shardingconfig.Instance, stakerReader StakingCandidatesReader,
) (shard.State, error) {
    // TODO Nervous about this because over time the list will become quite large
    candidates := stakerReader.ValidatorCandidates()
    stakers := make([]*staking.Validator, len(candidates))
    for i := range candidates {
        // TODO Should be using .ValidatorStakingWithDelegation, not implemented yet
        validator, err := stakerReader.ValidatorInformation(candidates[i])
        if err != nil {
            return nil, err
        }
        stakers[i] = validator
    }

    sort.SliceStable(
        stakers,
        func(i, j int) bool { return stakers[i].Stake.Cmp(stakers[j].Stake) >= 0 },
    )
    const sCount = 401
    top := stakers[:sCount]
    shardCount := int(s.NumShards())
    superComm := make(shard.State, shardCount)
    fillCount := make([]int, shardCount)
    // TODO Finish this logic, not correct, need to operate EPoS on slot level,
    // not validator level

    for i := 0; i < shardCount; i++ {
        superComm[i] = shard.Committee{}
        superComm[i].NodeList = make(shard.NodeIDList, s.NumNodesPerShard())
    }

    scratchPad := &bls.PublicKey{}

    for i := range top {
        spot := int(top[i].Address.Big().Int64()) % shardCount
        fillCount[spot]++
        // scratchPad.DeserializeHexStr()
        pubKey := shard.BlsPublicKey{}
        pubKey.FromLibBLSPublicKey(scratchPad)
        superComm[spot].NodeList = append(
            superComm[spot].NodeList,
            shard.NodeID{
                top[i].Address,
                pubKey,
                &shard.StakedMember{big.NewInt(0)},
            },
        )
    }

    utils.Logger().Info().Ints("distribution of Stakers in Shards", fillCount)
    return superComm, nil
}

func (def partialStakingEnabled) ReadPublicKeysFromDB(
    h common.Hash, reader ChainReader,
) ([]*bls.PublicKey, error) {
    header := reader.GetHeaderByHash(h)
    shardID := header.ShardID()
    superCommittee, err := reader.ReadShardState(header.Epoch())
    if err != nil {
        return nil, err
    }
    subCommittee := superCommittee.FindCommitteeByID(shardID)
    if subCommittee == nil {
        return nil, ctxerror.New("cannot find shard in the shard state",
            "blockNumber", header.Number(),
            "shardID", header.ShardID(),
        )
    }
    committerKeys := []*bls.PublicKey{}

    for i := range subCommittee.NodeList {
        committerKey := new(bls.PublicKey)
        err := subCommittee.NodeList[i].BlsPublicKey.ToLibBLSPublicKey(committerKey)
        if err != nil {
            return nil, ctxerror.New("cannot convert BLS public key",
                "blsPublicKey", subCommittee.NodeList[i].BlsPublicKey).WithCause(err)
        }
        committerKeys = append(committerKeys, committerKey)
    }
    return committerKeys, nil
}

// ComputePublicKeys produces the public keys of the entire super-committee per epoch,
// optionally also providing the shard-specific subcommittee
func (def partialStakingEnabled) ComputePublicKeys(
    epoch *big.Int, reader ChainReader, shardID int,
) ([]*bls.PublicKey, []*bls.PublicKey) {
    config := reader.Config()
    instance := shard.Schedule.InstanceForEpoch(epoch)
    if !config.IsStaking(epoch) {
        superComm := preStakingEnabledCommittee(instance)
        spot := 0
        allIdentities := make([]*bls.PublicKey, int(instance.NumShards())*instance.NumNodesPerShard())
        for i := range superComm {
            for j := range superComm[i].NodeList {
                identity := &bls.PublicKey{}
                superComm[i].NodeList[j].BlsPublicKey.ToLibBLSPublicKey(identity)
                allIdentities[spot] = identity
                spot++
            }
        }

        if shardID == StateID {
            return allIdentities, nil
        }

        subCommittee := superComm.FindCommitteeByID(uint32(shardID))
        subCommitteeIdentities := make([]*bls.PublicKey, len(subCommittee.NodeList))
        spot = 0
        for i := range subCommittee.NodeList {
            identity := &bls.PublicKey{}
            subCommittee.NodeList[i].BlsPublicKey.ToLibBLSPublicKey(identity)
            subCommitteeIdentities[spot] = identity
            spot++
        }

        return allIdentities, subCommitteeIdentities
    }
    // TODO Implement for the staked case
    return nil, nil
}

func (def partialStakingEnabled) ReadFromChain(
    epoch *big.Int, reader ChainReader,
) (newSuperComm shard.State, err error) {
    return reader.ReadShardState(epoch)
}

// ReadFromComputation is the single entry point for reading the State of the network
func (def partialStakingEnabled) ReadFromComputation(
    epoch *big.Int, config params.ChainConfig, stakerReader StakingCandidatesReader,
) (newSuperComm shard.State, err error) {
    instance := shard.Schedule.InstanceForEpoch(epoch)
    if !config.IsStaking(epoch) {
        return preStakingEnabledCommittee(instance), nil
    }
    return with400Stakers(instance, stakerReader)
}
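The commit message's "use committee in place of CalculateShardState" boils down to calling the Reader entry point above. A minimal usage sketch, with the same assumed import path as before and the chain config plus a StakingCandidatesReader supplied by the caller:

package example

import (
    "math/big"

    "github.com/harmony-one/harmony/internal/params"
    "github.com/harmony-one/harmony/shard"
    "github.com/harmony-one/harmony/shard/committee" // assumed location of the new package
)

// shardStateForEpoch is a hypothetical replacement for the old
// core.CalculateShardState(epoch): pre-staking epochs come from the hard-coded
// sharding schedule, staking epochs (eventually) from the candidate pool.
func shardStateForEpoch(
    epoch *big.Int, config params.ChainConfig, stakers committee.StakingCandidatesReader,
) (shard.State, error) {
    return committee.WithStakingEnabled.ReadFromComputation(epoch, config, stakers)
}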
@@ -0,0 +1,19 @@
package shard

import (
    shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
)

const (
    // BeaconChainShardID is the ShardID of the BeaconChain
    BeaconChainShardID = 0
)

// TODO ek – Schedule should really be part of a general-purpose network
// configuration. We are OK for the time being,
// until the day we should let one node process join multiple networks.
var (
    // Schedule is the sharding configuration schedule.
    // Depends on the type of the network. Defaults to the mainnet schedule.
    Schedule shardingconfig.Schedule = shardingconfig.MainnetSchedule
)
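For existing callers of the schedule, the move from core to shard is mechanical: core.ShardingSchedule.InstanceForEpoch(epoch) becomes shard.Schedule.InstanceForEpoch(epoch). A tiny illustrative example (the helper name is hypothetical):

package example

import (
    "math/big"

    "github.com/harmony-one/harmony/shard"
)

// numShardsAt reads the relocated schedule; it previously would have gone
// through core.ShardingSchedule.InstanceForEpoch(epoch).NumShards().
func numShardsAt(epoch *big.Int) int {
    return int(shard.Schedule.InstanceForEpoch(epoch).NumShards())
}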