package node

import (
	"errors"
	"testing"

	"github.com/harmony-one/harmony/consensus"
	"github.com/harmony-one/harmony/consensus/quorum"
	"github.com/harmony-one/harmony/core"
	"github.com/harmony-one/harmony/crypto/bls"
	"github.com/harmony-one/harmony/internal/chain"
	nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
	"github.com/harmony-one/harmony/internal/registry"
	"github.com/harmony-one/harmony/internal/shardchain"
	"github.com/harmony-one/harmony/internal/utils"
	"github.com/harmony-one/harmony/multibls"
	"github.com/harmony-one/harmony/p2p"
	"github.com/harmony-one/harmony/shard"
	"github.com/multiformats/go-multiaddr"
	"github.com/stretchr/testify/assert"
)

var testDBFactory = &shardchain.MemDBFactory{}
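
// TestNewNode creates a node over an in-memory beacon shard chain and checks
// that consensus, the blockchain, and the genesis block are all initialized.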
func TestNewNode(t *testing.T) {
	blsKey := bls.RandPrivateKey()
	pubKey := blsKey.GetPublicKey()
	leader := p2p.Peer{IP: "127.0.0.1", Port: "8882", ConsensusPubKey: pubKey}
	priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
	host, err := p2p.NewHost(p2p.HostConfig{
		Self:   &leader,
		BLSKey: priKey,
	})
	if err != nil {
		t.Fatalf("newhost failure: %v", err)
	}
	engine := chain.NewEngine()
	decider := quorum.NewDecider(
		quorum.SuperMajorityVote, shard.BeaconChainShardID,
	)
	chainconfig := nodeconfig.GetShardConfig(shard.BeaconChainShardID).GetNetworkType().ChainConfig()
	collection := shardchain.NewCollection(
		nil, testDBFactory, &core.GenesisInitializer{NetworkType: nodeconfig.GetShardConfig(shard.BeaconChainShardID).GetNetworkType()}, engine, &chainconfig,
	)
	blockchain, err := collection.ShardChain(shard.BeaconChainShardID)
	if err != nil {
		t.Fatal("cannot get blockchain")
	}
	reg := registry.New().SetBlockchain(blockchain)
	consensus, err := consensus.New(
		host, shard.BeaconChainShardID, multibls.GetPrivateKeys(blsKey), reg, decider, 3, false,
	)
	if err != nil {
		t.Fatalf("Cannot create consensus: %v", err)
	}
	node := New(host, consensus, engine, collection, nil, nil, nil, nil, nil, reg)
	if node.Consensus == nil {
		t.Error("Consensus is not initialized for the node")
	}

	if node.Blockchain() == nil {
		t.Error("Blockchain is not initialized for the node")
	}

	if node.Blockchain().CurrentBlock() == nil {
		t.Error("Genesis block is not initialized for the node")
	}
}
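
// TestDNSSyncingPeerProvider verifies that syncing peers are resolved from the
// shard-specific DNS zone and that lookup failures are reported as errors.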
func TestDNSSyncingPeerProvider(t *testing.T) {
	t.Run("Happy", func(t *testing.T) {
		addrs := make([]multiaddr.Multiaddr, 0)
		p := NewDNSSyncingPeerProvider("example.com", "1234", addrs)
		lookupCount := 0
		lookupName := ""
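		// Stub the DNS lookup so the test stays hermetic and records the queried name.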
		p.lookupHost = func(name string) (addrs []string, err error) {
			lookupCount++
			lookupName = name
			return []string{"1.2.3.4", "5.6.7.8"}, nil
		}
		expectedPeers := []p2p.Peer{
			{IP: "1.2.3.4", Port: "1234"},
			{IP: "5.6.7.8", Port: "1234"},
		}
		actualPeers, err := p.SyncingPeers( /*shardID*/ 3)
		if assert.NoError(t, err) {
			assert.Equal(t, actualPeers, expectedPeers)
		}
		assert.Equal(t, lookupCount, 1)
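		// The queried name is the shard-specific zone: "s" + shardID + "." + zone.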
		assert.Equal(t, lookupName, "s3.example.com")
		if err != nil {
			t.Fatalf("SyncingPeers returned non-nil error %#v", err)
		}
	})
	t.Run("LookupError", func(t *testing.T) {
		addrs := make([]multiaddr.Multiaddr, 0)
		p := NewDNSSyncingPeerProvider("example.com", "1234", addrs)
		p.lookupHost = func(_ string) ([]string, error) {
			return nil, errors.New("omg")
		}
		_, actualErr := p.SyncingPeers( /*shardID*/ 3)
		assert.Error(t, actualErr)
	})
}
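
// TestLocalSyncingPeerProvider checks the peers reported for the beacon chain
// and shard 1, and that an unknown shard ID yields an error.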
func TestLocalSyncingPeerProvider(t *testing.T) {
	t.Run("BeaconChain", func(t *testing.T) {
		p := makeLocalSyncingPeerProvider()
		expectedBeaconPeers := []p2p.Peer{
			{IP: "127.0.0.1", Port: "6000"},
			{IP: "127.0.0.1", Port: "6002"},
			{IP: "127.0.0.1", Port: "6004"},
		}
		if actualPeers, err := p.SyncingPeers(0); assert.NoError(t, err) {
			assert.ElementsMatch(t, actualPeers, expectedBeaconPeers)
		}
	})
	t.Run("Shard1Chain", func(t *testing.T) {
		p := makeLocalSyncingPeerProvider()
		expectedShard1Peers := []p2p.Peer{
			// port 6001 omitted because self
			{IP: "127.0.0.1", Port: "6003"},
			{IP: "127.0.0.1", Port: "6005"},
		}
		if actualPeers, err := p.SyncingPeers(1); assert.NoError(t, err) {
			assert.ElementsMatch(t, actualPeers, expectedShard1Peers)
		}
	})
	t.Run("InvalidShard", func(t *testing.T) {
		p := makeLocalSyncingPeerProvider()
		_, err := p.SyncingPeers(999)
		assert.Error(t, err)
	})
}
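
// makeLocalSyncingPeerProvider builds a provider covering two shards of three
// local peers each (ports 6000-6005), with port 6001 treated as the self peer.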
func makeLocalSyncingPeerProvider() *LocalSyncingPeerProvider {
	return NewLocalSyncingPeerProvider(6000, 6001, 2, 3)
}