Merge pull request #4487 from harmony-one/dev

Release Candidate 2023.2.7 (dev -> main)
Tag: v2023.2.7

Commit 1b9614ba24, authored by Casey Gardiner, committed via GitHub (GPG key ID: 4AEE18F83AFDEB23)
Files changed (35; changed line counts in parentheses):

1. .mergify.yml (59)
2. api/service/legacysync/syncing_test.go (2)
3. api/service/stagedstreamsync/const.go (12)
4. api/service/stagedstreamsync/default_stages.go (10)
5. api/service/stagedstreamsync/downloader.go (39)
6. api/service/stagedstreamsync/downloaders.go (5)
7. api/service/stagedstreamsync/service.go (5)
8. api/service/stagedstreamsync/short_range_helper.go (6)
9. api/service/stagedstreamsync/stage_bodies.go (10)
10. api/service/stagedstreamsync/stage_epoch.go (36)
11. api/service/stagedstreamsync/stage_lastmile.go (109)
12. api/service/stagedstreamsync/stage_short_range.go (35)
13. api/service/stagedstreamsync/stage_state.go (12)
14. api/service/stagedstreamsync/staged_stream_sync.go (200)
15. api/service/stagedstreamsync/stages.go (1)
16. api/service/stagedstreamsync/syncing.go (139)
17. api/service/stagedsync/stagedsync.go (7)
18. api/service/stagedsync/syncing.go (2)
19. cmd/harmony/default.go (1)
20. cmd/harmony/main.go (7)
21. consensus/consensus_v2.go (9)
22. consensus/validator.go (2)
23. core/blockchain_impl.go (70)
24. core/blockchain_leader_rotation.go (8)
25. core/state_processor.go (6)
26. core_test/shardchain_test.go (83)
27. internal/configs/harmony/harmony.go (1)
28. internal/configs/node/config.go (44)
29. node/node_handler_test.go (6)
30. node/node_newblock_test.go (6)
31. node/node_syncing.go (10)
32. p2p/host.go (4)
33. p2p/stream/protocols/sync/protocol.go (4)
34. p2p/stream/types/utils.go (2)
35. scripts/travis_go_checker.sh (2)

.mergify.yml

@@ -0,0 +1,59 @@
+pull_request_rules:
+  - name: Squash merge rule to dev
+    conditions:
+      # applied for merge to the dev branch
+      - base=dev
+      # no unresolved threads
+      - "#review-threads-unresolved=0"
+      # Approved by two reviewers
+      - "#approved-reviews-by>=2"
+      # no unverified commit
+      - "#commits-unverified=0"
+      # Travis CI succeeded
+      - "check-success=Travis CI - Pull Request"
+      # GitGuardian succeeded
+      - "check-success=GitGuardian Security Checks"
+      # PR is not a draft
+      - -draft
+      # PR is not conflicting with the base branch
+      - -conflict
+      # conditions to avoid auto merge mistakes
+      # PR title doesn't have wip (not case sensitive)
+      - -title~=(?i)wip
+      # PR doesn't have WIP label (not case sensitive)
+      - label!=(?i)wip
+      # ready-to-merge is required to trigger the merge
+      - label=ready-to-merge
+    actions:
+      merge:
+        method: squash
+  - name: merge rule to main
+    conditions:
+      # from the dev branch: no direct PR to main
+      - head=dev
+      # applied for merge to the main branch
+      - base=main
+      # no unresolved threads
+      - "#review-threads-unresolved=0"
+      # Approved by two reviewers
+      - "#approved-reviews-by>=2"
+      # no unverified commit
+      - "#commits-unverified=0"
+      # Travis CI succeeded
+      - "check-success=Travis CI - Pull Request"
+      # GitGuardian succeeded
+      - "check-success=GitGuardian Security Checks"
+      # PR is not a draft
+      - -draft
+      # PR is not conflicting with the base branch
+      - -conflict
+      # conditions to avoid auto merge mistakes
+      # PR title doesn't have wip (not case sensitive)
+      - -title~=(?i)wip
+      # PR doesn't have WIP label (not case sensitive)
+      - label!=(?i)wip
+      # ready-to-merge is required to trigger the merge
+      - label=ready-to-merge
+    actions:
+      merge:
+        method: merge

api/service/legacysync/syncing_test.go

@@ -12,7 +12,7 @@ import (
     "time"

     nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
-    peer "github.com/libp2p/go-libp2p-core/peer"
+    peer "github.com/libp2p/go-libp2p/core/peer"

     "github.com/ethereum/go-ethereum/common"
     "github.com/harmony-one/harmony/api/service/legacysync/downloader"

api/service/stagedstreamsync/const.go

@@ -14,7 +14,10 @@ const (
     BlockByHashesUpperCap   int = 10 // number of get blocks by hashes upper cap
     BlockByHashesLowerCap   int = 3  // number of get blocks by hashes lower cap
     LastMileBlocksThreshold int = 10
+    SyncLoopBatchSize    uint32 = 30  // maximum size for one query of block hashes
+    VerifyHeaderBatchSize uint64 = 100 // block chain header verification batch size (not used for now)
+    LastMileBlocksSize          = 50

     // SoftQueueCap is the soft cap of size in resultQueue. When the queue size is larger than this limit,
     // no more request will be assigned to workers to wait for InsertChain to finish.
@@ -50,8 +53,15 @@ type (
         // config for beacon config
         BHConfig *BeaconHelperConfig

+        // use memory db
+        UseMemDB bool
+
         // log the stage progress
         LogProgress bool
+
+        // logs every single process and error to help debugging stream sync
+        // DebugMode is not accessible to the end user and is only an aid for development
+        DebugMode bool
     }

 // BeaconHelperConfig is the extra config used for beaconHelper which uses
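
For orientation, a minimal sketch (not part of this diff) of how the new knobs sit together in the stream-sync Config; the values below are illustrative only, not the release defaults:

package example

import "github.com/harmony-one/harmony/api/service/stagedstreamsync"

// illustrative values only; see cmd/harmony/default.go for the real defaults
func exampleConfig() stagedstreamsync.Config {
	return stagedstreamsync.Config{
		Concurrency: 4, // number of per-worker cache DBs (used by CreateStagedSync)
		MinStreams:  3,
		InitStreams: 3,
		UseMemDB:    true,  // new: keep the staged-sync block caches in memory
		LogProgress: false, // log the stage progress
		DebugMode:   false, // new: development-only verbose tracing
	}
}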

api/service/stagedstreamsync/default_stages.go

@@ -15,11 +15,13 @@ var DefaultForwardOrder = ForwardOrder{
     BlockBodies,
     // Stages below don't use Internet
     States,
+    LastMile,
     Finish,
 }

 var DefaultRevertOrder = RevertOrder{
     Finish,
+    LastMile,
     States,
     BlockBodies,
     ShortRange,
@@ -29,6 +31,7 @@ var DefaultRevertOrder = RevertOrder{
 var DefaultCleanUpOrder = CleanUpOrder{
     Finish,
+    LastMile,
     States,
     BlockBodies,
     ShortRange,
@@ -42,6 +45,7 @@ func DefaultStages(ctx context.Context,
     srCfg StageShortRangeCfg,
     bodiesCfg StageBodiesCfg,
     statesCfg StageStatesCfg,
+    lastMileCfg StageLastMileCfg,
     finishCfg StageFinishCfg,
 ) []*Stage {
@@ -50,6 +54,7 @@ func DefaultStages(ctx context.Context,
     handlerStageEpochSync := NewStageEpoch(seCfg)
     handlerStageBodies := NewStageBodies(bodiesCfg)
     handlerStageStates := NewStageStates(statesCfg)
+    handlerStageLastMile := NewStageLastMile(lastMileCfg)
     handlerStageFinish := NewStageFinish(finishCfg)

     return []*Stage{
@@ -78,6 +83,11 @@ func DefaultStages(ctx context.Context,
             Description: "Update Blockchain State",
             Handler:     handlerStageStates,
         },
+        {
+            ID:          LastMile,
+            Description: "update status for blocks after sync and update last mile blocks as well",
+            Handler:     handlerStageLastMile,
+        },
         {
             ID:          Finish,
             Description: "Finalize Changes",

api/service/stagedstreamsync/downloader.go

@@ -8,6 +8,7 @@ import (
     "github.com/ethereum/go-ethereum/event"
     "github.com/rs/zerolog"

+    "github.com/harmony-one/harmony/consensus"
     "github.com/harmony-one/harmony/core"
     nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
     "github.com/harmony-one/harmony/internal/utils"
@@ -37,7 +38,7 @@ type (
 )

 // NewDownloader creates a new downloader
-func NewDownloader(host p2p.Host, bc core.BlockChain, dbDir string, isBeaconNode bool, config Config) *Downloader {
+func NewDownloader(host p2p.Host, bc core.BlockChain, consensus *consensus.Consensus, dbDir string, isBeaconNode bool, config Config) *Downloader {
     config.fixValues()

     sp := sync.NewProtocol(sync.Config{
@@ -67,8 +68,8 @@ func NewDownloader(host p2p.Host, bc core.BlockChain, dbDir string, isBeaconNode
     ctx, cancel := context.WithCancel(context.Background())

-    //TODO: use mem db should be in config file
-    stagedSyncInstance, err := CreateStagedSync(ctx, bc, dbDir, false, isBeaconNode, sp, config, logger, config.LogProgress)
+    // create an instance of staged sync for the downloader
+    stagedSyncInstance, err := CreateStagedSync(ctx, bc, consensus, dbDir, isBeaconNode, sp, config, logger)
     if err != nil {
         cancel()
         return nil
@@ -189,6 +190,7 @@ func (d *Downloader) waitForBootFinish() {
 func (d *Downloader) loop() {
     ticker := time.NewTicker(10 * time.Second)
     defer ticker.Stop()
+
     // for shard chain and beacon chain node, first we start with initSync=true to
     // make sure it goes through the long range sync first.
     // for epoch chain we only need to go through the epoch sync process
@@ -208,7 +210,8 @@ func (d *Downloader) loop() {
             go trigger()

         case <-d.downloadC:
-            addedBN, err := d.stagedSyncInstance.doSync(d.ctx, initSync)
+            bnBeforeSync := d.bc.CurrentBlock().NumberU64()
+            estimatedHeight, addedBN, err := d.stagedSyncInstance.doSync(d.ctx, initSync)
             if err != nil {
                 //TODO: if there is a bad block which can't be resolved
                 if d.stagedSyncInstance.invalidBlock.Active {
@@ -216,13 +219,14 @@ func (d *Downloader) loop() {
                     // if many streams couldn't solve it, then that's an unresolvable bad block
                     if numTriedStreams >= d.config.InitStreams {
                         if !d.stagedSyncInstance.invalidBlock.IsLogged {
-                            fmt.Println("unresolvable bad block:", d.stagedSyncInstance.invalidBlock.Number)
+                            d.logger.Error().
+                                Uint64("bad block number", d.stagedSyncInstance.invalidBlock.Number).
+                                Msg(WrapStagedSyncMsg("unresolvable bad block"))
                             d.stagedSyncInstance.invalidBlock.IsLogged = true
                         }
                         //TODO: if we don't have any new or untried stream in the list, sleep or panic
                     }
                 }
                 // If any error happens, sleep 5 seconds and retry
                 d.logger.Error().
                     Err(err).
@@ -242,16 +246,27 @@ func (d *Downloader) loop() {
                     Uint32("shard", d.bc.ShardID()).
                     Msg(WrapStagedSyncMsg("sync finished"))
             }

-            // If block number has been changed, trigger another sync
             if addedBN != 0 {
+                // If block number has been changed, trigger another sync
                 go trigger()
+                // try to add last mile from pub-sub (blocking)
+                if d.bh != nil {
+                    d.bh.insertSync()
+                }
             }
-            // try to add last mile from pub-sub (blocking)
-            if d.bh != nil {
-                d.bh.insertSync()
-            }
-            initSync = false
+            // if the last doSync only needed to add a few blocks (fewer than LastMileBlocksThreshold)
+            // and the node is fully synced now, switch to short range.
+            // distanceBeforeSync matters because, if the distance was long, it is
+            // very likely that a few new blocks were added on the other nodes in the
+            // meantime, so we should stay in long range and check them.
+            bnAfterSync := d.bc.CurrentBlock().NumberU64()
+            distanceBeforeSync := estimatedHeight - bnBeforeSync
+            distanceAfterSync := estimatedHeight - bnAfterSync
+            if estimatedHeight > 0 && addedBN > 0 &&
+                distanceBeforeSync <= uint64(LastMileBlocksThreshold) &&
+                distanceAfterSync <= uint64(LastMileBlocksThreshold) {
+                initSync = false
+            }

         case <-d.closeC:
             return
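
The switch from long-range to short-range sync above hinges on both gap measurements. A standalone restatement of that predicate (the constant value is copied from const.go):

package main

import "fmt"

const lastMileBlocksThreshold = 10 // mirrors LastMileBlocksThreshold in const.go

// shouldLeaveLongRange restates the loop's new rule: only flip initSync to
// false when the node was already near the tip before the cycle started, is
// still near it afterwards, and the cycle actually added blocks.
func shouldLeaveLongRange(estimatedHeight, bnBefore, bnAfter uint64, addedBN int) bool {
	if estimatedHeight == 0 || addedBN <= 0 {
		return false
	}
	return estimatedHeight-bnBefore <= lastMileBlocksThreshold &&
		estimatedHeight-bnAfter <= lastMileBlocksThreshold
}

func main() {
	// long distance before the cycle: stay in long-range even if caught up now
	fmt.Println(shouldLeaveLongRange(1000, 500, 1000, 500)) // false
	// already near the tip and still near it: switch to short-range
	fmt.Println(shouldLeaveLongRange(1000, 995, 1000, 5)) // true
}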

api/service/stagedstreamsync/downloaders.go

@@ -2,6 +2,7 @@ package stagedstreamsync

 import (
     "github.com/harmony-one/abool"
+    "github.com/harmony-one/harmony/consensus"
     "github.com/harmony-one/harmony/core"
     "github.com/harmony-one/harmony/p2p"
 )
@@ -15,7 +16,7 @@ type Downloaders struct {
 }

 // NewDownloaders creates Downloaders for sync of multiple blockchains
-func NewDownloaders(host p2p.Host, bcs []core.BlockChain, dbDir string, config Config) *Downloaders {
+func NewDownloaders(host p2p.Host, bcs []core.BlockChain, consensus *consensus.Consensus, dbDir string, config Config) *Downloaders {
     ds := make(map[uint32]*Downloader)
     isBeaconNode := len(bcs) == 1
     for _, bc := range bcs {
@@ -25,7 +26,7 @@ func NewDownloaders(host p2p.Host, bcs []core.BlockChain, dbDir string, config C
         if _, ok := ds[bc.ShardID()]; ok {
             continue
         }
-        ds[bc.ShardID()] = NewDownloader(host, bc, dbDir, isBeaconNode, config)
+        ds[bc.ShardID()] = NewDownloader(host, bc, consensus, dbDir, isBeaconNode, config)
     }
     return &Downloaders{
         ds: ds,

api/service/stagedstreamsync/service.go

@@ -1,6 +1,7 @@
 package stagedstreamsync

 import (
+    "github.com/harmony-one/harmony/consensus"
     "github.com/harmony-one/harmony/core"
     "github.com/harmony-one/harmony/p2p"
 )
@@ -11,9 +12,9 @@ type StagedStreamSyncService struct {
 }

 // NewService creates a new downloader service
-func NewService(host p2p.Host, bcs []core.BlockChain, config Config, dbDir string) *StagedStreamSyncService {
+func NewService(host p2p.Host, bcs []core.BlockChain, consensus *consensus.Consensus, config Config, dbDir string) *StagedStreamSyncService {
     return &StagedStreamSyncService{
-        Downloaders: NewDownloaders(host, bcs, dbDir, config),
+        Downloaders: NewDownloaders(host, bcs, consensus, dbDir, config),
     }
 }

api/service/stagedstreamsync/short_range_helper.go

@@ -207,6 +207,12 @@ func (sh *srHelper) removeStreams(sts []sttypes.StreamID) {
     }
 }

+func (sh *srHelper) streamsFailed(sts []sttypes.StreamID, reason string) {
+    for _, st := range sts {
+        sh.syncProtocol.StreamFailed(st, reason)
+    }
+}
+
 // blameAllStreams chooses not to blame all whitelisted streams when it is only the last block's signature verification that failed.
 func (sh *srHelper) blameAllStreams(blocks types.Blocks, errIndex int, err error) bool {
     if errors.As(err, &emptySigVerifyErr) && errIndex == len(blocks)-1 {

api/service/stagedstreamsync/stage_bodies.go

@@ -10,6 +10,7 @@ import (
     "github.com/harmony-one/harmony/core/types"
     "github.com/harmony-one/harmony/internal/utils"
     sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+    "github.com/harmony-one/harmony/shard"
     "github.com/ledgerwatch/erigon-lib/kv"
     "github.com/pkg/errors"
 )
@@ -60,6 +61,11 @@ func (b *StageBodies) Exec(ctx context.Context, firstCycle bool, invalidBlockRev
         return nil
     }

+    // shouldn't execute for epoch chain
+    if b.configs.bc.ShardID() == shard.BeaconChainShardID && !s.state.isBeaconNode {
+        return nil
+    }
+
     maxHeight := s.state.status.targetBN
     currentHead := b.configs.bc.CurrentBlock().NumberU64()
     if currentHead >= maxHeight {
@@ -77,7 +83,7 @@ func (b *StageBodies) Exec(ctx context.Context, firstCycle bool, invalidBlockRev
         return errV
     }

-    if currProgress == 0 {
+    if currProgress <= currentHead {
         if err := b.cleanAllBlockDBs(ctx); err != nil {
             return err
         }
@@ -209,7 +215,7 @@ func (b *StageBodies) redownloadBadBlock(ctx context.Context, s *StageState) err
         isOneOfTheBadStreams := false
         for _, id := range s.state.invalidBlock.StreamID {
             if id == stid {
-                b.configs.protocol.RemoveStream(stid)
+                b.configs.protocol.StreamFailed(stid, "re-download bad block from this stream failed")
                 isOneOfTheBadStreams = true
                 break
             }

api/service/stagedstreamsync/stage_epoch.go

@@ -5,6 +5,7 @@ import (
     "github.com/harmony-one/harmony/core"
     "github.com/harmony-one/harmony/internal/utils"
+    sttypes "github.com/harmony-one/harmony/p2p/stream/types"
     "github.com/harmony-one/harmony/shard"
     "github.com/ledgerwatch/erigon-lib/kv"
     "github.com/pkg/errors"
@@ -51,9 +52,12 @@ func (sr *StageEpoch) Exec(ctx context.Context, firstCycle bool, invalidBlockRev
     n, err := sr.doShortRangeSyncForEpochSync(ctx, s)
     s.state.inserted = n
     if err != nil {
+        utils.Logger().Info().Err(err).Msg("short range for epoch sync failed")
         return err
     }
+    if n > 0 {
+        utils.Logger().Info().Err(err).Int("blocks inserted", n).Msg("epoch sync short range blocks inserted successfully")
+    }

     useInternalTx := tx == nil
     if useInternalTx {
         var err error
@@ -108,30 +112,13 @@ func (sr *StageEpoch) doShortRangeSyncForEpochSync(ctx context.Context, s *Stage
         return 0, nil
     }

-    ////////////////////////////////////////////////////////
-    hashChain, whitelist, err := sh.getHashChain(ctx, bns)
+    blocks, streamID, err := sh.getBlocksChain(ctx, bns)
     if err != nil {
+        if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
+            return 0, nil
+        }
         return 0, errors.Wrap(err, "getHashChain")
     }
-    if len(hashChain) == 0 {
-        // short circuit for no sync is needed
-        return 0, nil
-    }
-    blocks, streamID, err := sh.getBlocksByHashes(ctx, hashChain, whitelist)
-    if err != nil {
-        utils.Logger().Warn().Err(err).Msg("epoch sync getBlocksByHashes failed")
-        if !errors.Is(err, context.Canceled) {
-            sh.removeStreams(whitelist) // Remote nodes cannot provide blocks with target hashes
-        }
-        return 0, errors.Wrap(err, "epoch sync getBlocksByHashes")
-    }
-    ///////////////////////////////////////////////////////
-    // TODO: check this
-    // blocks, streamID, err := sh.getBlocksChain(bns)
-    // if err != nil {
-    //     return 0, errors.Wrap(err, "getHashChain")
-    // }
-    ///////////////////////////////////////////////////////
     if len(blocks) == 0 {
         // short circuit for no sync is needed
         return 0, nil
@@ -141,12 +128,9 @@ func (sr *StageEpoch) doShortRangeSyncForEpochSync(ctx context.Context, s *Stage
     numBlocksInsertedShortRangeHistogramVec.With(s.state.promLabels()).Observe(float64(n))
     if err != nil {
         utils.Logger().Info().Err(err).Int("blocks inserted", n).Msg("Insert block failed")
-        sh.removeStreams(streamID) // Data provided by remote nodes is corrupted
+        sh.streamsFailed([]sttypes.StreamID{streamID}, "corrupted data")
         return n, err
     }
-    if n > 0 {
-        utils.Logger().Info().Int("blocks inserted", n).Msg("Insert block success")
-    }
     return n, nil
 }

api/service/stagedstreamsync/stage_lastmile.go

@@ -0,0 +1,109 @@
+package stagedstreamsync
+
+import (
+    "context"
+
+    "github.com/ethereum/go-ethereum/common"
+    "github.com/harmony-one/harmony/core"
+    "github.com/harmony-one/harmony/shard"
+    "github.com/ledgerwatch/erigon-lib/kv"
+)
+
+type StageLastMile struct {
+    configs StageLastMileCfg
+}
+
+type StageLastMileCfg struct {
+    ctx context.Context
+    bc  core.BlockChain
+    db  kv.RwDB
+}
+
+func NewStageLastMile(cfg StageLastMileCfg) *StageLastMile {
+    return &StageLastMile{
+        configs: cfg,
+    }
+}
+
+func NewStageLastMileCfg(ctx context.Context, bc core.BlockChain, db kv.RwDB) StageLastMileCfg {
+    return StageLastMileCfg{
+        ctx: ctx,
+        bc:  bc,
+        db:  db,
+    }
+}
+
+func (lm *StageLastMile) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) {
+    // no need to download the last mile blocks if we are redoing the stages because of bad block
+    if invalidBlockRevert {
+        return nil
+    }
+
+    // shouldn't execute for epoch chain
+    if lm.configs.bc.ShardID() == shard.BeaconChainShardID && !s.state.isBeaconNode {
+        return nil
+    }
+
+    bc := lm.configs.bc
+
+    // update last mile blocks if any
+    parentHash := bc.CurrentBlock().Hash()
+    var hashes []common.Hash
+    for {
+        block := s.state.getBlockFromLastMileBlocksByParentHash(parentHash)
+        if block == nil {
+            break
+        }
+        err = s.state.UpdateBlockAndStatus(block, bc, false)
+        if err != nil {
+            s.state.RollbackLastMileBlocks(ctx, hashes)
+            return err
+        }
+        hashes = append(hashes, block.Hash())
+        parentHash = block.Hash()
+    }
+    s.state.purgeLastMileBlocksFromCache()
+
+    return nil
+}
+
+func (lm *StageLastMile) Revert(ctx context.Context, firstCycle bool, u *RevertState, s *StageState, tx kv.RwTx) (err error) {
+    useInternalTx := tx == nil
+    if useInternalTx {
+        tx, err = lm.configs.db.BeginRw(lm.configs.ctx)
+        if err != nil {
+            return err
+        }
+        defer tx.Rollback()
+    }
+
+    if err = u.Done(tx); err != nil {
+        return err
+    }
+
+    if useInternalTx {
+        if err = tx.Commit(); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func (lm *StageLastMile) CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) (err error) {
+    useInternalTx := tx == nil
+    if useInternalTx {
+        tx, err = lm.configs.db.BeginRw(lm.configs.ctx)
+        if err != nil {
+            return err
+        }
+        defer tx.Rollback()
+    }
+
+    if useInternalTx {
+        if err = tx.Commit(); err != nil {
+            return err
+        }
+    }
+    return nil
+}
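
The Exec loop above extends the tip by repeatedly looking up a cached block whose parent hash equals the current tip. A self-contained model of that chaining (toy types, not the real core.BlockChain):

package main

import "fmt"

type blk struct{ hash, parent string }

// drainByParentHash mimics StageLastMile.Exec: starting from the tip, keep
// applying whichever cached block extends it, following parent hashes.
func drainByParentHash(tip string, cache []blk) []string {
	var applied []string
	for {
		var next *blk
		for i := range cache {
			if cache[i].parent == tip {
				next = &cache[i]
				break
			}
		}
		if next == nil {
			break // nothing in the cache extends the current tip
		}
		applied = append(applied, next.hash)
		tip = next.hash // the applied block becomes the new tip
	}
	return applied
}

func main() {
	cache := []blk{{"c", "b"}, {"b", "a"}}     // arrival order does not matter
	fmt.Println(drainByParentHash("a", cache)) // [b c]
}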

api/service/stagedstreamsync/stage_short_range.go

@@ -44,6 +44,7 @@ func (sr *StageShortRange) Exec(ctx context.Context, firstCycle bool, invalidBlo
         return nil
     }

+    // shouldn't execute for epoch chain
     if sr.configs.bc.ShardID() == shard.BeaconChainShardID && !s.state.isBeaconNode {
         return nil
     }
@@ -52,8 +53,12 @@ func (sr *StageShortRange) Exec(ctx context.Context, firstCycle bool, invalidBlo
     n, err := sr.doShortRangeSync(ctx, s)
     s.state.inserted = n
     if err != nil {
+        utils.Logger().Info().Err(err).Msg("short range sync failed")
         return err
     }
+    if n > 0 {
+        utils.Logger().Info().Err(err).Int("blocks inserted", n).Msg("short range blocks inserted successfully")
+    }

     useInternalTx := tx == nil
     if useInternalTx {
@@ -98,6 +103,9 @@ func (sr *StageShortRange) doShortRangeSync(ctx context.Context, s *StageState)
     blkNums := sh.prepareBlockHashNumbers(curBN)
     hashChain, whitelist, err := sh.getHashChain(ctx, blkNums)
     if err != nil {
+        if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
+            return 0, nil
+        }
         return 0, errors.Wrap(err, "getHashChain")
     }
@@ -114,37 +122,34 @@ func (sr *StageShortRange) doShortRangeSync(ctx context.Context, s *StageState)
     s.state.status.setTargetBN(expEndBN)

-    s.state.status.startSyncing()
-    defer func() {
-        utils.Logger().Info().Msg("short range finished syncing")
-        s.state.status.finishSyncing()
-    }()

     blocks, stids, err := sh.getBlocksByHashes(ctx, hashChain, whitelist)
     if err != nil {
         utils.Logger().Warn().Err(err).Msg("getBlocksByHashes failed")
-        if !errors.Is(err, context.Canceled) {
-            sh.removeStreams(whitelist) // Remote nodes cannot provide blocks with target hashes
+        if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
+            return 0, errors.Wrap(err, "getBlocksByHashes")
         }
-        return 0, errors.Wrap(err, "getBlocksByHashes")
+        sh.streamsFailed(whitelist, "remote nodes cannot provide blocks with target hashes")
     }

-    utils.Logger().Info().Int("num blocks", len(blocks)).Msg("getBlockByHashes result")

     n, err := verifyAndInsertBlocks(sr.configs.bc, blocks)
     numBlocksInsertedShortRangeHistogramVec.With(s.state.promLabels()).Observe(float64(n))
     if err != nil {
         utils.Logger().Warn().Err(err).Int("blocks inserted", n).Msg("Insert block failed")
+        // rollback all added new blocks
+        if rbErr := sr.configs.bc.Rollback(hashChain); rbErr != nil {
+            utils.Logger().Error().Err(rbErr).Msg("short range failed to rollback")
+            return 0, rbErr
+        }
+        // fail streams
         if sh.blameAllStreams(blocks, n, err) {
-            sh.removeStreams(whitelist) // Data provided by remote nodes is corrupted
+            sh.streamsFailed(whitelist, "data provided by remote nodes is corrupted")
         } else {
             // It is the last block gives a wrong commit sig. Blame the provider of the last block.
             st2Blame := stids[len(stids)-1]
-            sh.removeStreams([]sttypes.StreamID{st2Blame})
+            sh.streamsFailed([]sttypes.StreamID{st2Blame}, "the last block provided by stream gives a wrong commit sig")
         }
-        return n, err
+        return 0, err
     }
-    utils.Logger().Info().Err(err).Int("blocks inserted", n).Msg("Insert block success")

     return n, nil
 }

api/service/stagedstreamsync/stage_state.go

@@ -10,6 +10,7 @@ import (
     "github.com/harmony-one/harmony/core"
     "github.com/harmony-one/harmony/core/types"
     "github.com/harmony-one/harmony/internal/utils"
+    "github.com/harmony-one/harmony/shard"
    "github.com/ledgerwatch/erigon-lib/kv"
     "github.com/rs/zerolog"
 )
@@ -57,6 +58,11 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR
         return nil
     }

+    // shouldn't execute for epoch chain
+    if stg.configs.bc.ShardID() == shard.BeaconChainShardID && !s.state.isBeaconNode {
+        return nil
+    }
+
     maxHeight := s.state.status.targetBN
     currentHead := stg.configs.bc.CurrentBlock().NumberU64()
     if currentHead >= maxHeight {
@@ -144,8 +150,10 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR
         if block.NumberU64() != i {
             s.state.protocol.StreamFailed(streamID, "invalid block with unmatched number is received from stream")
-            invalidBlockHash := block.Hash()
-            reverter.RevertTo(stg.configs.bc.CurrentBlock().NumberU64(), i, invalidBlockHash, streamID)
+            if !invalidBlockRevert {
+                invalidBlockHash := block.Hash()
+                reverter.RevertTo(stg.configs.bc.CurrentBlock().NumberU64(), i, invalidBlockHash, streamID)
+            }
             return ErrInvalidBlockNumber
         }

api/service/stagedstreamsync/staged_stream_sync.go

@@ -8,11 +8,16 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/event"

+    "github.com/harmony-one/harmony/consensus"
+    "github.com/harmony-one/harmony/consensus/engine"
     "github.com/harmony-one/harmony/core"
+    "github.com/harmony-one/harmony/core/types"
+    "github.com/harmony-one/harmony/internal/chain"
     "github.com/harmony-one/harmony/internal/utils"
     syncproto "github.com/harmony-one/harmony/p2p/stream/protocols/sync"
     sttypes "github.com/harmony-one/harmony/p2p/stream/types"
     "github.com/ledgerwatch/erigon-lib/kv"
+    "github.com/pkg/errors"
     "github.com/prometheus/client_golang/prometheus"
     "github.com/rs/zerolog"
 )
@@ -54,19 +59,22 @@ func (ib *InvalidBlock) addBadStream(bsID sttypes.StreamID) {
 }

 type StagedStreamSync struct {
     bc core.BlockChain
-    isBeacon     bool
-    isExplorer   bool
-    db           kv.RwDB
-    protocol     syncProtocol
-    isBeaconNode bool
-    gbm          *blockDownloadManager // initialized when finished get block number
-    inserted     int
-    config       Config
-    logger       zerolog.Logger
-    status       *status //TODO: merge this with currentSyncCycle
-    initSync     bool    // if sets to true, node start long range syncing
-    UseMemDB     bool
+    consensus      *consensus.Consensus
+    isBeacon       bool
+    isExplorer     bool
+    db             kv.RwDB
+    protocol       syncProtocol
+    isBeaconNode   bool
+    gbm            *blockDownloadManager // initialized when finished get block number
+    lastMileBlocks []*types.Block        // last mile blocks to catch up with the consensus
+    lastMileMux    sync.Mutex
+    inserted       int
+    config         Config
+    logger         zerolog.Logger
+    status         *status //TODO: merge this with currentSyncCycle
+    initSync       bool    // if sets to true, node start long range syncing
+    UseMemDB       bool

     revertPoint     *uint64 // used to run stages
     prevRevertPoint *uint64 // used to get value from outside of staged sync after cycle (for example to notify RPCDaemon)
@@ -249,12 +257,12 @@ func (s *StagedStreamSync) cleanUp(ctx context.Context, fromStage int, db kv.RwD
 // New creates a new StagedStreamSync instance
 func New(
     bc core.BlockChain,
+    consensus *consensus.Consensus,
     db kv.RwDB,
     stagesList []*Stage,
     isBeacon bool,
     protocol syncProtocol,
     isBeaconNode bool,
-    useMemDB bool,
     config Config,
     logger zerolog.Logger,
 ) *StagedStreamSync {
@@ -286,22 +294,24 @@ func New(
     status := newStatus()

     return &StagedStreamSync{
         bc:             bc,
-        isBeacon:       isBeacon,
-        db:             db,
-        protocol:       protocol,
-        isBeaconNode:   isBeaconNode,
-        gbm:            nil,
-        status:         &status,
-        inserted:       0,
-        config:         config,
-        logger:         logger,
-        stages:         stagesList,
-        currentStage:   0,
-        revertOrder:    revertStages,
-        pruningOrder:   pruneStages,
-        logPrefixes:    logPrefixes,
-        UseMemDB:       useMemDB,
+        consensus:      consensus,
+        isBeacon:       isBeacon,
+        db:             db,
+        protocol:       protocol,
+        isBeaconNode:   isBeaconNode,
+        lastMileBlocks: []*types.Block{},
+        gbm:            nil,
+        status:         &status,
+        inserted:       0,
+        config:         config,
+        logger:         logger,
+        stages:         stagesList,
+        currentStage:   0,
+        revertOrder:    revertStages,
+        pruningOrder:   pruneStages,
+        logPrefixes:    logPrefixes,
+        UseMemDB:       config.UseMemDB,
     }
 }
@@ -583,3 +593,133 @@ func (s *StagedStreamSync) EnableStages(ids ...SyncStageID) {
         }
     }
 }
+
+func (ss *StagedStreamSync) purgeLastMileBlocksFromCache() {
+    ss.lastMileMux.Lock()
+    ss.lastMileBlocks = nil
+    ss.lastMileMux.Unlock()
+}
+
+// AddLastMileBlock adds the latest few blocks into the queue for syncing,
+// only keeping the latest blocks with size capped by LastMileBlocksSize
+func (ss *StagedStreamSync) AddLastMileBlock(block *types.Block) {
+    ss.lastMileMux.Lock()
+    defer ss.lastMileMux.Unlock()
+    if ss.lastMileBlocks != nil {
+        if len(ss.lastMileBlocks) >= LastMileBlocksSize {
+            ss.lastMileBlocks = ss.lastMileBlocks[1:]
+        }
+        ss.lastMileBlocks = append(ss.lastMileBlocks, block)
+    }
+}
+
+func (ss *StagedStreamSync) getBlockFromLastMileBlocksByParentHash(parentHash common.Hash) *types.Block {
+    ss.lastMileMux.Lock()
+    defer ss.lastMileMux.Unlock()
+    for _, block := range ss.lastMileBlocks {
+        ph := block.ParentHash()
+        if ph == parentHash {
+            return block
+        }
+    }
+    return nil
+}
+
+func (ss *StagedStreamSync) addConsensusLastMile(bc core.BlockChain, cs *consensus.Consensus) ([]common.Hash, error) {
+    curNumber := bc.CurrentBlock().NumberU64()
+    var hashes []common.Hash
+
+    err := cs.GetLastMileBlockIter(curNumber+1, func(blockIter *consensus.LastMileBlockIter) error {
+        for {
+            block := blockIter.Next()
+            if block == nil {
+                break
+            }
+            if _, err := bc.InsertChain(types.Blocks{block}, true); err != nil {
+                return errors.Wrap(err, "failed to InsertChain")
+            }
+            hashes = append(hashes, block.Header().Hash())
+        }
+        return nil
+    })
+    return hashes, err
+}
+
+func (ss *StagedStreamSync) RollbackLastMileBlocks(ctx context.Context, hashes []common.Hash) error {
+    if len(hashes) == 0 {
+        return nil
+    }
+    utils.Logger().Info().
+        Interface("block", ss.bc.CurrentBlock()).
+        Msg("[STAGED_STREAM_SYNC] Rolling back last mile blocks")
+    if err := ss.bc.Rollback(hashes); err != nil {
+        utils.Logger().Error().Err(err).
+            Msg("[STAGED_STREAM_SYNC] failed to rollback last mile blocks")
+        return err
+    }
+    return nil
+}
+
+// UpdateBlockAndStatus updates a block and its status in the db
+func (ss *StagedStreamSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain, verifyAllSig bool) error {
+    if block.NumberU64() != bc.CurrentBlock().NumberU64()+1 {
+        utils.Logger().Debug().
+            Uint64("curBlockNum", bc.CurrentBlock().NumberU64()).
+            Uint64("receivedBlockNum", block.NumberU64()).
+            Msg("[STAGED_STREAM_SYNC] Inappropriate block number, ignore!")
+        return nil
+    }
+
+    haveCurrentSig := len(block.GetCurrentCommitSig()) != 0
+    // Verify block signatures
+    if block.NumberU64() > 1 {
+        // Verify the signature every N blocks (where N is VerifyHeaderBatchSize and can be adjusted in configs)
+        verifySeal := block.NumberU64()%VerifyHeaderBatchSize == 0 || verifyAllSig
+        verifyCurrentSig := verifyAllSig && haveCurrentSig
+        if verifyCurrentSig {
+            sig, bitmap, err := chain.ParseCommitSigAndBitmap(block.GetCurrentCommitSig())
+            if err != nil {
+                return errors.Wrap(err, "parse commitSigAndBitmap")
+            }
+
+            startTime := time.Now()
+            if err := bc.Engine().VerifyHeaderSignature(bc, block.Header(), sig, bitmap); err != nil {
+                return errors.Wrapf(err, "verify header signature %v", block.Hash().String())
+            }
+            utils.Logger().Debug().
+                Int64("elapsed time", time.Now().Sub(startTime).Milliseconds()).
+                Msg("[STAGED_STREAM_SYNC] VerifyHeaderSignature")
+        }
+        err := bc.Engine().VerifyHeader(bc, block.Header(), verifySeal)
+        if err == engine.ErrUnknownAncestor {
+            return nil
+        } else if err != nil {
+            utils.Logger().Error().
+                Err(err).
+                Uint64("block number", block.NumberU64()).
+                Msgf("[STAGED_STREAM_SYNC] UpdateBlockAndStatus: failed verifying signatures for new block")
+            return err
+        }
+    }
+
+    _, err := bc.InsertChain([]*types.Block{block}, false /* verifyHeaders */)
+    if err != nil {
+        utils.Logger().Error().
+            Err(err).
+            Uint64("block number", block.NumberU64()).
+            Uint32("shard", block.ShardID()).
+            Msgf("[STAGED_STREAM_SYNC] UpdateBlockAndStatus: Error adding new block to blockchain")
+        return err
+    }
+    utils.Logger().Info().
+        Uint64("blockHeight", block.NumberU64()).
+        Uint64("blockEpoch", block.Epoch().Uint64()).
+        Str("blockHex", block.Hash().Hex()).
+        Uint32("ShardID", block.ShardID()).
+        Msg("[STAGED_STREAM_SYNC] UpdateBlockAndStatus: New Block Added to Blockchain")
+
+    for i, tx := range block.StakingTransactions() {
+        utils.Logger().Info().Msgf("StakingTxn %d: %s, %v", i, tx.StakingType().String(), tx.StakingMessage())
+    }
+    return nil
+}
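
AddLastMileBlock keeps at most LastMileBlocksSize blocks, evicting the oldest first. The cap in isolation (shrunk for the demo):

package main

import "fmt"

const lastMileBlocksSize = 3 // the real cap is LastMileBlocksSize = 50 in const.go

// addCapped reproduces the queueing rule in AddLastMileBlock: FIFO with a
// hard size cap, dropping the oldest entry when full.
func addCapped(q []int, v int) []int {
	if len(q) >= lastMileBlocksSize {
		q = q[1:] // evict the oldest block
	}
	return append(q, v)
}

func main() {
	var q []int
	for v := 1; v <= 5; v++ {
		q = addCapped(q, v)
	}
	fmt.Println(q) // [3 4 5]
}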

api/service/stagedstreamsync/stages.go

@@ -13,6 +13,7 @@ const (
     SyncEpoch   SyncStageID = "SyncEpoch"   // epoch sync
     BlockBodies SyncStageID = "BlockBodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified
     States      SyncStageID = "States"      // will construct most recent state from downloaded blocks
+    LastMile    SyncStageID = "LastMile"    // update blocks after sync and update last mile blocks as well
     Finish      SyncStageID = "Finish"      // Nominal stage after all other stages
 )

api/service/stagedstreamsync/syncing.go

@@ -4,13 +4,15 @@ import (
     "context"
     "fmt"
     "path/filepath"
+    "runtime"
+    "strings"
     "sync"
     "time"

+    "github.com/harmony-one/harmony/consensus"
     "github.com/harmony-one/harmony/core"
     "github.com/harmony-one/harmony/internal/utils"
     sttypes "github.com/harmony-one/harmony/p2p/stream/types"
-    "github.com/harmony-one/harmony/shard"
     "github.com/ledgerwatch/erigon-lib/kv"
     "github.com/ledgerwatch/erigon-lib/kv/mdbx"
     "github.com/ledgerwatch/erigon-lib/kv/memdb"
@@ -38,41 +40,53 @@ var Buckets = []string{
 // CreateStagedSync creates an instance of staged sync
 func CreateStagedSync(ctx context.Context,
     bc core.BlockChain,
+    consensus *consensus.Consensus,
     dbDir string,
-    UseMemDB bool,
     isBeaconNode bool,
     protocol syncProtocol,
     config Config,
     logger zerolog.Logger,
-    logProgress bool,
 ) (*StagedStreamSync, error) {
-    isBeacon := bc.ShardID() == shard.BeaconChainShardID
+    logger.Info().
+        Uint32("shard", bc.ShardID()).
+        Bool("beaconNode", isBeaconNode).
+        Bool("memdb", config.UseMemDB).
+        Str("dbDir", dbDir).
+        Bool("serverOnly", config.ServerOnly).
+        Int("minStreams", config.MinStreams).
+        Msg(WrapStagedSyncMsg("creating staged sync"))

     var mainDB kv.RwDB
     dbs := make([]kv.RwDB, config.Concurrency)
-    if UseMemDB {
-        mainDB = memdb.New(getMemDbTempPath(dbDir, -1))
+    if config.UseMemDB {
+        mainDB = memdb.New(getBlockDbPath(bc.ShardID(), isBeaconNode, -1, dbDir))
         for i := 0; i < config.Concurrency; i++ {
-            dbs[i] = memdb.New(getMemDbTempPath(dbDir, i))
+            dbPath := getBlockDbPath(bc.ShardID(), isBeaconNode, i, dbDir)
+            dbs[i] = memdb.New(dbPath)
         }
     } else {
-        mainDB = mdbx.NewMDBX(log.New()).Path(getBlockDbPath(isBeacon, -1, dbDir)).MustOpen()
+        logger.Info().
+            Str("path", getBlockDbPath(bc.ShardID(), isBeaconNode, -1, dbDir)).
+            Msg(WrapStagedSyncMsg("creating main db"))
+        mainDB = mdbx.NewMDBX(log.New()).Path(getBlockDbPath(bc.ShardID(), isBeaconNode, -1, dbDir)).MustOpen()
         for i := 0; i < config.Concurrency; i++ {
-            dbPath := getBlockDbPath(isBeacon, i, dbDir)
+            dbPath := getBlockDbPath(bc.ShardID(), isBeaconNode, i, dbDir)
            dbs[i] = mdbx.NewMDBX(log.New()).Path(dbPath).MustOpen()
         }
     }

     if errInitDB := initDB(ctx, mainDB, dbs, config.Concurrency); errInitDB != nil {
+        logger.Error().Err(errInitDB).Msg("create staged sync instance failed")
         return nil, errInitDB
     }

     stageHeadsCfg := NewStageHeadersCfg(bc, mainDB)
     stageShortRangeCfg := NewStageShortRangeCfg(bc, mainDB)
     stageSyncEpochCfg := NewStageEpochCfg(bc, mainDB)
-    stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeacon, logProgress)
-    stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, logger, logProgress)
+    stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress)
+    stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, logger, config.LogProgress)
+    lastMileCfg := NewStageLastMileCfg(ctx, bc, mainDB)
     stageFinishCfg := NewStageFinishCfg(mainDB)

     stages := DefaultStages(ctx,
@@ -81,17 +95,27 @@ func CreateStagedSync(ctx context.Context,
         stageShortRangeCfg,
         stageBodiesCfg,
         stageStatesCfg,
+        lastMileCfg,
         stageFinishCfg,
     )

+    logger.Info().
+        Uint32("shard", bc.ShardID()).
+        Bool("beaconNode", isBeaconNode).
+        Bool("memdb", config.UseMemDB).
+        Str("dbDir", dbDir).
+        Bool("serverOnly", config.ServerOnly).
+        Int("minStreams", config.MinStreams).
+        Msg(WrapStagedSyncMsg("staged sync created successfully"))
+
     return New(
         bc,
+        consensus,
         mainDB,
         stages,
-        isBeacon,
+        isBeaconNode,
         protocol,
         isBeaconNode,
-        UseMemDB,
         config,
         logger,
     ), nil
@@ -147,7 +171,7 @@ func getMemDbTempPath(dbDir string, dbIndex int) string {
 }

 // getBlockDbPath returns the path of the cache database which stores blocks
-func getBlockDbPath(beacon bool, loopID int, dbDir string) string {
+func getBlockDbPath(shardID uint32, beacon bool, loopID int, dbDir string) string {
     if beacon {
         if loopID >= 0 {
             return fmt.Sprintf("%s_%d", filepath.Join(dbDir, "cache/beacon_blocks_db"), loopID)
@@ -156,30 +180,57 @@ func getBlockDbPath(beacon bool, loopID int, dbDir string) string {
         }
     } else {
         if loopID >= 0 {
-            return fmt.Sprintf("%s_%d", filepath.Join(dbDir, "cache/blocks_db"), loopID)
+            return fmt.Sprintf("%s_%d_%d", filepath.Join(dbDir, "cache/blocks_db"), shardID, loopID)
         } else {
-            return filepath.Join(dbDir, "cache/blocks_db_main")
+            return fmt.Sprintf("%s_%d", filepath.Join(dbDir, "cache/blocks_db_main"), shardID)
         }
     }
 }

+func (s *StagedStreamSync) Debug(source string, msg interface{}) {
+    // only log the msg in debug mode
+    if !s.config.DebugMode {
+        return
+    }
+    pc, _, _, _ := runtime.Caller(1)
+    caller := runtime.FuncForPC(pc).Name()
+    callerParts := strings.Split(caller, "/")
+    if len(callerParts) > 0 {
+        caller = callerParts[len(callerParts)-1]
+    }
+    src := source
+    if src == "" {
+        src = "message"
+    }
+    // SSSD: STAGED STREAM SYNC DEBUG
+    if msg == nil {
+        fmt.Printf("[SSSD:%s] %s: nil or no error\n", caller, src)
+    } else if err, ok := msg.(error); ok {
+        fmt.Printf("[SSSD:%s] %s: %s\n", caller, src, err.Error())
+    } else if str, ok := msg.(string); ok {
+        fmt.Printf("[SSSD:%s] %s: %s\n", caller, src, str)
+    } else {
+        fmt.Printf("[SSSD:%s] %s: %v\n", caller, src, msg)
+    }
+}
+
 // doSync does the long range sync.
 // One LongRangeSync consists of several iterations.
 // For each iteration, estimate the current block number, then fetch block & insert to blockchain
-func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bool) (int, error) {
+func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bool) (uint64, int, error) {
     var totalInserted int

     s.initSync = initSync
     if err := s.checkPrerequisites(); err != nil {
-        return 0, err
+        return 0, 0, err
     }

     var estimatedHeight uint64
     if initSync {
         if h, err := s.estimateCurrentNumber(downloaderContext); err != nil {
-            return 0, err
+            return 0, 0, err
         } else {
             estimatedHeight = h
             //TODO: use directly currentCycle var
@@ -187,36 +238,70 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo
         }
         if curBN := s.bc.CurrentBlock().NumberU64(); estimatedHeight <= curBN {
             s.logger.Info().Uint64("current number", curBN).Uint64("target number", estimatedHeight).
-                Msg(WrapStagedSyncMsg("early return of long range sync"))
-            return 0, nil
+                Msg(WrapStagedSyncMsg("early return of long range sync (chain is already ahead of target height)"))
+            return estimatedHeight, 0, nil
         }
-        s.startSyncing()
-        defer s.finishSyncing()
     }

+    s.startSyncing()
+    defer s.finishSyncing()
+
     for {
         ctx, cancel := context.WithCancel(downloaderContext)

         n, err := s.doSyncCycle(ctx, initSync)
         if err != nil {
+            utils.Logger().Error().
+                Err(err).
+                Bool("initSync", s.initSync).
+                Bool("isBeacon", s.isBeacon).
+                Uint32("shard", s.bc.ShardID()).
+                Msg(WrapStagedSyncMsg("sync cycle failed"))
+
             pl := s.promLabels()
             pl["error"] = err.Error()
             numFailedDownloadCounterVec.With(pl).Inc()

             cancel()
-            return totalInserted + n, err
+            return estimatedHeight, totalInserted + n, err
         }
         cancel()

         totalInserted += n

         // if it's not long range sync, skip loop
-        if n < LastMileBlocksThreshold || !initSync {
-            return totalInserted, nil
+        if n == 0 || !initSync {
+            break
         }
     }

+    if totalInserted > 0 {
+        utils.Logger().Info().
+            Bool("initSync", s.initSync).
+            Bool("isBeacon", s.isBeacon).
+            Uint32("shard", s.bc.ShardID()).
+            Int("blocks", totalInserted).
+            Msg(WrapStagedSyncMsg("sync cycle blocks inserted successfully"))
+    }
+
+    // add consensus last mile blocks
+    if s.consensus != nil {
+        if hashes, err := s.addConsensusLastMile(s.Blockchain(), s.consensus); err != nil {
+            utils.Logger().Error().Err(err).
+                Msg("[STAGED_STREAM_SYNC] Add consensus last mile failed")
+            s.RollbackLastMileBlocks(downloaderContext, hashes)
+            return estimatedHeight, totalInserted, err
+        } else {
+            totalInserted += len(hashes)
+        }
+        // TODO: move this to explorer handler code.
+        if s.isExplorer {
+            s.consensus.UpdateConsensusInformation()
+        }
+    }
+    s.purgeLastMileBlocksFromCache()
+
+    return estimatedHeight, totalInserted, nil
 }

 func (s *StagedStreamSync) doSyncCycle(ctx context.Context, initSync bool) (int, error) {
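
getBlockDbPath now keys the non-beacon cache paths by shard ID, so two shard chains on one node no longer share a cache directory. The path scheme in isolation (non-beacon branch copied from the diff):

package main

import (
	"fmt"
	"path/filepath"
)

// blockDbPath is the non-beacon branch of getBlockDbPath as changed above.
func blockDbPath(shardID uint32, loopID int, dbDir string) string {
	if loopID >= 0 {
		return fmt.Sprintf("%s_%d_%d", filepath.Join(dbDir, "cache/blocks_db"), shardID, loopID)
	}
	return fmt.Sprintf("%s_%d", filepath.Join(dbDir, "cache/blocks_db_main"), shardID)
}

func main() {
	fmt.Println(blockDbPath(1, -1, "data")) // data/cache/blocks_db_main_1
	fmt.Println(blockDbPath(1, 0, "data"))  // data/cache/blocks_db_1_0
}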

api/service/stagedsync/stagedsync.go

@@ -83,6 +83,9 @@ type StagedSync struct {
     StagedSyncTurboMode bool
     // log the full sync progress in console
     LogProgress bool
+    // log every single process and error to help to debug the syncing
+    // DebugMode is not accessible to the end user and is only an aid for development
+    DebugMode bool
 }

 // BlockWithSig the serialization structure for request DownloaderRequest_BLOCKWITHSIG
@@ -258,7 +261,8 @@ func New(ctx context.Context,
     verifyAllSig bool,
     verifyHeaderBatchSize uint64,
     insertChainBatchSize int,
-    logProgress bool) *StagedSync {
+    logProgress bool,
+    debugMode bool) *StagedSync {

     revertStages := make([]*Stage, len(stagesList))
     for i, stageIndex := range revertOrder {
@@ -312,6 +316,7 @@ func New(ctx context.Context,
         VerifyHeaderBatchSize: verifyHeaderBatchSize,
         InsertChainBatchSize:  insertChainBatchSize,
         LogProgress:           logProgress,
+        DebugMode:             debugMode,
     }
 }

api/service/stagedsync/syncing.go

@@ -64,6 +64,7 @@ func CreateStagedSync(
     verifyHeaderBatchSize uint64,
     insertChainBatchSize int,
     logProgress bool,
+    debugMode bool,
 ) (*StagedSync, error) {
     ctx := context.Background()
@@ -134,6 +135,7 @@ func CreateStagedSync(
         verifyHeaderBatchSize,
         insertChainBatchSize,
         logProgress,
+        debugMode,
     ), nil
 }

cmd/harmony/default.go

@@ -178,6 +178,7 @@ var defaultStagedSyncConfig = harmonyconfig.StagedSyncConfig{
     MaxMemSyncCycleSize: 1024,  // max number of blocks to use a single transaction for staged sync
     UseMemDB:            true,  // it uses memory by default. set it to false to use disk
     LogProgress:         false, // log the full sync progress in console
+    DebugMode:           false, // log every single process and error to help to debug the syncing (DebugMode is not accessible to the end user and is only an aid for development)
 }

 var (

cmd/harmony/main.go

@@ -606,6 +606,7 @@ func createGlobalConfig(hc harmonyconfig.HarmonyConfig) (*nodeconfig.ConfigType,
     nodeConfig.VerifyHeaderBatchSize = hc.Sync.StagedSyncCfg.VerifyHeaderBatchSize
     nodeConfig.InsertChainBatchSize = hc.Sync.StagedSyncCfg.InsertChainBatchSize
     nodeConfig.LogProgress = hc.Sync.StagedSyncCfg.LogProgress
+    nodeConfig.DebugMode = hc.Sync.StagedSyncCfg.DebugMode

     // P2P private key is used for secure message transfer between p2p nodes.
     nodeConfig.P2PPriKey, _, err = utils.LoadKeyFromFile(hc.P2P.KeyFile)
     if err != nil {
@@ -942,7 +943,9 @@ func setupStagedSyncService(node *node.Node, host p2p.Host, hc harmonyconfig.Har
         SmHardLowCap: hc.Sync.DiscHardLowCap,
         SmHiCap:      hc.Sync.DiscHighCap,
         SmDiscBatch:  hc.Sync.DiscBatch,
-        LogProgress:  node.NodeConfig.LogProgress,
+        UseMemDB:     hc.Sync.StagedSyncCfg.UseMemDB,
+        LogProgress:  hc.Sync.StagedSyncCfg.LogProgress,
+        DebugMode:    hc.Sync.StagedSyncCfg.DebugMode,
     }

     // If we are running side chain, we will need to do some extra works for beacon
@@ -954,7 +957,7 @@ func setupStagedSyncService(node *node.Node, host p2p.Host, hc harmonyconfig.Har
         }
     }

     //Setup stream sync service
-    s := stagedstreamsync.NewService(host, blockchains, sConfig, hc.General.DataDir)
+    s := stagedstreamsync.NewService(host, blockchains, node.Consensus, sConfig, hc.General.DataDir)

     node.RegisterService(service.StagedStreamSync, s)

consensus/consensus_v2.go

@@ -157,10 +157,11 @@ func (consensus *Consensus) finalCommit() {
             Msg("[finalCommit] Unable to construct Committed message")
         return
     }
-    msgToSend, FBFTMsg :=
-        network.Bytes,
-        network.FBFTMsg
-    commitSigAndBitmap := FBFTMsg.Payload
+    var (
+        msgToSend          = network.Bytes
+        FBFTMsg            = network.FBFTMsg
+        commitSigAndBitmap = FBFTMsg.Payload
+    )
     consensus.fBFTLog.AddVerifiedMessage(FBFTMsg)
     // find correct block content
     curBlockHash := consensus.blockHash

consensus/validator.go

@@ -132,7 +132,7 @@ func (consensus *Consensus) validateNewBlock(recvMsg *FBFTMessage) (*types.Block
     if err := consensus.verifyBlock(&blockObj); err != nil {
         consensus.getLogger().Error().Err(err).Msg("[validateNewBlock] Block verification failed")
-        return nil, errors.New("Block verification failed")
+        return nil, errors.Errorf("Block verification failed: %s", err.Error())
     }
     return &blockObj, nil
 }
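
errors.Errorf embeds the cause's text, which is enough for the log line here, but it does not preserve the error chain the way errors.Wrap would. A quick comparison (assumes github.com/pkg/errors v0.9+, where wrapped errors support stdlib unwrapping):

package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	cause := stderrors.New("commit sig mismatch")
	e1 := errors.Errorf("Block verification failed: %s", cause.Error())
	e2 := errors.Wrap(cause, "Block verification failed")
	fmt.Println(stderrors.Is(e1, cause)) // false: only the text is kept
	fmt.Println(stderrors.Is(e2, cause)) // true: the cause chain is preserved
}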

core/blockchain_impl.go

@@ -221,6 +221,7 @@ type BlockChainImpl struct {
     badBlocks              *lru.Cache // Bad block cache
     pendingSlashes         slash.Records
     maxGarbCollectedBlkNum int64
+    leaderRotationMeta     leaderRotationMeta

     options Options
 }
@@ -359,6 +360,12 @@ func newBlockChainWithOptions(
         bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Hash())
     }

+    curHeader := bc.CurrentBlock().Header()
+    err = bc.buildLeaderRotationMeta(curHeader)
+    if err != nil {
+        return nil, errors.WithMessage(err, "failed to build leader rotation meta")
+    }
+
     // Take ownership of this particular state
     go bc.update()
     return bc, nil
@@ -1479,8 +1486,11 @@ func (bc *BlockChainImpl) WriteBlockWithState(
     defer bc.mu.Unlock()

     currentBlock := bc.CurrentBlock()
-    if currentBlock == nil || block.ParentHash() != currentBlock.Hash() {
-        return NonStatTy, errors.New("Hash of parent block doesn't match the current block hash")
+    if currentBlock == nil {
+        return NonStatTy, errors.New("Current block is nil")
+    }
+    if block.ParentHash() != currentBlock.Hash() {
+        return NonStatTy, errors.Errorf("Hash of parent block %s doesn't match the current block hash %s", currentBlock.Hash().Hex(), block.ParentHash().Hex())
     }

     // Commit state object changes to in-memory trie
@ -1650,20 +1660,52 @@ func (bc *BlockChainImpl) InsertChain(chain types.Blocks, verifyHeaders bool) (i
return n, err return n, err
} }
// buildLeaderRotationMeta builds leader rotation meta if feature is activated.
func (bc *BlockChainImpl) buildLeaderRotationMeta(curHeader *block.Header) error {
if !bc.chainConfig.IsLeaderRotation(curHeader.Epoch()) {
return nil
}
if curHeader.NumberU64() == 0 {
return errors.New("current header is genesis")
}
curPubKey, err := bc.getLeaderPubKeyFromCoinbase(curHeader)
if err != nil {
return err
}
for i := curHeader.NumberU64() - 1; i >= 0; i-- {
header := bc.GetHeaderByNumber(i)
if header == nil {
return errors.New("header is nil")
}
blockPubKey, err := bc.getLeaderPubKeyFromCoinbase(header)
if err != nil {
return err
}
if curPubKey.Bytes != blockPubKey.Bytes || curHeader.Epoch().Uint64() != header.Epoch().Uint64() {
for j := i; i <= curHeader.NumberU64(); j++ {
header := bc.GetHeaderByNumber(i)
if header == nil {
return errors.New("header is nil")
}
err := bc.saveLeaderRotationMeta(header)
if err != nil {
utils.Logger().Error().Err(err).Msg("save leader continuous blocks count error")
return err
}
}
return nil
}
}
return errors.New("no leader rotation meta to save")
}
func (bc *BlockChainImpl) saveLeaderRotationMeta(h *block.Header) error { func (bc *BlockChainImpl) saveLeaderRotationMeta(h *block.Header) error {
blockPubKey, err := bc.getLeaderPubKeyFromCoinbase(h) blockPubKey, err := bc.getLeaderPubKeyFromCoinbase(h)
if err != nil { if err != nil {
return err return err
} }
type stored struct {
pub []byte var s = bc.leaderRotationMeta
epoch uint64
count uint64
shifts uint64
}
var s stored
// error is possible here only on the first iteration, so we can ignore it
s.pub, s.epoch, s.count, s.shifts, _ = rawdb.ReadLeaderRotationMeta(bc.db)
// increase counter only if the same leader and epoch // increase counter only if the same leader and epoch
if bytes.Equal(s.pub, blockPubKey.Bytes[:]) && s.epoch == h.Epoch().Uint64() { if bytes.Equal(s.pub, blockPubKey.Bytes[:]) && s.epoch == h.Epoch().Uint64() {
@ -1679,11 +1721,9 @@ func (bc *BlockChainImpl) saveLeaderRotationMeta(h *block.Header) error {
if s.epoch != h.Epoch().Uint64() { if s.epoch != h.Epoch().Uint64() {
s.shifts = 0 s.shifts = 0
} }
s.epoch = h.Epoch().Uint64()
bc.leaderRotationMeta = s
err = rawdb.WriteLeaderRotationMeta(bc.db, blockPubKey.Bytes[:], h.Epoch().Uint64(), s.count, s.shifts)
if err != nil {
return err
}
return nil return nil
} }
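The counter semantics are easiest to see in isolation. Below is a minimal sketch with a hypothetical next helper; the branch for a changed leader is an assumption on our part, since those lines are elided between the hunks above:

package main

import (
	"bytes"
	"fmt"
)

type meta struct {
	pub    []byte
	epoch  uint64
	count  uint64
	shifts uint64
}

// next applies one block to the running meta, mirroring the rule in the diff.
func next(prev meta, pub []byte, epoch uint64) meta {
	s := prev
	// increase counter only if the same leader and epoch (from the diff)
	if bytes.Equal(s.pub, pub) && s.epoch == epoch {
		s.count++
	} else {
		s.count = 1 // assumed reset for a new run
	}
	if !bytes.Equal(s.pub, pub) {
		s.shifts++ // assumed: a leader change counts as one rotation
	}
	if s.epoch != epoch {
		s.shifts = 0 // new epoch: shift counting starts over (from the diff)
	}
	s.pub, s.epoch = pub, epoch
	return s
}

func main() {
	m := meta{}
	m = next(m, []byte("A"), 0)
	m = next(m, []byte("A"), 0)
	m = next(m, []byte("B"), 0)
	fmt.Printf("%+v\n", m) // count:1 shifts:2 after the switch to leader B
}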

@ -0,0 +1,8 @@
package core
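// leaderRotationMeta is the in-memory copy of the persisted leader rotation
// state: the current leader's serialized BLS public key, the epoch it leads
// in, how many consecutive blocks it has produced, and how many leader
// shifts have happened in that epoch.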
type leaderRotationMeta struct {
pub []byte
epoch uint64
count uint64
shifts uint64
}

@ -119,8 +119,8 @@ func (p *StateProcessor) Process(
usedGas = new(uint64) usedGas = new(uint64)
header = block.Header() header = block.Header()
allLogs []*types.Log allLogs []*types.Log
gp = new(GasPool).AddGas(block.GasLimit()) gp = new(GasPool).AddGas(block.GasLimit())
blockStakeMsgs []staking.StakeMsg = make([]staking.StakeMsg, 0) blockStakeMsgs = make([]staking.StakeMsg, 0)
) )
beneficiary, err := p.bc.GetECDSAFromCoinbase(header) beneficiary, err := p.bc.GetECDSAFromCoinbase(header)
@ -202,7 +202,7 @@ func (p *StateProcessor) Process(
receipts, outcxs, incxs, block.StakingTransactions(), slashes, sigsReady, func() uint64 { return header.ViewID().Uint64() }, receipts, outcxs, incxs, block.StakingTransactions(), slashes, sigsReady, func() uint64 { return header.ViewID().Uint64() },
) )
if err != nil { if err != nil {
return nil, nil, nil, nil, 0, nil, statedb, errors.New("[Process] Cannot finalize block") return nil, nil, nil, nil, 0, nil, statedb, errors.WithMessage(err, "[Process] Cannot finalize block")
} }
result := &ProcessorResult{ result := &ProcessorResult{
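The switch from errors.New to errors.WithMessage matters for callers: the original failure stays unwrappable instead of being discarded. A small sketch using github.com/pkg/errors, where finalize is a hypothetical stand-in for the failing Finalize call:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// finalize is a hypothetical stand-in for the call that fails above.
func finalize() error { return errors.New("insufficient signatures") }

func main() {
	if err := finalize(); err != nil {
		wrapped := errors.WithMessage(err, "[Process] Cannot finalize block")
		fmt.Println(wrapped)                      // [Process] Cannot finalize block: insufficient signatures
		fmt.Println(errors.Cause(wrapped) == err) // true: the cause is preserved
	}
}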

@ -0,0 +1,83 @@
package core_test
import (
"fmt"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/harmony-one/harmony/consensus"
"github.com/harmony-one/harmony/consensus/quorum"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/chain"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
"github.com/harmony-one/harmony/internal/registry"
"github.com/harmony-one/harmony/internal/shardchain"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/multibls"
"github.com/harmony-one/harmony/node"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/shard"
"github.com/stretchr/testify/require"
)
var testDBFactory = &shardchain.MemDBFactory{}
func TestAddNewBlock(t *testing.T) {
blsKey := bls.RandPrivateKey()
pubKey := blsKey.GetPublicKey()
leader := p2p.Peer{IP: "127.0.0.1", Port: "9882", ConsensusPubKey: pubKey}
priKey, _, _ := utils.GenKeyP2P("127.0.0.1", "9902")
host, err := p2p.NewHost(p2p.HostConfig{
Self: &leader,
BLSKey: priKey,
})
if err != nil {
t.Fatalf("newhost failure: %v", err)
}
engine := chain.NewEngine()
chainconfig := nodeconfig.GetShardConfig(shard.BeaconChainShardID).GetNetworkType().ChainConfig()
collection := shardchain.NewCollection(
nil, testDBFactory, &core.GenesisInitializer{NetworkType: nodeconfig.GetShardConfig(shard.BeaconChainShardID).GetNetworkType()}, engine, &chainconfig,
)
decider := quorum.NewDecider(
quorum.SuperMajorityVote, shard.BeaconChainShardID,
)
blockchain, err := collection.ShardChain(shard.BeaconChainShardID)
if err != nil {
t.Fatal("cannot get blockchain")
}
reg := registry.New().SetBlockchain(blockchain)
consensus, err := consensus.New(
host, shard.BeaconChainShardID, multibls.GetPrivateKeys(blsKey), reg, decider, 3, false,
)
if err != nil {
t.Fatalf("Cannot craeate consensus: %v", err)
}
nodeconfig.SetNetworkType(nodeconfig.Testnet)
var block *types.Block
node := node.New(host, consensus, engine, collection, nil, nil, nil, nil, nil, reg)
commitSigs := make(chan []byte, 1)
commitSigs <- []byte{}
block, err = node.Worker.FinalizeNewBlock(
commitSigs, func() uint64 { return uint64(0) }, common.Address{}, nil, nil,
)
if err != nil {
t.Fatal("cannot finalize new block")
}
nn := node.Blockchain().CurrentBlock()
t.Log("[*]", nn.NumberU64(), nn.Hash().Hex(), nn.ParentHash())
_, err = blockchain.InsertChain([]*types.Block{block}, false)
require.NoError(t, err, "error when adding new block")
pk, epoch, count, shifts, err := blockchain.LeaderRotationMeta()
require.NoError(t, err, "error when reading leader rotation meta")
t.Log("pk", pk, "epoch", epoch, "count", count, "shifts", shifts)
t.Log("#", block.Header().NumberU64(), node.Blockchain().CurrentBlock().NumberU64(), block.Hash().Hex(), block.ParentHash())
err = blockchain.Rollback([]common.Hash{block.Hash()})
require.NoError(t, err, "error when rolling back")
}

@ -343,6 +343,7 @@ type StagedSyncConfig struct {
VerifyHeaderBatchSize uint64 // batch size to verify header before insert to chain VerifyHeaderBatchSize uint64 // batch size to verify header before insert to chain
UseMemDB bool // it uses memory by default. set it to false to use disk UseMemDB bool // it uses memory by default. set it to false to use disk
LogProgress bool // log the full sync progress in console LogProgress bool // log the full sync progress in console
DebugMode bool // log every process and error to help debug syncing issues (DebugMode is not accessible to end users; it is only a development aid)
} }
type PriceLimit int64 type PriceLimit int64

@ -58,6 +58,31 @@ const (
Localnet = "localnet" Localnet = "localnet"
) )
// ChainConfig returns the chain configuration for the network type.
func (t NetworkType) ChainConfig() params.ChainConfig {
switch t {
case Mainnet:
return *params.MainnetChainConfig
case Pangaea:
return *params.PangaeaChainConfig
case Partner:
return *params.PartnerChainConfig
case Stressnet:
return *params.StressnetChainConfig
case Localnet:
return *params.LocalnetChainConfig
default:
return *params.TestnetChainConfig
}
}
func (n NetworkType) String() string {
if n == "" {
return Testnet // default to testnet
}
return string(n)
}
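A minimal usage sketch for the ChainConfig accessor above, using the import path that appears in the new test file; printing ChainID assumes the usual params.ChainConfig field name. Note the accessor returns the config by value, so callers get a private copy rather than a pointer to the shared default:

package main

import (
	"fmt"

	nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
)

func main() {
	// the network-type constants are untyped strings, so convert explicitly
	cfg := nodeconfig.NetworkType(nodeconfig.Localnet).ChainConfig()
	fmt.Println("chain ID:", cfg.ChainID)
}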
// Global is the index of the global node configuration // Global is the index of the global node configuration
const ( const (
Global = 0 Global = 0
@ -93,6 +118,7 @@ type ConfigType struct {
VerifyAllSig bool // verify signatures for all blocks regardless of height and batch size VerifyAllSig bool // verify signatures for all blocks regardless of height and batch size
VerifyHeaderBatchSize uint64 // batch size to verify header before insert to chain VerifyHeaderBatchSize uint64 // batch size to verify header before insert to chain
LogProgress bool // log the full sync progress in console LogProgress bool // log the full sync progress in console
DebugMode bool // log every process and error to help debug syncing issues
NtpServer string NtpServer string
StringRole string StringRole string
P2PPriKey p2p_crypto.PrivKey `json:"-"` P2PPriKey p2p_crypto.PrivKey `json:"-"`
@ -351,21 +377,3 @@ func (conf *ConfigType) ValidateConsensusKeysForSameShard(pubkeys multibls.Publi
} }
return nil return nil
} }
// ChainConfig returns the chain configuration for the network type.
func (t NetworkType) ChainConfig() params.ChainConfig {
switch t {
case Mainnet:
return *params.MainnetChainConfig
case Pangaea:
return *params.PangaeaChainConfig
case Partner:
return *params.PartnerChainConfig
case Stressnet:
return *params.StressnetChainConfig
case Localnet:
return *params.LocalnetChainConfig
default:
return *params.TestnetChainConfig
}
}

@ -46,13 +46,11 @@ func TestAddNewBlock(t *testing.T) {
t.Fatal("cannot get blockchain") t.Fatal("cannot get blockchain")
} }
reg := registry.New().SetBlockchain(blockchain) reg := registry.New().SetBlockchain(blockchain)
consensus, err := consensus.New( consensus, err := consensus.New(host, shard.BeaconChainShardID, multibls.GetPrivateKeys(blsKey), reg, decider, 3, false)
host, shard.BeaconChainShardID, multibls.GetPrivateKeys(blsKey), reg, decider, 3, false,
)
if err != nil { if err != nil {
t.Fatalf("Cannot craeate consensus: %v", err) t.Fatalf("Cannot craeate consensus: %v", err)
} }
nodeconfig.SetNetworkType(nodeconfig.Devnet) nodeconfig.SetNetworkType(nodeconfig.Testnet)
node := New(host, consensus, engine, collection, nil, nil, nil, nil, nil, reg) node := New(host, consensus, engine, collection, nil, nil, nil, nil, nil, reg)
txs := make(map[common.Address]types.Transactions) txs := make(map[common.Address]types.Transactions)

@ -63,10 +63,8 @@ func TestFinalizeNewBlockAsync(t *testing.T) {
node.Worker.CommitTransactions( node.Worker.CommitTransactions(
txs, stks, common.Address{}, txs, stks, common.Address{},
) )
commitSigs := make(chan []byte) commitSigs := make(chan []byte, 1)
go func() { commitSigs <- []byte{}
commitSigs <- []byte{}
}()
block, _ := node.Worker.FinalizeNewBlock( block, _ := node.Worker.FinalizeNewBlock(
commitSigs, func() uint64 { return 0 }, common.Address{}, nil, nil, commitSigs, func() uint64 { return 0 }, common.Address{}, nil, nil,
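The test change above swaps an unbuffered channel plus a helper goroutine for a channel of capacity 1: a one-send buffer lets the producer run ahead of the consumer without blocking. A minimal sketch:

package main

import "fmt"

func main() {
	commitSigs := make(chan []byte, 1)
	commitSigs <- []byte{} // completes immediately: the buffer holds one send
	fmt.Println("sig len:", len(<-commitSigs))
}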

@ -122,7 +122,8 @@ func (node *Node) createStagedSync(bc core.BlockChain) *stagedsync.StagedSync {
node.NodeConfig.VerifyAllSig, node.NodeConfig.VerifyAllSig,
node.NodeConfig.VerifyHeaderBatchSize, node.NodeConfig.VerifyHeaderBatchSize,
node.NodeConfig.InsertChainBatchSize, node.NodeConfig.InsertChainBatchSize,
node.NodeConfig.LogProgress); err != nil { node.NodeConfig.LogProgress,
node.NodeConfig.DebugMode); err != nil {
return nil return nil
} else { } else {
return s return s
@ -352,7 +353,7 @@ func (node *Node) NodeSyncing() {
if node.HarmonyConfig.TiKV.Role == tikv.RoleWriter { if node.HarmonyConfig.TiKV.Role == tikv.RoleWriter {
node.supportSyncing() // the writer needs to be in sync with its other peers node.supportSyncing() // the writer needs to be in sync with its other peers
} }
} else if !node.HarmonyConfig.General.IsOffline && node.HarmonyConfig.DNSSync.Client { } else if !node.HarmonyConfig.General.IsOffline && (node.HarmonyConfig.DNSSync.Client || node.HarmonyConfig.Sync.Downloader) {
node.supportSyncing() // for non-writer-reader mode a.k.a tikv nodes node.supportSyncing() // for non-writer-reader mode a.k.a tikv nodes
} }
} }
@ -372,6 +373,11 @@ func (node *Node) supportSyncing() {
go node.SendNewBlockToUnsync() go node.SendNewBlockToUnsync()
} }
// if the stream sync client is running, don't create other sync client instances
if node.HarmonyConfig.Sync.Downloader {
return
}
if !node.NodeConfig.StagedSync && node.stateSync == nil { if !node.NodeConfig.StagedSync && node.stateSync == nil {
node.stateSync = node.createStateSync(node.Blockchain()) node.stateSync = node.createStateSync(node.Blockchain())
utils.Logger().Debug().Msg("[SYNC] initialized state sync") utils.Logger().Debug().Msg("[SYNC] initialized state sync")

@ -12,17 +12,17 @@ import (
"time" "time"
"github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/routing"
dht "github.com/libp2p/go-libp2p-kad-dht" dht "github.com/libp2p/go-libp2p-kad-dht"
libp2p_pubsub "github.com/libp2p/go-libp2p-pubsub" libp2p_pubsub "github.com/libp2p/go-libp2p-pubsub"
libp2p_config "github.com/libp2p/go-libp2p/config" libp2p_config "github.com/libp2p/go-libp2p/config"
libp2p_crypto "github.com/libp2p/go-libp2p/core/crypto" libp2p_crypto "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/host"
libp2p_host "github.com/libp2p/go-libp2p/core/host" libp2p_host "github.com/libp2p/go-libp2p/core/host"
libp2p_network "github.com/libp2p/go-libp2p/core/network" libp2p_network "github.com/libp2p/go-libp2p/core/network"
libp2p_peer "github.com/libp2p/go-libp2p/core/peer" libp2p_peer "github.com/libp2p/go-libp2p/core/peer"
libp2p_peerstore "github.com/libp2p/go-libp2p/core/peerstore" libp2p_peerstore "github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/routing"
"github.com/libp2p/go-libp2p/p2p/net/connmgr" "github.com/libp2p/go-libp2p/p2p/net/connmgr"
"github.com/libp2p/go-libp2p/p2p/security/noise" "github.com/libp2p/go-libp2p/p2p/security/noise"

@ -2,7 +2,6 @@ package sync
import ( import (
"context" "context"
"fmt"
"strconv" "strconv"
"time" "time"
@ -180,7 +179,8 @@ func (p *Protocol) HandleStream(raw libp2p_network.Stream) {
Msg("failed to add new stream") Msg("failed to add new stream")
return return
} }
fmt.Println("Connected to", raw.Conn().RemotePeer().String(), "(", st.ProtoID(), ")", "my ID: ", raw.Conn().LocalPeer().String()) //to get my ID use raw.Conn().LocalPeer().String()
p.logger.Info().Msgf("Connected to %s (%s)", raw.Conn().RemotePeer().String(), st.ProtoID())
st.run() st.run()
} }

@ -11,7 +11,7 @@ import (
nodeconfig "github.com/harmony-one/harmony/internal/configs/node" nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
"github.com/hashicorp/go-version" "github.com/hashicorp/go-version"
libp2p_proto "github.com/libp2p/go-libp2p-core/protocol" libp2p_proto "github.com/libp2p/go-libp2p/core/protocol"
"github.com/pkg/errors" "github.com/pkg/errors"
) )

@ -84,7 +84,7 @@ fi
echo "Running go test..." echo "Running go test..."
# Fix https://github.com/golang/go/issues/44129#issuecomment-788351567 # Fix https://github.com/golang/go/issues/44129#issuecomment-788351567
go get -t ./... go get -t ./...
if go test -v -count=1 -vet=all -race ./... if go test -count=1 -vet=all -race ./...
then then
echo "go test succeeded." echo "go test succeeded."
else else
