Merge pull request #4465 from harmony-one/feature/fastsync
Initial Version of Fast Sync (pull/4589/head)
commit c82599b6f1
@@ -0,0 +1,146 @@
package stagedstreamsync

import (
	"errors"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/rlp"
)

// ProofSet stores a set of trie nodes. It implements trie.Database and can also
// act as a cache for another trie.Database.
type ProofSet struct {
	nodes map[string][]byte
	order []string

	dataSize int
	lock     sync.RWMutex
}

// NewProofSet creates an empty node set
func NewProofSet() *ProofSet {
	return &ProofSet{
		nodes: make(map[string][]byte),
	}
}

// Put stores a new node in the set
func (db *ProofSet) Put(key []byte, value []byte) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	if _, ok := db.nodes[string(key)]; ok {
		return nil
	}
	keystr := string(key)

	db.nodes[keystr] = common.CopyBytes(value)
	db.order = append(db.order, keystr)
	db.dataSize += len(value)

	return nil
}

// Delete removes a node from the set
func (db *ProofSet) Delete(key []byte) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	delete(db.nodes, string(key))
	return nil
}

// Get returns a stored node
func (db *ProofSet) Get(key []byte) ([]byte, error) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if entry, ok := db.nodes[string(key)]; ok {
		return entry, nil
	}
	return nil, errors.New("not found")
}

// Has returns true if the node set contains the given key
func (db *ProofSet) Has(key []byte) (bool, error) {
	_, err := db.Get(key)
	return err == nil, nil
}

// KeyCount returns the number of nodes in the set
func (db *ProofSet) KeyCount() int {
	db.lock.RLock()
	defer db.lock.RUnlock()

	return len(db.nodes)
}

// DataSize returns the aggregated data size of nodes in the set
func (db *ProofSet) DataSize() int {
	db.lock.RLock()
	defer db.lock.RUnlock()

	return db.dataSize
}

// List converts the node set to a ProofList
func (db *ProofSet) List() ProofList {
	db.lock.RLock()
	defer db.lock.RUnlock()

	var values ProofList
	for _, key := range db.order {
		values = append(values, db.nodes[key])
	}
	return values
}

// Store writes the contents of the set to the given database
func (db *ProofSet) Store(target ethdb.KeyValueWriter) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	for key, value := range db.nodes {
		target.Put([]byte(key), value)
	}
}

// ProofList stores an ordered list of trie nodes. It implements ethdb.KeyValueWriter.
type ProofList []rlp.RawValue

// Store writes the contents of the list to the given database
func (n ProofList) Store(db ethdb.KeyValueWriter) {
	for _, node := range n {
		db.Put(crypto.Keccak256(node), node)
	}
}

// Set converts the node list to a ProofSet
func (n ProofList) Set() *ProofSet {
	db := NewProofSet()
	n.Store(db)
	return db
}

// Put stores a new node at the end of the list
func (n *ProofList) Put(key []byte, value []byte) error {
	*n = append(*n, value)
	return nil
}

// Delete panics as there's no reason to remove a node from the list.
func (n *ProofList) Delete(key []byte) error {
	panic("not supported")
}

// DataSize returns the aggregated data size of nodes in the list
func (n ProofList) DataSize() int {
	var size int
	for _, node := range n {
		size += len(node)
	}
	return size
}
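Reviewer note, for context: a minimal usage sketch (hypothetical caller, not part of this diff) of how ProofSet and ProofList fit together. diskdb stands in for any ethdb.KeyValueWriter, and the fmt call is illustrative only.

	// Hypothetical usage: buffer proof nodes, inspect them, then persist them.
	set := NewProofSet()
	_ = set.Put([]byte("node-a"), []byte{0x01, 0x02})
	_ = set.Put([]byte("node-b"), []byte{0x03})
	fmt.Println(set.KeyCount(), set.DataSize()) // 2 3
	list := set.List() // values in insertion order
	list.Store(diskdb) // keys become Keccak256(value); set.Store(diskdb) would keep the original keys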
@@ -0,0 +1,84 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stagedstreamsync

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/holiman/uint256"
)

// hashSpace is the total size of the 256 bit hash space for accounts.
var hashSpace = new(big.Int).Exp(common.Big2, common.Big256, nil)

// hashRange is a utility to handle ranges of hashes: split up the
// hash-space into sections and 'walk' over them.
type hashRange struct {
	current *uint256.Int
	step    *uint256.Int
}

// newHashRange creates a new hashRange, initiated at the start position,
// and with the step set to fill the desired 'num' chunks.
func newHashRange(start common.Hash, num uint64) *hashRange {
	left := new(big.Int).Sub(hashSpace, start.Big())
	step := new(big.Int).Div(
		new(big.Int).Add(left, new(big.Int).SetUint64(num-1)),
		new(big.Int).SetUint64(num),
	)
	step256 := new(uint256.Int)
	step256.SetFromBig(step)

	return &hashRange{
		current: new(uint256.Int).SetBytes32(start[:]),
		step:    step256,
	}
}

// Next pushes the hash range to the next interval.
func (r *hashRange) Next() bool {
	next, overflow := new(uint256.Int).AddOverflow(r.current, r.step)
	if overflow {
		return false
	}
	r.current = next
	return true
}

// Start returns the first hash in the current interval.
func (r *hashRange) Start() common.Hash {
	return r.current.Bytes32()
}

// End returns the last hash in the current interval.
func (r *hashRange) End() common.Hash {
	// If the end overflows (non divisible range), return a shorter interval
	next, overflow := new(uint256.Int).AddOverflow(r.current, r.step)
	if overflow {
		return common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	}
	return next.SubUint64(next, 1).Bytes32()
}

// incHash returns the next hash, in lexicographical order (a.k.a plus one)
func incHash(h common.Hash) common.Hash {
	var a uint256.Int
	a.SetBytes32(h[:])
	a.AddUint64(&a, 1)
	return common.Hash(a.Bytes32())
}
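A quick worked illustration of the step formula (values computed from the code above, the loop itself is a hypothetical caller): starting at hash zero with num = 4, step = ceil(2^256 / 4) = 2^254, so the walk covers the account hash space in four equal quarters.

	// Hypothetical illustration: walk the whole hash space in 4 chunks.
	r := newHashRange(common.Hash{}, 4)
	for ok := true; ok; ok = r.Next() {
		fmt.Printf("%x .. %x\n", r.Start(), r.End())
	}
	// prints four quarter-ranges: 00..00-3f..ff, 40..00-7f..ff, 80..00-bf..ff, c0..00-ff..ff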
@@ -0,0 +1,180 @@
package stagedstreamsync

import (
	"sync"

	"github.com/harmony-one/harmony/core/types"
	sttypes "github.com/harmony-one/harmony/p2p/stream/types"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/rs/zerolog"
)

type ReceiptDownloadDetails struct {
	streamID sttypes.StreamID
}

type Received struct {
	streamID sttypes.StreamID
	block    *types.Block
	receipts types.Receipts
}

// receiptDownloadManager is the helper structure for get receipts request management
type receiptDownloadManager struct {
	chain blockChain
	tx    kv.RwTx

	targetBN   uint64
	requesting map[uint64]struct{}               // block numbers that have been assigned to workers but not yet received
	processing map[uint64]struct{}               // block numbers whose receipts have been received but not yet inserted
	retries    *prioritizedNumbers               // requests where an error happened
	rdd        map[uint64]ReceiptDownloadDetails // details about how each receipt was downloaded

	received map[uint64]Received

	logger zerolog.Logger
	lock   sync.Mutex
}

func newReceiptDownloadManager(tx kv.RwTx, chain blockChain, targetBN uint64, logger zerolog.Logger) *receiptDownloadManager {
	return &receiptDownloadManager{
		chain:      chain,
		tx:         tx,
		targetBN:   targetBN,
		requesting: make(map[uint64]struct{}),
		processing: make(map[uint64]struct{}),
		retries:    newPrioritizedNumbers(),
		rdd:        make(map[uint64]ReceiptDownloadDetails),
		received:   make(map[uint64]Received),

		logger: logger,
	}
}

// GetNextBatch gets the next batch of block numbers whose receipts should be requested
func (rdm *receiptDownloadManager) GetNextBatch(curHeight uint64) []uint64 {
	rdm.lock.Lock()
	defer rdm.lock.Unlock()

	cap := ReceiptsPerRequest

	bns := rdm.getBatchFromRetries(cap, curHeight)
	if len(bns) > 0 {
		cap -= len(bns)
		rdm.addBatchToRequesting(bns)
	}

	if rdm.availableForMoreTasks() {
		addBNs := rdm.getBatchFromUnprocessed(cap, curHeight)
		rdm.addBatchToRequesting(addBNs)
		bns = append(bns, addBNs...)
	}

	return bns
}

// HandleRequestError handles the error result
func (rdm *receiptDownloadManager) HandleRequestError(bns []uint64, err error) {
	rdm.lock.Lock()
	defer rdm.lock.Unlock()

	// add requested block numbers to retries
	for _, bn := range bns {
		delete(rdm.requesting, bn)
		rdm.retries.push(bn)
	}
}

// HandleRequestResult handles the get receipts result
func (rdm *receiptDownloadManager) HandleRequestResult(bns []uint64, receivedReceipts []types.Receipts, receivedBlocks []*types.Block, streamID sttypes.StreamID) error {
	rdm.lock.Lock()
	defer rdm.lock.Unlock()

	for i, bn := range bns {
		delete(rdm.requesting, bn)
		if !indexExists(receivedBlocks, i) || !indexExists(receivedReceipts, i) {
			rdm.retries.push(bn)
		} else {
			rdm.processing[bn] = struct{}{}
			rdm.rdd[bn] = ReceiptDownloadDetails{
				streamID: streamID,
			}
			rdm.received[bn] = Received{
				block:    receivedBlocks[i],
				receipts: receivedReceipts[i],
			}
		}
	}
	return nil
}

// SetDownloadDetails sets the download details for a batch of blocks
func (rdm *receiptDownloadManager) SetDownloadDetails(bns []uint64, streamID sttypes.StreamID) error {
	rdm.lock.Lock()
	defer rdm.lock.Unlock()

	for _, bn := range bns {
		rdm.rdd[bn] = ReceiptDownloadDetails{
			streamID: streamID,
		}
	}
	return nil
}

// GetDownloadDetails returns the download details for a certain block number
func (rdm *receiptDownloadManager) GetDownloadDetails(blockNumber uint64) (streamID sttypes.StreamID) {
	rdm.lock.Lock()
	defer rdm.lock.Unlock()

	return rdm.rdd[blockNumber].streamID
}

// getBatchFromRetries gets the block number batch to be re-requested from retries.
func (rdm *receiptDownloadManager) getBatchFromRetries(cap int, fromBlockNumber uint64) []uint64 {
	var (
		requestBNs []uint64
	)
	for cnt := 0; cnt < cap; cnt++ {
		bn := rdm.retries.pop()
		if bn == 0 {
			break // no more retries
		}
		if bn <= fromBlockNumber {
			continue
		}
		requestBNs = append(requestBNs, bn)
	}
	return requestBNs
}

// getBatchFromUnprocessed returns a batch of block numbers to be requested from the unprocessed set.
func (rdm *receiptDownloadManager) getBatchFromUnprocessed(cap int, curHeight uint64) []uint64 {
	var (
		requestBNs []uint64
	)
	bn := curHeight + 1
	// TODO: this algorithm can potentially be optimized.
	for cnt := 0; cnt < cap && bn <= rdm.targetBN; cnt++ {
		for bn <= rdm.targetBN {
			_, ok1 := rdm.requesting[bn]
			_, ok2 := rdm.processing[bn]
			if !ok1 && !ok2 {
				requestBNs = append(requestBNs, bn)
				bn++
				break
			}
			bn++
		}
	}
	return requestBNs
}

func (rdm *receiptDownloadManager) availableForMoreTasks() bool {
	return len(rdm.requesting) < SoftQueueCap
}

func (rdm *receiptDownloadManager) addBatchToRequesting(bns []uint64) {
	for _, bn := range bns {
		rdm.requesting[bn] = struct{}{}
	}
}
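A minimal sketch of the intended request lifecycle around this manager (fetchFromPeer is an assumed helper, not part of this diff): a worker reserves a batch, then either reports the receipts or pushes the numbers back for retry.

	// Hypothetical worker interaction with the manager.
	batch := rdm.GetNextBatch(curHeight)                // reserve block numbers to request
	receipts, blocks, stid, err := fetchFromPeer(batch) // assumed helper
	if err != nil {
		rdm.HandleRequestError(batch, err) // numbers return to the retry queue
	} else {
		_ = rdm.HandleRequestResult(batch, receipts, blocks, stid) // marks them as processing
	}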
@@ -0,0 +1,398 @@
package stagedstreamsync

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/harmony-one/harmony/core"
	"github.com/harmony-one/harmony/core/types"
	"github.com/harmony-one/harmony/internal/utils"
	sttypes "github.com/harmony-one/harmony/p2p/stream/types"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/pkg/errors"
)

type StageReceipts struct {
	configs StageReceiptsCfg
}

type StageReceiptsCfg struct {
	bc          core.BlockChain
	db          kv.RwDB
	blockDBs    []kv.RwDB
	concurrency int
	protocol    syncProtocol
	isBeacon    bool
	logProgress bool
}

func NewStageReceipts(cfg StageReceiptsCfg) *StageReceipts {
	return &StageReceipts{
		configs: cfg,
	}
}

func NewStageReceiptsCfg(bc core.BlockChain, db kv.RwDB, blockDBs []kv.RwDB, concurrency int, protocol syncProtocol, isBeacon bool, logProgress bool) StageReceiptsCfg {
	return StageReceiptsCfg{
		bc:          bc,
		db:          db,
		blockDBs:    blockDBs,
		concurrency: concurrency,
		protocol:    protocol,
		isBeacon:    isBeacon,
		logProgress: logProgress,
	}
}

// Exec progresses the receipts stage in the forward direction
func (r *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) {

	// only execute this stage in fast/snap sync mode
	if s.state.status.cycleSyncMode == FullSync {
		return nil
	}

	useInternalTx := tx == nil

	if invalidBlockRevert {
		return nil
	}

	// for short range sync, skip this stage
	if !s.state.initSync {
		return nil
	}

	maxHeight := s.state.status.targetBN
	currentHead := s.state.CurrentBlockNumber()
	if currentHead >= maxHeight {
		return nil
	}
	currProgress := uint64(0)
	targetHeight := s.state.currentCycle.TargetHeight

	if errV := CreateView(ctx, r.configs.db, tx, func(etx kv.Tx) error {
		if currProgress, err = s.CurrentStageProgress(etx); err != nil {
			return err
		}
		return nil
	}); errV != nil {
		return errV
	}

	if currProgress == 0 {
		currProgress = currentHead
	}

	if currProgress >= targetHeight {
		return nil
	}

	// size := uint64(0)
	startTime := time.Now()
	// startBlock := currProgress

	if r.configs.logProgress {
		fmt.Print("\033[s") // save the cursor position
	}

	if useInternalTx {
		var err error
		tx, err = r.configs.db.BeginRw(ctx)
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	for {
		// if there is nothing more to download, break out of the loop
		curBn := s.state.CurrentBlockNumber()
		if curBn == targetHeight {
			break
		}

		// calculate the block number range to download
		toBn := curBn + uint64(ReceiptsPerRequest*s.state.config.Concurrency)
		if toBn > targetHeight {
			toBn = targetHeight
		}

		// Fetch receipts from connected peers
		rdm := newReceiptDownloadManager(tx, r.configs.bc, toBn, s.state.logger)

		// Setup workers to fetch blocks from remote node
		var wg sync.WaitGroup

		for i := 0; i < s.state.config.Concurrency; i++ {
			wg.Add(1)
			go func() {
				// prepare db transactions
				txs := make([]kv.RwTx, r.configs.concurrency)
				for i := 0; i < r.configs.concurrency; i++ {
					txs[i], err = r.configs.blockDBs[i].BeginRw(ctx)
					if err != nil {
						return
					}
				}
				// rollback the transactions after the worker loop
				defer func() {
					for i := 0; i < r.configs.concurrency; i++ {
						txs[i].Rollback()
					}
				}()

				r.runReceiptWorkerLoop(ctx, rdm, &wg, s, txs, startTime)
			}()
		}
		wg.Wait()
		// insert all downloaded blocks and receipts into the chain
		if err := r.insertBlocksAndReceipts(ctx, rdm, toBn, s); err != nil {
			utils.Logger().Err(err).Msg(WrapStagedSyncMsg("InsertReceiptChain failed"))
		}
	}

	if useInternalTx {
		if err := tx.Commit(); err != nil {
			return err
		}
	}

	return nil
}

func (r *StageReceipts) insertBlocksAndReceipts(ctx context.Context, rdm *receiptDownloadManager, toBn uint64, s *StageState) error {
	if len(rdm.received) == 0 {
		return nil
	}
	var (
		bns       []uint64
		blocks    []*types.Block
		receipts  []types.Receipts
		streamIDs []sttypes.StreamID
	)
	// populate blocks and receipts in separate arrays;
	// this helps to sort blocks and receipts by block number
	for bn := s.state.CurrentBlockNumber() + 1; bn <= toBn; bn++ {
		if received, ok := rdm.received[bn]; !ok {
			return errors.New("some blocks are missing")
		} else {
			bns = append(bns, bn)
			blocks = append(blocks, received.block)
			receipts = append(receipts, received.receipts)
			streamIDs = append(streamIDs, received.streamID)
		}
	}
	// insert sorted blocks and receipts into the chain
	if inserted, err := r.configs.bc.InsertReceiptChain(blocks, receipts); err != nil {
		utils.Logger().Err(err).
			Interface("streams", streamIDs).
			Interface("block numbers", bns).
			Msg(WrapStagedSyncMsg("InsertReceiptChain failed"))
		rdm.HandleRequestError(bns, err)
		return fmt.Errorf("InsertReceiptChain failed: %s", err.Error())
	} else {
		if inserted != len(blocks) {
			utils.Logger().Warn().
				Interface("block numbers", bns).
				Int("inserted", inserted).
				Int("blocks to insert", len(blocks)).
				Msg(WrapStagedSyncMsg("InsertReceiptChain couldn't insert all downloaded blocks/receipts"))
		}
	}
	return nil
}

// runReceiptWorkerLoop creates a work loop for downloading receipts
func (r *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDownloadManager, wg *sync.WaitGroup, s *StageState, txs []kv.RwTx, startTime time.Time) {

	currentBlock := int(s.state.CurrentBlockNumber())
	gbm := s.state.gbm

	defer wg.Done()

	for {
		select {
		case <-ctx.Done():
			return
		default:
		}
		// get the next batch of block numbers
		curHeight := s.state.CurrentBlockNumber()
		batch := rdm.GetNextBatch(curHeight)
		if len(batch) == 0 {
			select {
			case <-ctx.Done():
				return
			case <-time.After(100 * time.Millisecond):
				return
			}
		}
		// retrieve the corresponding blocks from the cache db
		var hashes []common.Hash
		var blocks []*types.Block

		for _, bn := range batch {
			blkKey := marshalData(bn)
			loopID, _, errBDD := gbm.GetDownloadDetails(bn)
			if errBDD != nil {
				utils.Logger().Warn().
					Err(errBDD).
					Interface("block numbers", bn).
					Msg(WrapStagedSyncMsg("get block download details failed"))
				return
			}
			blockBytes, err := txs[loopID].GetOne(BlocksBucket, blkKey)
			if err != nil {
				return
			}
			sigBytes, err := txs[loopID].GetOne(BlockSignaturesBucket, blkKey)
			if err != nil {
				return
			}
			sz := len(blockBytes)
			if sz <= 1 {
				return
			}
			var block *types.Block
			if err := rlp.DecodeBytes(blockBytes, &block); err != nil {
				return
			}
			if sigBytes != nil {
				block.SetCurrentCommitSig(sigBytes)
			}
			if block.NumberU64() != bn {
				return
			}
			if block.Header().ReceiptHash() == emptyHash {
				return
			}
			// receiptHash := s.state.currentCycle.ReceiptHashes[bn]
			gbm.SetRootHash(bn, block.Header().Root())
			hashes = append(hashes, block.Header().Hash())
			blocks = append(blocks, block)
		}

		// download receipts
		receipts, stid, err := r.downloadReceipts(ctx, hashes)
		if err != nil {
			if !errors.Is(err, context.Canceled) {
				r.configs.protocol.StreamFailed(stid, "downloadRawBlocks failed")
			}
			utils.Logger().Error().
				Err(err).
				Str("stream", string(stid)).
				Interface("block numbers", batch).
				Msg(WrapStagedSyncMsg("downloadRawBlocks failed"))
			err = errors.Wrap(err, "request error")
			rdm.HandleRequestError(batch, err)
		} else {
			// handle request result
			rdm.HandleRequestResult(batch, receipts, blocks, stid)
			// log progress
			if r.configs.logProgress {
				// calculate the block download speed
				dt := time.Since(startTime).Seconds()
				speed := float64(0)
				if dt > 0 {
					speed = float64(len(rdm.rdd)) / dt
				}
				blockReceiptSpeed := fmt.Sprintf("%.2f", speed)

				fmt.Print("\033[u\033[K") // restore the cursor position and clear the line
				fmt.Println("downloaded blocks and receipts:", currentBlock+len(rdm.rdd), "/", int(rdm.targetBN), "(", blockReceiptSpeed, "BlocksAndReceipts/s", ")")
			}
		}
	}
}

func (r *StageReceipts) downloadReceipts(ctx context.Context, hs []common.Hash) ([]types.Receipts, sttypes.StreamID, error) {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	receipts, stid, err := r.configs.protocol.GetReceipts(ctx, hs)
	if err != nil {
		return nil, stid, err
	}
	if err := validateGetReceiptsResult(hs, receipts); err != nil {
		return nil, stid, err
	}
	return receipts, stid, nil
}

func validateGetReceiptsResult(requested []common.Hash, result []types.Receipts) error {
	// TODO: validate each receipt here

	return nil
}

func (r *StageReceipts) saveProgress(ctx context.Context, s *StageState, progress uint64, tx kv.RwTx) (err error) {
	useInternalTx := tx == nil
	if useInternalTx {
		var err error
		tx, err = r.configs.db.BeginRw(ctx)
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	// save progress
	if err = s.Update(tx, progress); err != nil {
		utils.Logger().Error().
			Err(err).
			Msgf("[STAGED_SYNC] saving progress for receipt stage failed")
		return ErrSavingBodiesProgressFail
	}

	if useInternalTx {
		if err := tx.Commit(); err != nil {
			return err
		}
	}
	return nil
}

func (r *StageReceipts) Revert(ctx context.Context, firstCycle bool, u *RevertState, s *StageState, tx kv.RwTx) (err error) {
	useInternalTx := tx == nil
	if useInternalTx {
		tx, err = r.configs.db.BeginRw(ctx)
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	if err = u.Done(tx); err != nil {
		return err
	}

	if useInternalTx {
		if err = tx.Commit(); err != nil {
			return err
		}
	}
	return nil
}

func (r *StageReceipts) CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) (err error) {
	useInternalTx := tx == nil
	if useInternalTx {
		tx, err = r.configs.db.BeginRw(ctx)
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	if useInternalTx {
		if err = tx.Commit(); err != nil {
			return err
		}
	}
	return nil
}
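For reference, the outer loop of Exec sizes each download window as curBn + ReceiptsPerRequest*Concurrency, capped at the target height. A hedged numeric sketch of that arithmetic (the constant values below are assumed, the real ones are defined elsewhere in this package):

	// Assumed example values, for illustration only.
	const receiptsPerRequest = 10 // stand-in for ReceiptsPerRequest
	concurrency := 4
	curBn, targetHeight := uint64(1000), uint64(1025)

	toBn := curBn + uint64(receiptsPerRequest*concurrency) // 1040
	if toBn > targetHeight {
		toBn = targetHeight // capped to 1025
	}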
@@ -0,0 +1,310 @@
package stagedstreamsync

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/harmony/core"
	"github.com/harmony-one/harmony/internal/utils"
	sttypes "github.com/harmony-one/harmony/p2p/stream/types"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/rs/zerolog"
)

type StageStateSync struct {
	configs StageStateSyncCfg
}

type StageStateSyncCfg struct {
	bc          core.BlockChain
	db          kv.RwDB
	concurrency int
	protocol    syncProtocol
	logger      zerolog.Logger
	logProgress bool
}

func NewStageStateSync(cfg StageStateSyncCfg) *StageStateSync {
	return &StageStateSync{
		configs: cfg,
	}
}

func NewStageStateSyncCfg(bc core.BlockChain,
	db kv.RwDB,
	concurrency int,
	protocol syncProtocol,
	logger zerolog.Logger,
	logProgress bool) StageStateSyncCfg {

	return StageStateSyncCfg{
		bc:          bc,
		db:          db,
		concurrency: concurrency,
		protocol:    protocol,
		logger:      logger,
		logProgress: logProgress,
	}
}

// Exec progresses the States stage in the forward direction
func (sss *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) {

	// for short range sync, skip this stage
	if !s.state.initSync {
		return nil
	}

	// only execute this stage in fast/snap sync mode and once we reach the pivot
	if s.state.status.pivotBlock == nil ||
		s.state.CurrentBlockNumber() != s.state.status.pivotBlock.NumberU64() ||
		s.state.status.statesSynced {
		return nil
	}

	// maxHeight := s.state.status.targetBN
	// currentHead := s.state.CurrentBlockNumber()
	// if currentHead >= maxHeight {
	// 	return nil
	// }
	// currProgress := s.state.CurrentBlockNumber()
	// targetHeight := s.state.currentCycle.TargetHeight

	// if errV := CreateView(ctx, sss.configs.db, tx, func(etx kv.Tx) error {
	// 	if currProgress, err = s.CurrentStageProgress(etx); err != nil {
	// 		return err
	// 	}
	// 	return nil
	// }); errV != nil {
	// 	return errV
	// }

	// if currProgress >= targetHeight {
	// 	return nil
	// }
	useInternalTx := tx == nil
	if useInternalTx {
		var err error
		tx, err = sss.configs.db.BeginRw(ctx)
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	// isLastCycle := targetHeight >= maxHeight
	startTime := time.Now()

	if sss.configs.logProgress {
		fmt.Print("\033[s") // save the cursor position
	}

	// Fetch states from neighbors
	// pivotRootHash := s.state.status.pivotBlock.Root()
	currentBlockRootHash := s.state.bc.CurrentFastBlock().Root()
	sdm := newStateDownloadManager(tx, sss.configs.bc, sss.configs.concurrency, s.state.logger)
	sdm.setRootHash(currentBlockRootHash)
	var wg sync.WaitGroup
	for i := 0; i < s.state.config.Concurrency; i++ {
		wg.Add(1)
		go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime, s)
	}
	wg.Wait()

	// insert block
	if err := sss.configs.bc.WriteHeadBlock(s.state.status.pivotBlock); err != nil {
		sss.configs.logger.Warn().Err(err).
			Uint64("pivot block number", s.state.status.pivotBlock.NumberU64()).
			Msg(WrapStagedSyncMsg("insert pivot block failed"))
		// TODO: panic("pivot block failed to insert into chain")
		return err
	}

	// states should be fully synced in this stage
	s.state.status.statesSynced = true

	/*
		gbm := s.state.gbm

		// Setup workers to fetch states from remote node
		var wg sync.WaitGroup
		curHeight := s.state.CurrentBlockNumber()

		for bn := curHeight + 1; bn <= gbm.targetBN; bn++ {
			root := gbm.GetRootHash(bn)
			if root == emptyHash {
				continue
			}
			sdm.setRootHash(root)
			for i := 0; i < s.state.config.Concurrency; i++ {
				wg.Add(1)
				go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime, s)
			}
			wg.Wait()
		}
	*/

	if useInternalTx {
		if err := tx.Commit(); err != nil {
			return err
		}
	}

	return nil
}

// runStateWorkerLoop creates a work loop for downloading states
func (sss *StageStateSync) runStateWorkerLoop(ctx context.Context, sdm *StateDownloadManager, wg *sync.WaitGroup, loopID int, startTime time.Time, s *StageState) {

	defer wg.Done()

	for {
		select {
		case <-ctx.Done():
			return
		default:
		}
		nodes, paths, codes, err := sdm.GetNextBatch()
		if len(nodes)+len(codes) == 0 || err != nil {
			select {
			case <-ctx.Done():
				return
			case <-time.After(100 * time.Millisecond):
				return
			}
		}
		data, stid, err := sss.downloadStates(ctx, nodes, codes)
		if err != nil {
			if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
				sss.configs.protocol.StreamFailed(stid, "downloadStates failed")
			}
			utils.Logger().Error().
				Err(err).
				Str("stream", string(stid)).
				Msg(WrapStagedSyncMsg("downloadStates failed"))
			err = errors.Wrap(err, "request error")
			sdm.HandleRequestError(codes, paths, stid, err)
		} else if data == nil || len(data) == 0 {
			utils.Logger().Warn().
				Str("stream", string(stid)).
				Msg(WrapStagedSyncMsg("downloadStates failed, received empty data bytes"))
			err := errors.New("downloadStates received empty data bytes")
			sdm.HandleRequestError(codes, paths, stid, err)
		} else {
			sdm.HandleRequestResult(nodes, paths, data, loopID, stid)
			if sss.configs.logProgress {
				// calculate the state download speed
				dt := time.Since(startTime).Seconds()
				speed := float64(0)
				if dt > 0 {
					speed = float64(len(data)) / dt
				}
				stateDownloadSpeed := fmt.Sprintf("%.2f", speed)

				fmt.Print("\033[u\033[K") // restore the cursor position and clear the line
				fmt.Println("state download speed:", stateDownloadSpeed, "states/s")
			}
		}
	}
}

func (sss *StageStateSync) downloadStates(ctx context.Context, nodes []common.Hash, codes []common.Hash) ([][]byte, sttypes.StreamID, error) {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	hashes := append(codes, nodes...)
	data, stid, err := sss.configs.protocol.GetNodeData(ctx, hashes)
	if err != nil {
		return nil, stid, err
	}
	if err := validateGetNodeDataResult(hashes, data); err != nil {
		return nil, stid, err
	}
	return data, stid, nil
}

func validateGetNodeDataResult(requested []common.Hash, result [][]byte) error {
	if len(result) != len(requested) {
		return fmt.Errorf("unexpected number of nodes delivered: %v / %v", len(result), len(requested))
	}
	return nil
}

func (stg *StageStateSync) insertChain(gbm *blockDownloadManager,
	protocol syncProtocol,
	lbls prometheus.Labels,
	targetBN uint64) {

}

func (stg *StageStateSync) saveProgress(s *StageState, tx kv.RwTx) (err error) {

	useInternalTx := tx == nil
	if useInternalTx {
		var err error
		tx, err = stg.configs.db.BeginRw(context.Background())
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	// save progress
	if err = s.Update(tx, s.state.CurrentBlockNumber()); err != nil {
		utils.Logger().Error().
			Err(err).
			Msgf("[STAGED_SYNC] saving progress for block States stage failed")
		return ErrSaveStateProgressFail
	}

	if useInternalTx {
		if err := tx.Commit(); err != nil {
			return err
		}
	}
	return nil
}

func (stg *StageStateSync) Revert(ctx context.Context, firstCycle bool, u *RevertState, s *StageState, tx kv.RwTx) (err error) {
	useInternalTx := tx == nil
	if useInternalTx {
		tx, err = stg.configs.db.BeginRw(ctx)
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	if err = u.Done(tx); err != nil {
		return err
	}

	if useInternalTx {
		if err = tx.Commit(); err != nil {
			return err
		}
	}
	return nil
}

func (stg *StageStateSync) CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) (err error) {
	useInternalTx := tx == nil
	if useInternalTx {
		tx, err = stg.configs.db.BeginRw(ctx)
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	if useInternalTx {
		if err = tx.Commit(); err != nil {
			return err
		}
	}
	return nil
}
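In outline, each state worker in this stage repeats the same contract against the download manager: take a batch, fetch it over GetNodeData, then report success or failure. A condensed, hedged sketch of one iteration (error handling and logging trimmed; names match the code above):

	// Hypothetical condensed view of one worker iteration.
	nodes, paths, codes, _ := sdm.GetNextBatch()
	data, stid, err := sss.downloadStates(ctx, nodes, codes) // GetNodeData under the hood
	if err != nil || len(data) == 0 {
		sdm.HandleRequestError(codes, paths, stid, err) // hashes go back for retry
	} else {
		sdm.HandleRequestResult(nodes, paths, data, loopID, stid) // schedule nodes/codes for commit
	}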
@@ -0,0 +1,469 @@
package stagedstreamsync

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/harmony-one/harmony/core"
	"github.com/harmony-one/harmony/internal/utils"
	sttypes "github.com/harmony-one/harmony/p2p/stream/types"
	"github.com/pkg/errors"

	//sttypes "github.com/harmony-one/harmony/p2p/stream/types"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/rs/zerolog"
)

type StageFullStateSync struct {
	configs StageFullStateSyncCfg
}

type StageFullStateSyncCfg struct {
	bc          core.BlockChain
	db          kv.RwDB
	concurrency int
	protocol    syncProtocol
	logger      zerolog.Logger
	logProgress bool
}

func NewStageFullStateSync(cfg StageFullStateSyncCfg) *StageFullStateSync {
	return &StageFullStateSync{
		configs: cfg,
	}
}

func NewStageFullStateSyncCfg(bc core.BlockChain,
	db kv.RwDB,
	concurrency int,
	protocol syncProtocol,
	logger zerolog.Logger,
	logProgress bool) StageFullStateSyncCfg {

	return StageFullStateSyncCfg{
		bc:          bc,
		db:          db,
		concurrency: concurrency,
		protocol:    protocol,
		logger:      logger,
		logProgress: logProgress,
	}
}

// Exec progresses the States stage in the forward direction
func (sss *StageFullStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) {

	// for short range sync, skip this stage
	if !s.state.initSync {
		return nil
	}

	// only execute this stage in fast/snap sync mode and once we reach the pivot
	if s.state.status.pivotBlock == nil ||
		s.state.CurrentBlockNumber() != s.state.status.pivotBlock.NumberU64() ||
		s.state.status.statesSynced {
		return nil
	}

	// maxHeight := s.state.status.targetBN
	// currentHead := s.state.CurrentBlockNumber()
	// if currentHead >= maxHeight {
	// 	return nil
	// }
	// currProgress := s.state.CurrentBlockNumber()
	// targetHeight := s.state.currentCycle.TargetHeight

	// if errV := CreateView(ctx, sss.configs.db, tx, func(etx kv.Tx) error {
	// 	if currProgress, err = s.CurrentStageProgress(etx); err != nil {
	// 		return err
	// 	}
	// 	return nil
	// }); errV != nil {
	// 	return errV
	// }

	// if currProgress >= targetHeight {
	// 	return nil
	// }
	useInternalTx := tx == nil
	if useInternalTx {
		var err error
		tx, err = sss.configs.db.BeginRw(ctx)
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	// isLastCycle := targetHeight >= maxHeight
	startTime := time.Now()

	if sss.configs.logProgress {
		fmt.Print("\033[s") // save the cursor position
	}

	// Fetch states from neighbors
	currentBlockRootHash := s.state.bc.CurrentFastBlock().Root()
	scheme := sss.configs.bc.TrieDB().Scheme()
	sdm := newFullStateDownloadManager(sss.configs.bc.ChainDb(), scheme, tx, sss.configs.bc, sss.configs.concurrency, s.state.logger)
	sdm.setRootHash(currentBlockRootHash)
	var wg sync.WaitGroup
	for i := 0; i < s.state.config.Concurrency; i++ {
		wg.Add(1)
		go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime, s)
	}
	wg.Wait()

	// insert block
	if err := sss.configs.bc.WriteHeadBlock(s.state.status.pivotBlock); err != nil {
		sss.configs.logger.Warn().Err(err).
			Uint64("pivot block number", s.state.status.pivotBlock.NumberU64()).
			Msg(WrapStagedSyncMsg("insert pivot block failed"))
		// TODO: panic("pivot block failed to insert into chain")
		return err
	}

	// states should be fully synced in this stage
	s.state.status.statesSynced = true

	/*
		gbm := s.state.gbm

		// Setup workers to fetch states from remote node
		var wg sync.WaitGroup
		curHeight := s.state.CurrentBlockNumber()

		for bn := curHeight + 1; bn <= gbm.targetBN; bn++ {
			root := gbm.GetRootHash(bn)
			if root == emptyHash {
				continue
			}
			sdm.setRootHash(root)
			for i := 0; i < s.state.config.Concurrency; i++ {
				wg.Add(1)
				go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime, s)
			}
			wg.Wait()
		}
	*/

	if useInternalTx {
		if err := tx.Commit(); err != nil {
			return err
		}
	}

	return nil
}

// runStateWorkerLoop creates a work loop for downloading states
func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *FullStateDownloadManager, wg *sync.WaitGroup, loopID int, startTime time.Time, s *StageState) {

	defer wg.Done()

	for {
		select {
		case <-ctx.Done():
			return
		default:
		}
		accountTasks, codes, storages, healtask, codetask, err := sdm.GetNextBatch()
		if len(accountTasks)+len(codes)+len(storages.accounts)+len(healtask.hashes)+len(codetask.hashes) == 0 || err != nil {
			select {
			case <-ctx.Done():
				return
			case <-time.After(100 * time.Millisecond):
				return
			}
		}

		if len(accountTasks) > 0 {

			task := accountTasks[0]
			origin := task.Next
			limit := task.Last
			root := sdm.root
			cap := maxRequestSize
			retAccounts, proof, stid, err := sss.configs.protocol.GetAccountRange(ctx, root, origin, limit, uint64(cap))
			if err != nil {
				if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
					sss.configs.protocol.StreamFailed(stid, "GetAccountRange failed")
				}
				utils.Logger().Error().
					Err(err).
					Str("stream", string(stid)).
					Msg(WrapStagedSyncMsg("GetAccountRange failed"))
				err = errors.Wrap(err, "request error")
				sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
				return
			} else if retAccounts == nil || len(retAccounts) == 0 {
				utils.Logger().Warn().
					Str("stream", string(stid)).
					Msg(WrapStagedSyncMsg("GetAccountRange failed, received empty accounts"))
				err := errors.New("GetAccountRange received empty slots")
				sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
				return
			}
			if err := sdm.HandleAccountRequestResult(task, retAccounts, proof, origin[:], limit[:], loopID, stid); err != nil {
				utils.Logger().Error().
					Err(err).
					Str("stream", string(stid)).
					Msg(WrapStagedSyncMsg("GetAccountRange handle result failed"))
				err = errors.Wrap(err, "handle result error")
				sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
				return
			}

		} else if len(codes) > 0 {

			stid, err := sss.downloadByteCodes(ctx, sdm, codes, loopID)
			if err != nil {
				if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
					sss.configs.protocol.StreamFailed(stid, "downloadByteCodes failed")
				}
				utils.Logger().Error().
					Err(err).
					Str("stream", string(stid)).
					Msg(WrapStagedSyncMsg("downloadByteCodes failed"))
				err = errors.Wrap(err, "request error")
				sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
				return
			}

		} else if len(storages.accounts) > 0 {

			root := sdm.root
			roots := storages.roots
			accounts := storages.accounts
			cap := maxRequestSize
			origin := storages.origin
			limit := storages.limit
			mainTask := storages.mainTask
			subTask := storages.subtask

			slots, proof, stid, err := sss.configs.protocol.GetStorageRanges(ctx, root, accounts, origin, limit, uint64(cap))
			if err != nil {
				if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
					sss.configs.protocol.StreamFailed(stid, "GetStorageRanges failed")
				}
				utils.Logger().Error().
					Err(err).
					Str("stream", string(stid)).
					Msg(WrapStagedSyncMsg("GetStorageRanges failed"))
				err = errors.Wrap(err, "request error")
				sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
				return
			} else if slots == nil || len(slots) == 0 {
				utils.Logger().Warn().
					Str("stream", string(stid)).
					Msg(WrapStagedSyncMsg("GetStorageRanges failed, received empty slots"))
				err := errors.New("GetStorageRanges received empty slots")
				sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
				return
			}
			if err := sdm.HandleStorageRequestResult(mainTask, subTask, accounts, roots, origin, limit, slots, proof, loopID, stid); err != nil {
				utils.Logger().Error().
					Err(err).
					Str("stream", string(stid)).
					Msg(WrapStagedSyncMsg("GetStorageRanges handle result failed"))
				err = errors.Wrap(err, "handle result error")
				sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
				return
			}

		} else {
			// assign trie node heal tasks
			if len(healtask.hashes) > 0 {
				root := sdm.root
				task := healtask.task
				hashes := healtask.hashes
				pathsets := healtask.pathsets
				paths := healtask.paths

				nodes, stid, err := sss.configs.protocol.GetTrieNodes(ctx, root, pathsets, maxRequestSize)
				if err != nil {
					if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
						sss.configs.protocol.StreamFailed(stid, "GetTrieNodes failed")
					}
					utils.Logger().Error().
						Err(err).
						Str("stream", string(stid)).
						Msg(WrapStagedSyncMsg("GetTrieNodes failed"))
					err = errors.Wrap(err, "request error")
					sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
					return
				} else if nodes == nil || len(nodes) == 0 {
					utils.Logger().Warn().
						Str("stream", string(stid)).
						Msg(WrapStagedSyncMsg("GetTrieNodes failed, received empty nodes"))
					err := errors.New("GetTrieNodes received empty nodes")
					sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
					return
				}
				if err := sdm.HandleTrieNodeHealRequestResult(task, paths, hashes, nodes, loopID, stid); err != nil {
					utils.Logger().Error().
						Err(err).
						Str("stream", string(stid)).
						Msg(WrapStagedSyncMsg("GetTrieNodes handle result failed"))
					err = errors.Wrap(err, "handle result error")
					sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
					return
				}
			}

			if len(codetask.hashes) > 0 {
				task := codetask.task
				hashes := codetask.hashes
				retCodes, stid, err := sss.configs.protocol.GetByteCodes(ctx, hashes, maxRequestSize)
				if err != nil {
					if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
						sss.configs.protocol.StreamFailed(stid, "GetByteCodes failed")
					}
					utils.Logger().Error().
						Err(err).
						Str("stream", string(stid)).
						Msg(WrapStagedSyncMsg("GetByteCodes failed"))
					err = errors.Wrap(err, "request error")
					sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
					return
				} else if retCodes == nil || len(retCodes) == 0 {
					utils.Logger().Warn().
						Str("stream", string(stid)).
						Msg(WrapStagedSyncMsg("GetByteCodes failed, received empty codes"))
					err := errors.New("GetByteCodes received empty codes")
					sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
					return
				}
				if err := sdm.HandleBytecodeRequestResult(task, hashes, retCodes, loopID, stid); err != nil {
					utils.Logger().Error().
						Err(err).
						Str("stream", string(stid)).
						Msg(WrapStagedSyncMsg("GetByteCodes handle result failed"))
					err = errors.Wrap(err, "handle result error")
					sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
					return
				}
			}
		}
	}
}

func (sss *StageFullStateSync) downloadByteCodes(ctx context.Context, sdm *FullStateDownloadManager, codeTasks []*byteCodeTasksBundle, loopID int) (stid sttypes.StreamID, err error) {
	for _, codeTask := range codeTasks {
		// try to get byte codes from a remote peer;
		// if any request fails, the returned stid will be the id of the failed stream
		retCodes, stid, err := sss.configs.protocol.GetByteCodes(ctx, codeTask.hashes, maxRequestSize)
		if err != nil {
			return stid, err
		}
		if len(retCodes) == 0 {
			return stid, errors.New("empty codes array")
		}
		if err = sdm.HandleBytecodeRequestResult(codeTask.task, codeTask.hashes, retCodes, loopID, stid); err != nil {
			return stid, err
		}
	}
	return
}

// func (sss *StageFullStateSync) downloadStates(ctx context.Context,
// 	root common.Hash,
// 	origin common.Hash,
// 	accounts []*accountTask,
// 	codes []common.Hash,
// 	storages *storageTaskBundle) ([][]byte, sttypes.StreamID, error) {

// 	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
// 	defer cancel()

// 	// if there is any account task, first we have to complete that
// 	if len(accounts) > 0 {

// 	}
// 	// hashes := append(codes, nodes...)
// 	// data, stid, err := sss.configs.protocol.GetNodeData(ctx, hashes)
// 	// if err != nil {
// 	// 	return nil, stid, err
// 	// }
// 	// if err := validateGetNodeDataResult(hashes, data); err != nil {
// 	// 	return nil, stid, err
// 	// }
// 	return data, stid, nil
// }

func (stg *StageFullStateSync) insertChain(gbm *blockDownloadManager,
	protocol syncProtocol,
	lbls prometheus.Labels,
	targetBN uint64) {

}

func (stg *StageFullStateSync) saveProgress(s *StageState, tx kv.RwTx) (err error) {

	useInternalTx := tx == nil
	if useInternalTx {
		var err error
		tx, err = stg.configs.db.BeginRw(context.Background())
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	// save progress
	if err = s.Update(tx, s.state.CurrentBlockNumber()); err != nil {
		utils.Logger().Error().
			Err(err).
			Msgf("[STAGED_SYNC] saving progress for block States stage failed")
		return ErrSaveStateProgressFail
	}

	if useInternalTx {
		if err := tx.Commit(); err != nil {
			return err
		}
	}
	return nil
}

func (stg *StageFullStateSync) Revert(ctx context.Context, firstCycle bool, u *RevertState, s *StageState, tx kv.RwTx) (err error) {
	useInternalTx := tx == nil
	if useInternalTx {
		tx, err = stg.configs.db.BeginRw(ctx)
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	if err = u.Done(tx); err != nil {
		return err
	}

	if useInternalTx {
		if err = tx.Commit(); err != nil {
			return err
		}
	}
	return nil
}

func (stg *StageFullStateSync) CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) (err error) {
	useInternalTx := tx == nil
	if useInternalTx {
		tx, err = stg.configs.db.BeginRw(ctx)
		if err != nil {
			return err
		}
		defer tx.Rollback()
	}

	if useInternalTx {
		if err = tx.Commit(); err != nil {
			return err
		}
	}
	return nil
}
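The full-state worker loop above services exactly one task class per iteration, in a fixed priority order. A condensed, hedged sketch of that dispatch (error paths and arguments omitted; it mirrors the if/else chain in runStateWorkerLoop rather than adding new behavior):

	// Hypothetical condensed dispatch order of the snap-style worker.
	switch {
	case len(accountTasks) > 0: // 1. account ranges (GetAccountRange)
	case len(codes) > 0: // 2. contract bytecodes (GetByteCodes via downloadByteCodes)
	case len(storages.accounts) > 0: // 3. storage ranges (GetStorageRanges)
	default: // 4. heal phase: missing trie nodes (GetTrieNodes) and remaining codes
	}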
@ -0,0 +1,432 @@ |
||||
package stagedstreamsync |
||||
|
||||
import ( |
||||
"fmt" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
"github.com/harmony-one/harmony/core" |
||||
"github.com/harmony-one/harmony/core/rawdb" |
||||
"github.com/harmony-one/harmony/core/state" |
||||
"github.com/harmony-one/harmony/internal/utils" |
||||
sttypes "github.com/harmony-one/harmony/p2p/stream/types" |
||||
"github.com/ledgerwatch/erigon-lib/kv" |
||||
"github.com/rs/zerolog" |
||||
"golang.org/x/crypto/sha3" |
||||
) |
||||
|
||||
// codeTask represents a single byte code download task, containing a set of
|
||||
// peers already attempted retrieval from to detect stalled syncs and abort.
|
||||
type codeTask struct { |
||||
attempts map[sttypes.StreamID]int |
||||
} |
||||
|
||||
// trieTask represents a single trie node download task, containing a set of
|
||||
// peers already attempted retrieval from to detect stalled syncs and abort.
|
||||
type trieTask struct { |
||||
hash common.Hash |
||||
path [][]byte |
||||
attempts map[sttypes.StreamID]int |
||||
} |
||||
|
||||
type task struct { |
||||
trieTasks map[string]*trieTask // Set of trie node tasks currently queued for retrieval, indexed by path
|
||||
codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval, indexed by hash
|
||||
} |
||||
|
||||
func newTask() *task { |
||||
return &task{ |
||||
trieTasks: make(map[string]*trieTask), |
||||
codeTasks: make(map[common.Hash]*codeTask), |
||||
} |
||||
} |
||||
|
||||
func (t *task) addCodeTask(h common.Hash, ct *codeTask) { |
||||
t.codeTasks[h] = &codeTask{ |
||||
attempts: ct.attempts, |
||||
} |
||||
} |
||||
|
||||
func (t *task) getCodeTask(h common.Hash) *codeTask { |
||||
if task, ok := t.codeTasks[h]; ok { |
||||
return task |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (t *task) addNewCodeTask(h common.Hash) { |
||||
t.codeTasks[h] = &codeTask{ |
||||
attempts: make(map[sttypes.StreamID]int), |
||||
} |
||||
} |
||||
|
||||
func (t *task) deleteCodeTask(hash common.Hash) { |
||||
if _, ok := t.codeTasks[hash]; ok { |
||||
delete(t.codeTasks, hash) |
||||
} |
||||
} |
||||
|
||||
func (t *task) deleteCodeTaskAttempts(h common.Hash, stID sttypes.StreamID) { |
||||
if task, ok := t.codeTasks[h]; ok { |
||||
if _, ok := task.attempts[stID]; ok { |
||||
delete(t.codeTasks[h].attempts, stID) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func (t *task) addTrieTask(path string, tt *trieTask) { |
||||
t.trieTasks[path] = &trieTask{ |
||||
hash: tt.hash, |
||||
path: tt.path, |
||||
attempts: tt.attempts, |
||||
} |
||||
} |
||||
|
||||
func (t *task) getTrieTask(path string) *trieTask { |
||||
if task, ok := t.trieTasks[path]; ok { |
||||
return task |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (t *task) addNewTrieTask(hash common.Hash, path string) { |
||||
t.trieTasks[path] = &trieTask{ |
||||
hash: hash, |
||||
path: trie.NewSyncPath([]byte(path)), |
||||
attempts: make(map[sttypes.StreamID]int), |
||||
} |
||||
} |
||||
|
||||
func (t *task) deleteTrieTask(path string) { |
||||
if _, ok := t.trieTasks[path]; ok { |
||||
delete(t.trieTasks, path) |
||||
} |
||||
} |
||||
|
||||
func (t *task) deleteTrieTaskAttempts(path string, stID sttypes.StreamID) { |
||||
if task, ok := t.trieTasks[path]; ok { |
||||
if _, ok := task.attempts[stID]; ok { |
||||
delete(t.trieTasks[path].attempts, stID) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// StateDownloadManager is the helper structure for get blocks request management
|
||||
type StateDownloadManager struct { |
||||
bc core.BlockChain |
||||
tx kv.RwTx |
||||
|
||||
protocol syncProtocol |
||||
root common.Hash // State root currently being synced
|
||||
sched *trie.Sync // State trie sync scheduler defining the tasks
|
||||
keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with
|
||||
concurrency int |
||||
logger zerolog.Logger |
||||
lock sync.Mutex |
||||
|
||||
numUncommitted int |
||||
bytesUncommitted int |
||||
|
||||
tasks *task |
||||
requesting *task |
||||
processing *task |
||||
retries *task |
||||
} |
||||
|
||||
func newStateDownloadManager(tx kv.RwTx, |
||||
bc core.BlockChain, |
||||
concurrency int, |
||||
logger zerolog.Logger) *StateDownloadManager { |
||||
|
||||
return &StateDownloadManager{ |
||||
bc: bc, |
||||
tx: tx, |
||||
keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState), |
||||
concurrency: concurrency, |
||||
logger: logger, |
||||
tasks: newTask(), |
||||
requesting: newTask(), |
||||
processing: newTask(), |
||||
retries: newTask(), |
||||
} |
||||
} |
||||
|
||||
func (s *StateDownloadManager) setRootHash(root common.Hash) { |
||||
s.root = root |
||||
s.sched = state.NewStateSync(root, s.bc.ChainDb(), nil, rawdb.HashScheme) |
||||
} |
||||
|
||||
// fillTasks fills the tasks to send to the remote peer.
|
||||
func (s *StateDownloadManager) fillTasks(n int) error { |
||||
if fill := n - (len(s.tasks.trieTasks) + len(s.tasks.codeTasks)); fill > 0 { |
||||
paths, hashes, codes := s.sched.Missing(fill) |
||||
for i, path := range paths { |
||||
s.tasks.addNewTrieTask(hashes[i], path) |
||||
} |
||||
for _, hash := range codes { |
||||
s.tasks.addNewCodeTask(hash) |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// GetNextBatch returns up to StatesPerRequest state download tasks (trie node
// hashes with their paths, plus bytecode hashes) to send to the remote peer,
// serving retried tasks first and then refilling from the scheduler.
func (s *StateDownloadManager) GetNextBatch() (nodes []common.Hash, paths []string, codes []common.Hash, err error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	cap := StatesPerRequest

	nodes, paths, codes = s.getBatchFromRetries(cap)
	nItems := len(nodes) + len(codes)
	cap -= nItems

	if cap > 0 {
		// Refill available tasks from the scheduler.
		if s.sched.Pending() == 0 {
			return
		}

		if err = s.commit(false); err != nil {
			return
		}

		if err = s.fillTasks(cap); err != nil {
			return
		}
		newNodes, newPaths, newCodes := s.getBatchFromUnprocessed(cap)
		nodes = append(nodes, newNodes...)
		paths = append(paths, newPaths...)
		codes = append(codes, newCodes...)
	}
	return
}

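Putting the pieces together, a sync stage would set the root, then loop: pull a batch with GetNextBatch, ship it to a peer, and feed the outcome back through HandleRequestResult or HandleRequestError. The sketch below assumes it lives in the same package; requestStates is a hypothetical callback standing in for the actual stream request/response plumbing, and retry/timeout handling is simplified:

// exampleDriveStateSync sketches how a caller might drive the manager end to end.
func exampleDriveStateSync(
	tx kv.RwTx,
	bc core.BlockChain,
	logger zerolog.Logger,
	root common.Hash,
	stID sttypes.StreamID,
	requestStates func(nodes []common.Hash, paths []string, codes []common.Hash) ([][]byte, error),
) error {
	sdm := newStateDownloadManager(tx, bc, 1, logger)
	sdm.setRootHash(root)
	for {
		nodes, paths, codes, err := sdm.GetNextBatch()
		if err != nil {
			return err
		}
		if len(nodes)+len(codes) == 0 {
			break // nothing left to download
		}
		response, err := requestStates(nodes, paths, codes)
		if err != nil {
			sdm.HandleRequestError(codes, paths, stID, err)
			continue
		}
		if err := sdm.HandleRequestResult(codes, paths, response, 0, stID); err != nil {
			return err
		}
	}
	// Flush whatever the scheduler still holds in memory.
	return sdm.commit(true)
}
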
// commit flushes the data accumulated by the scheduler into a database batch and
// writes it out. Unless forced, it only does so once at least ethdb.IdealBatchSize
// bytes have accumulated.
func (s *StateDownloadManager) commit(force bool) error {
	if !force && s.bytesUncommitted < ethdb.IdealBatchSize {
		return nil
	}
	start := time.Now()
	b := s.bc.ChainDb().NewBatch()
	if err := s.sched.Commit(b); err != nil {
		return err
	}
	if err := b.Write(); err != nil {
		return fmt.Errorf("DB write error: %v", err)
	}
	s.updateStats(s.numUncommitted, 0, 0, time.Since(start))
	s.numUncommitted = 0
	s.bytesUncommitted = 0
	return nil
}

// updateStats bumps the various state sync progress counters and displays a log
// message for the user to see.
func (s *StateDownloadManager) updateStats(written, duplicate, unexpected int, duration time.Duration) {
	// TODO: also track totals for pending, processed, duplicate and unexpected entries.
	// For now, just log the stats of the current batch.
	if written > 0 || duplicate > 0 || unexpected > 0 {
		utils.Logger().Info().
			Int("count", written).
			Int("duplicate", duplicate).
			Int("unexpected", unexpected).
			Msg("Imported new state entries")
	}
}

// getBatchFromUnprocessed returns up to n unprocessed state download tasks to send
// to the remote peer, preferring code over trie nodes as those can be written to
// disk and forgotten about.
func (s *StateDownloadManager) getBatchFromUnprocessed(n int) (nodes []common.Hash, paths []string, codes []common.Hash) {
	nodes = make([]common.Hash, 0, n)
	paths = make([]string, 0, n)
	codes = make([]common.Hash, 0, n)

	// Go map iteration order is unspecified, so the tasks picked below are an
	// effectively arbitrary subset of the pending ones.
	for hash, t := range s.tasks.codeTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		codes = append(codes, hash)
		s.requesting.addCodeTask(hash, t)
		s.tasks.deleteCodeTask(hash)
	}
	for path, t := range s.tasks.trieTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		nodes = append(nodes, t.hash)
		paths = append(paths, path)
		s.requesting.addTrieTask(path, t)
		s.tasks.deleteTrieTask(path)
	}
	return nodes, paths, codes
}

// getBatchFromRetries returns up to n previously failed state download tasks from
// the retry queue to be requested again, preferring code over trie nodes as those
// can be written to disk and forgotten about.
func (s *StateDownloadManager) getBatchFromRetries(n int) ([]common.Hash, []string, []common.Hash) {
	nodes := make([]common.Hash, 0, n)
	paths := make([]string, 0, n)
	codes := make([]common.Hash, 0, n)

	for hash, t := range s.retries.codeTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		codes = append(codes, hash)
		s.requesting.addCodeTask(hash, t)
		s.retries.deleteCodeTask(hash)
	}
	for path, t := range s.retries.trieTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		nodes = append(nodes, t.hash)
		paths = append(paths, path)
		s.requesting.addTrieTask(path, t)
		s.retries.deleteTrieTask(path)
	}
	return nodes, paths, codes
}

// HandleRequestError moves all tasks of a failed request back to the retry queue.
func (s *StateDownloadManager) HandleRequestError(codeHashes []common.Hash, triePaths []string, streamID sttypes.StreamID, err error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	// add requested code hashes to retries
	for _, h := range codeHashes {
		task := s.requesting.getCodeTask(h)
		s.retries.addCodeTask(h, task)
		s.requesting.deleteCodeTask(h)
	}

	// add requested trie paths to retries
	for _, path := range triePaths {
		task := s.requesting.getTrieTask(path)
		s.retries.addTrieTask(path, task)
		s.requesting.deleteTrieTask(path)
	}
}

// HandleRequestResult processes the delivered trie nodes and bytecodes, injecting
// them into the scheduler and moving undelivered items to the retry queue.
func (s *StateDownloadManager) HandleRequestResult(codeHashes []common.Hash, triePaths []string, response [][]byte, loopID int, streamID sttypes.StreamID) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	// Collect processing stats and update progress if valid data was received
	duplicate, unexpected, successful := 0, 0, 0

	for _, blob := range response {
		hash, err := s.processNodeData(codeHashes, triePaths, blob)
		switch err {
		case nil:
			s.numUncommitted++
			s.bytesUncommitted += len(blob)
			successful++
		case trie.ErrNotRequested:
			unexpected++
		case trie.ErrAlreadyProcessed:
			duplicate++
		default:
			return fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err)
		}
	}

	for _, path := range triePaths {
		task := s.requesting.getTrieTask(path)
		if task == nil {
			// The task is no longer in the requesting queue: either it was completed
			// and deleted by processNodeData, or it never existed.
			continue
		}
		// If the node did deliver something, missing items may be due to a protocol
		// limit or a previous timeout + delayed delivery. Both cases should permit
		// the node to retry the missing items (to avoid single-peer stalls).
		if len(response) > 0 { // TODO: apply the same handling on timeout
			s.requesting.deleteTrieTaskAttempts(path, streamID)
		} else if task.attempts[streamID] >= MaxTriesToFetchNodeData {
			// If we've requested the node too many times already, it may be a malicious
			// sync where nobody has the right data. Abort.
			return fmt.Errorf("trie node %s failed with peer %s (%d tries)", task.hash.TerminalString(), streamID, task.attempts[streamID])
		}
		// Missing item, place into the retry queue.
		s.retries.addTrieTask(path, task)
		s.requesting.deleteTrieTask(path)
	}

	for _, hash := range codeHashes {
		task := s.requesting.getCodeTask(hash)
		if task == nil {
			// The task is no longer in the requesting queue: either it was completed
			// and deleted by processNodeData, or it never existed.
			continue
		}
		// If the node did deliver something, missing items may be due to a protocol
		// limit or a previous timeout + delayed delivery. Both cases should permit
		// the node to retry the missing items (to avoid single-peer stalls).
		if len(response) > 0 { // TODO: apply the same handling on timeout
			s.requesting.deleteCodeTaskAttempts(hash, streamID) // TODO: do we need to delete attempts here?
		} else if task.attempts[streamID] >= MaxTriesToFetchNodeData {
			// If we've requested the node too many times already, it may be a malicious
			// sync where nobody has the right data. Abort.
			return fmt.Errorf("byte code %s failed with peer %s (%d tries)", hash.TerminalString(), streamID, task.attempts[streamID])
		}
		// Missing item, place into the retry queue.
		s.retries.addCodeTask(hash, task)
		s.requesting.deleteCodeTask(hash)
	}

	return nil
}

// processNodeData tries to inject a trie node data blob delivered from a remote
// peer into the state trie, returning whether anything useful was written or any
// error occurred.
//
// If multiple requests correspond to the same hash, this method will inject the
// blob as a result for the first one only, leaving the remaining duplicates to
// be fetched again.
func (s *StateDownloadManager) processNodeData(codeHashes []common.Hash, triePaths []string, responseData []byte) (common.Hash, error) {
	var hash common.Hash
	s.keccak.Reset()
	s.keccak.Write(responseData)
	s.keccak.Read(hash[:])

	// TODO: remove from requesting
	if _, present := s.requesting.codeTasks[hash]; present {
		err := s.sched.ProcessCode(trie.CodeSyncResult{
			Hash: hash,
			Data: responseData,
		})
		s.requesting.deleteCodeTask(hash)
		return hash, err
	}
	for _, path := range triePaths {
		task := s.requesting.getTrieTask(path)
		if task == nil {
			// This shouldn't happen: the path comes from triePaths, and triePaths
			// are taken from the requesting queue.
			continue
		}
		if task.hash == hash {
			err := s.sched.ProcessNode(trie.NodeSyncResult{
				Path: path,
				Data: responseData,
			})
			s.requesting.deleteTrieTask(path)
			return hash, err
		}
	}
	return common.Hash{}, trie.ErrNotRequested
}
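
Because the stream response carries raw blobs with no indication of which request each one answers, processNodeData identifies every blob by hashing it and looking the hash up in the requesting queue. The standalone illustration below shows just that content-addressing step; exampleMatchBlob is a hypothetical helper, not something the manager defines:

// exampleMatchBlob returns the Keccak256 hash of a delivered blob and whether that
// hash is among the requested code hashes, mirroring the lookup in processNodeData.
func exampleMatchBlob(requested map[common.Hash]struct{}, blob []byte) (common.Hash, bool) {
	hash := crypto.Keccak256Hash(blob)
	_, ok := requested[hash]
	return hash, ok
}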