diff --git a/api/service/stagedstreamsync/const.go b/api/service/stagedstreamsync/const.go
index f82bff572..a41d2e859 100644
--- a/api/service/stagedstreamsync/const.go
+++ b/api/service/stagedstreamsync/const.go
@@ -27,7 +27,7 @@ const (
 	StatesPerRequest int = 100
 
 	// maximum number of blocks for get receipts request
-	ReceiptsPerRequest int = 10
+	ReceiptsPerRequest int = 10
 
 	// DefaultConcurrency is the default settings for concurrency
 	DefaultConcurrency int = 4
diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go
index 0a14d0cb3..fea59b02a 100644
--- a/api/service/stagedstreamsync/staged_stream_sync.go
+++ b/api/service/stagedstreamsync/staged_stream_sync.go
@@ -59,23 +59,23 @@ func (ib *InvalidBlock) addBadStream(bsID sttypes.StreamID) {
 }
 
 type StagedStreamSync struct {
-	bc             core.BlockChain
-	consensus      *consensus.Consensus
-	isBeacon       bool
-	isExplorer     bool
-	db             kv.RwDB
-	protocol       syncProtocol
-	isBeaconNode   bool
-	gbm            *blockDownloadManager // initialized when finished get block number
-	lastMileBlocks []*types.Block        // last mile blocks to catch up with the consensus
-	lastMileMux    sync.Mutex
-	inserted       int
-	config         Config
-	logger         zerolog.Logger
-	status         *status //TODO: merge this with currentSyncCycle
-	initSync       bool    // if sets to true, node start long range syncing
-	UseMemDB       bool
-
+	bc             core.BlockChain
+	consensus      *consensus.Consensus
+	isBeacon       bool
+	isExplorer     bool
+	db             kv.RwDB
+	protocol       syncProtocol
+	isBeaconNode   bool
+	gbm            *blockDownloadManager // initialized when finished get block number
+	rdm            *receiptDownloadManager
+	lastMileBlocks []*types.Block // last mile blocks to catch up with the consensus
+	lastMileMux    sync.Mutex
+	inserted       int
+	config         Config
+	logger         zerolog.Logger
+	status         *status //TODO: merge this with currentSyncCycle
+	initSync       bool    // if sets to true, node start long range syncing
+	UseMemDB       bool
 	revertPoint     *uint64 // used to run stages
 	prevRevertPoint *uint64 // used to get value from outside of staged sync after cycle (for example to notify RPCDaemon)
 	invalidBlock    InvalidBlock
diff --git a/api/service/stagedstreamsync/stages.go b/api/service/stagedstreamsync/stages.go
index 909bb25c0..6ad9e4519 100644
--- a/api/service/stagedstreamsync/stages.go
+++ b/api/service/stagedstreamsync/stages.go
@@ -13,7 +13,7 @@ const (
 	SyncEpoch   SyncStageID = "SyncEpoch"   // epoch sync
 	BlockBodies SyncStageID = "BlockBodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified
 	States      SyncStageID = "States"      // will construct most recent state from downloaded blocks
-	StateSync   SyncStageID = "StateSync"   // State sync
+	StateSync   SyncStageID = "StateSync"   // State sync
 	Receipts    SyncStageID = "Receipts"    // Receipts
 	LastMile    SyncStageID = "LastMile"    // update blocks after sync and update last mile blocks as well
 	Finish      SyncStageID = "Finish"      // Nominal stage after all other stages
diff --git a/api/service/stagedstreamsync/state_download_manager.go b/api/service/stagedstreamsync/state_download_manager.go
index f06ec9cb6..1cd414757 100644
--- a/api/service/stagedstreamsync/state_download_manager.go
+++ b/api/service/stagedstreamsync/state_download_manager.go
@@ -229,13 +229,13 @@ func (s *StateDownloadManager) HandleRequestError(codeHashes []common.Hash, trie
 
 	// add requested code hashes to retries
 	for _, h := range codeHashes {
-		s.retries.addCodeTask(h,s.requesting.codeTasks[h])
+		s.retries.addCodeTask(h, s.requesting.codeTasks[h])
 		delete(s.requesting.codeTasks, h)
 	}
 
 	// add requested trie paths to retries
 	for _, path := range triePaths {
-		s.retries.addTrieTask(path,s.requesting.trieTasks[path])
+		s.retries.addTrieTask(path, s.requesting.trieTasks[path])
 		delete(s.requesting.trieTasks, path)
 	}
 }
@@ -282,7 +282,7 @@
 	}
 
 	for _, hash := range codeHashes {
-		task:= s.requesting.getCodeTask(hash)
+		task := s.requesting.getCodeTask(hash)
 		// If the node did deliver something, missing items may be due to a protocol
 		// limit or a previous timeout + delayed delivery. Both cases should permit
 		// the node to retry the missing items (to avoid single-peer stalls).
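
Note on the HandleRequestError hunks above: beyond the gofmt spacing fixes, the surrounding logic is a requesting-to-retries handoff. Every task that was in flight on the failed request is moved back into a retry pool and deleted from the in-flight set, so the next request cycle (possibly on a different stream) picks it up again. Below is a minimal, self-contained Go sketch of that pattern; taskPool, codeTask, trieTask, and stateDownloadManager here are simplified stand-ins for illustration, not the repository's actual types.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// codeTask and trieTask are illustrative stand-ins for the manager's
// per-task bookkeeping (attempt counts, owning peers, and so on).
type codeTask struct{ attempts int }
type trieTask struct{ attempts int }

// taskPool mirrors the requesting/retries split in the diff: one pool holds
// tasks currently out on a stream, the other holds tasks awaiting re-request.
type taskPool struct {
	codeTasks map[common.Hash]*codeTask
	trieTasks map[string]*trieTask
}

func newTaskPool() *taskPool {
	return &taskPool{
		codeTasks: make(map[common.Hash]*codeTask),
		trieTasks: make(map[string]*trieTask),
	}
}

func (p *taskPool) addCodeTask(h common.Hash, t *codeTask) { p.codeTasks[h] = t }
func (p *taskPool) addTrieTask(path string, t *trieTask)   { p.trieTasks[path] = t }

type stateDownloadManager struct {
	requesting *taskPool // in-flight tasks
	retries    *taskPool // tasks to include in the next request
}

// handleRequestError has the same shape as the two loops in the hunk above:
// move each failed task back to retries, then drop it from the in-flight set.
func (s *stateDownloadManager) handleRequestError(codeHashes []common.Hash, triePaths []string) {
	for _, h := range codeHashes {
		s.retries.addCodeTask(h, s.requesting.codeTasks[h])
		delete(s.requesting.codeTasks, h)
	}
	for _, path := range triePaths {
		s.retries.addTrieTask(path, s.requesting.trieTasks[path])
		delete(s.requesting.trieTasks, path)
	}
}

func main() {
	s := &stateDownloadManager{requesting: newTaskPool(), retries: newTaskPool()}
	h := common.BytesToHash([]byte("bytecode"))
	s.requesting.addCodeTask(h, &codeTask{attempts: 1})
	s.requesting.addTrieTask("00/01", &trieTask{attempts: 1})
	s.handleRequestError([]common.Hash{h}, []string{"00/01"})
	fmt.Printf("in-flight: %d code, %d trie; retries: %d code, %d trie\n",
		len(s.requesting.codeTasks), len(s.requesting.trieTasks),
		len(s.retries.codeTasks), len(s.retries.trieTasks))
	// Output: in-flight: 0 code, 0 trie; retries: 1 code, 1 trie
}

Deleting from requesting in the same pass keeps the two pools disjoint, so a task is never double-counted when a later assignment round folds the retry pool back into a fresh request.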