@@ -108,6 +108,11 @@ var (
type accountTask struct {
    id uint64 // unique id for account task
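+    // Request parameters derived for this task before it is handed to a stream
+    // (assumed semantics from their use below): root is the state root being synced,
+    // origin/limit bound the account hash range, cap limits the response size.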
+    root   common.Hash
+    origin common.Hash
+    limit  common.Hash
+    cap    int

    // These fields get serialized to leveldb on shutdown
    Next common.Hash // Next account to sync in this interval
    Last common.Hash // Last account to sync in this interval
@@ -229,16 +234,19 @@ type byteCodeTasksBundle struct {
    id     uint64 // unique id for bytecode task bundle
    task   *accountTask
    hashes []common.Hash
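+    // cap bounds the response size for this bundle; it is filled with
+    // maxRequestSize when the bundle is built in getBatchFromUnprocessed.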
+    cap    int
}

type storageTaskBundle struct {
    id uint64 // unique id for storage task bundle
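+    // root is the state root the bundled storage requests are anchored to
+    // (assigned from s.root when the bundle is scheduled).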
+    root     common.Hash
    accounts []common.Hash
    roots    []common.Hash
    mainTask *accountTask
    subtask  *storageTask
    origin   common.Hash
    limit    common.Hash
+    cap      int
}
// healTask represents the sync task for healing the snap-synced chunk boundaries.
@@ -251,6 +259,7 @@ type healTask struct {
    pathsets []*message.TrieNodePathSet
    task     *healTask
    root     common.Hash
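+    // bytes is the soft cap on the response size for this heal request
+    // (set to maxRequestSize when the task is created).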
+    bytes       int
    byteCodeReq bool
}
@@ -259,7 +268,6 @@ type tasks struct {
    storageTasks map[uint64]*storageTaskBundle // Set of trie node tasks currently queued for retrieval, indexed by path
    codeTasks    map[uint64]*byteCodeTasksBundle // Set of byte code tasks currently queued for retrieval, indexed by hash
    healer       map[uint64]*healTask
-    snapped      bool // Flag to signal that snap phase is done
}

func newTasks() *tasks {
@@ -268,7 +276,6 @@ func newTasks() *tasks {
        storageTasks: make(map[uint64]*storageTaskBundle, 0),
        codeTasks:    make(map[uint64]*byteCodeTasksBundle),
        healer:       make(map[uint64]*healTask, 0),
-        snapped:      false,
    }
}
@@ -399,8 +406,6 @@ type FullStateDownloadManager struct {
    storageSynced uint64             // Number of storage slots downloaded
    storageBytes  common.StorageSize // Number of storage trie bytes persisted to disk
-    pend        sync.WaitGroup // Tracks network request goroutines for graceful shutdown
    stateWriter ethdb.Batch // Shared batch writer used for persisting raw states
    accountHealed      uint64             // Number of accounts downloaded during the healing stage
    accountHealedBytes common.StorageSize // Number of raw account bytes persisted to disk during the healing stage
@@ -420,6 +425,9 @@ type FullStateDownloadManager struct {
    bytecodeHealBytes common.StorageSize // Number of bytecodes persisted to disk
    bytecodeHealDups  uint64             // Number of bytecodes already processed
    bytecodeHealNops  uint64             // Number of bytecodes not requested
+    startTime time.Time // Time instance when snapshot sync started
+    logTime   time.Time // Time instance when status was last reported
}

func newFullStateDownloadManager(db ethdb.KeyValueStore,
@@ -442,6 +450,7 @@ func newFullStateDownloadManager(db ethdb.KeyValueStore,
        requesting:           newTasks(),
        processing:           newTasks(),
        retries:              newTasks(),
+        trienodeHealThrottle: maxTrienodeHealThrottle, // Tune downward instead of insta-filling with junk
    }
}
@@ -531,6 +540,12 @@ func (s *FullStateDownloadManager) commitHealer(force bool) {
    utils.Logger().Debug().Str("type", "trienodes").Interface("bytes", common.StorageSize(batch.ValueSize())).Msg("Persisted set of healing data")
}
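+// SyncStarted records the wall-clock time at which the snap sync began (only on
+// the first call), so SyncCompleted can report the elapsed time.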
+func (s *FullStateDownloadManager) SyncStarted() {
+    if s.startTime == (time.Time{}) {
+        s.startTime = time.Now()
+    }
+}

func (s *FullStateDownloadManager) SyncCompleted() {
    defer func() { // Persist any progress, independent of failure
        for _, task := range s.tasks.accountTasks {
@@ -556,7 +571,8 @@ func (s *FullStateDownloadManager) SyncCompleted() {
        utils.Logger().Debug().Interface("root", s.root).Msg("Terminating snapshot sync cycle")
    }()
-    utils.Logger().Debug().Msg("Snapshot sync already completed")
+    elapsed := time.Since(s.startTime)
+    utils.Logger().Debug().Interface("elapsed", elapsed).Msg("Snapshot sync already completed")
}

// getNextBatch returns objects with a maximum of n state download
@@ -566,38 +582,30 @@ func (s *FullStateDownloadManager) GetNextBatch() (accounts []*accountTask,
    storages *storageTaskBundle,
    healtask *healTask,
    codetask *healTask,
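+    // nItems reports how many schedulable items (accounts, codes, storages or
+    // heal requests) were packed into this batch; zero means nothing is ready to send.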
+    nItems int,
    err error) {
    s.lock.Lock()
    defer s.lock.Unlock()
-    accounts, codes, storages, healtask, codetask = s.getBatchFromRetries()
-    nItems := len(accounts) + len(codes) + len(storages.roots) + len(healtask.hashes) + len(codetask.hashes)
+    accounts, codes, storages, healtask, codetask, nItems = s.getBatchFromRetries()
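+    // Serve queued retries first; fresh tasks are only pulled from the scheduler
+    // below when the retry queues are empty.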
    if nItems > 0 {
        return
    }
    if len(s.tasks.accountTasks) == 0 && s.scheduler.Pending() == 0 {
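+        // Nothing is queued anywhere: no account tasks, no pending scheduler work
+        // and no retried items, so the snap sync can be declared finished.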
+        if nItems == 0 {
            s.SyncCompleted()
+        }
        return
    }

    // Refill available tasks from the scheduler.
-    withHealTasks := true
-    if healtask != nil || codetask != nil {
-        withHealTasks = false
-    }
-    newAccounts, newCodes, newStorageTaskBundle, newHealTask, newCodeTask := s.getBatchFromUnprocessed(withHealTasks)
+    newAccounts, newCodes, newStorageTaskBundle, newHealTask, newCodeTask, nItems := s.getBatchFromUnprocessed()
    accounts = append(accounts, newAccounts...)
    codes = append(codes, newCodes...)
    storages = newStorageTaskBundle
-    if withHealTasks {
    healtask = newHealTask
    codetask = newCodeTask
-    }
    return
}
@@ -714,7 +722,7 @@ func (s *FullStateDownloadManager) loadSyncStatus() {
    // Either we've failed to decode the previous state, or there was none.
    // Start a fresh sync by chunking up the account range and scheduling
    // them for retrieval.
-    s.tasks.accountTasks = nil
+    s.tasks = newTasks()
    s.accountSynced, s.accountBytes = 0, 0
    s.bytecodeSynced, s.bytecodeBytes = 0, 0
    s.storageSynced, s.storageBytes = 0, 0
@@ -921,16 +929,18 @@ func (s *FullStateDownloadManager) updateStats(written, duplicate, unexpected in
// getBatchFromUnprocessed returns objects with a maximum of n unprocessed state download
// tasks to send to the remote peer.
-func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
+func (s *FullStateDownloadManager) getBatchFromUnprocessed() (
    accounts []*accountTask,
    codes []*byteCodeTasksBundle,
    storages *storageTaskBundle,
    healtask *healTask,
-    codetask *healTask) {
+    codetask *healTask,
+    count int) {
    // over trie nodes as those can be written to disk and forgotten about.
    codes = make([]*byteCodeTasksBundle, 0)
    accounts = make([]*accountTask, 0)
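+    // count is the number of items scheduled into this batch; the caller uses it
+    // to decide whether a request should be sent at all.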
+    count = 0

    for i, task := range s.tasks.accountTasks {
        // Stop when we've gathered enough requests
@@ -956,12 +966,18 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
            break
        }
+        task.root = s.root
+        task.origin = task.Next
+        task.limit = task.Last
+        task.cap = maxRequestSize
+        task.requested = true
        s.tasks.accountTasks[i].requested = true
        accounts = append(accounts, task)
        s.requesting.addAccountTask(task.id, task)
        s.tasks.addAccountTask(task.id, task)
+        // one account task is enough for a stream
+        count = len(accounts)
        return
    }
@@ -997,6 +1013,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
            id:     taskID,
            hashes: hashes,
            task:   task,
+            cap:    maxRequestSize,
        }
        codes = append(codes, bytecodeTask)
@@ -1005,12 +1022,14 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
        // Stop when we've gathered enough requests
        if totalHashes >= maxCodeRequestCount {
+            count = totalHashes
            return
        }
    }

    // if we found some codes, can assign it to node
    if totalHashes > 0 {
+        count = totalHashes
        return
    }
@@ -1020,14 +1039,8 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
            continue
        }
-        // TODO: check cap calculations (shouldn't give us big chunk)
-        cap := maxRequestSize
-        // if cap > maxRequestSize {
-        // 	cap = maxRequestSize
-        // }
-        // if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
-        // 	cap = minRequestSize
-        // }
-        storageSets := cap / 1024
+        storageSets := maxRequestSize / 1024
        storages = &storageTaskBundle{
            accounts: make([]common.Hash, 0, storageSets),
@@ -1089,23 +1102,21 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
            storages.origin = storages.subtask.Next
            storages.limit = storages.subtask.Last
        }
+        storages.root = s.root
-        storages.cap = cap
        s.tasks.addStorageTaskBundle(taskID, storages)
        s.requesting.addStorageTaskBundle(taskID, storages)
+        count = len(storages.accounts)
        return
    }

    if len(storages.accounts) > 0 {
-        return
-    }
-    if !withHealTasks {
+        count = len(storages.accounts)
        return
    }

    // Sync phase done, run heal phase
-    // Iterate over pending tasks
+    // Iterate over pending tasks and try to find a peer to retrieve with
    for (len(s.tasks.healer) > 0 && len(s.tasks.healer[0].hashes) > 0) || s.scheduler.Pending() > 0 {
        // If there are not enough trie tasks queued to fully assign, fill the
        // queue from the state sync scheduler. The trie synced schedules these
@@ -1129,7 +1140,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
        // If all the heal tasks are bytecodes or already downloading, bail
        if len(s.tasks.healer[0].trieTasks) == 0 {
-            return
+            break
        }
        // Generate the network query and send it to the peer
        // if cap > maxTrieRequestCount {
@@ -1177,6 +1188,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
            pathsets:    pathsets,
            root:        s.root,
            task:        s.tasks.healer[0],
+            bytes:       maxRequestSize,
            byteCodeReq: false,
        }
@@ -1184,6 +1196,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
        s.requesting.addHealerTask(taskID, healtask)
        if len(hashes) > 0 {
+            count = len(hashes)
            return
        }
    }
@@ -1205,7 +1218,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
        // If all the heal tasks are trienodes or already downloading, bail
        if len(s.tasks.healer[0].codeTasks) == 0 {
-            return
+            break
        }
        // Task pending retrieval, try to find an idle peer. If no such peer
        // exists, we probably assigned tasks for all (or they are stateless).
@@ -1243,9 +1256,10 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
            id:          taskID,
            hashes:      hashes,
            task:        s.tasks.healer[0],
+            bytes:       maxRequestSize,
            byteCodeReq: true,
        }
+        count = len(hashes)
        s.tasks.healer[taskID] = codetask
        s.requesting.addHealerTask(taskID, healtask)
    }
@@ -1272,7 +1286,8 @@ func (s *FullStateDownloadManager) getBatchFromRetries() (
    codes []*byteCodeTasksBundle,
    storages *storageTaskBundle,
    healtask *healTask,
-    codetask *healTask) {
+    codetask *healTask,
+    count int) {
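+    // count mirrors the semantics of getBatchFromUnprocessed: the number of
+    // retried items packed into this batch, zero when there is nothing to resend.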
    // over trie nodes as those can be written to disk and forgotten about.
    accounts = make([]*accountTask, 0)
@@ -1290,6 +1305,7 @@ func (s *FullStateDownloadManager) getBatchFromRetries() (
    }

    if len(accounts) > 0 {
+        count = len(accounts)
        return
    }
@@ -1301,6 +1317,7 @@ func (s *FullStateDownloadManager) getBatchFromRetries() (
    }

    if len(codes) > 0 {
+        count = len(codes)
        return
    }
@@ -1316,10 +1333,7 @@ func (s *FullStateDownloadManager) getBatchFromRetries() (
        }
        s.requesting.addStorageTaskBundle(storages.id, storages)
        s.retries.deleteStorageTaskBundle(storages.id)
-        return
-    }
-    if len(storages.accounts) > 0 {
+        count = len(storages.accounts)
        return
    }
@@ -1338,6 +1352,7 @@ func (s *FullStateDownloadManager) getBatchFromRetries() (
                }
                s.requesting.addHealerTask(id, task)
                s.retries.deleteHealerTask(id)
+                count = len(task.hashes)
                return
            }
            if task.byteCodeReq {
@@ -1352,11 +1367,13 @@ func (s *FullStateDownloadManager) getBatchFromRetries() (
                }
                s.requesting.addHealerTask(id, task)
                s.retries.deleteHealerTask(id)
+                count = len(task.hashes)
                return
            }
        }
    }

+    count = 0
    return
}
@@ -1371,15 +1388,19 @@ func (s *FullStateDownloadManager) HandleRequestError(accounts []*accountTask,
    s.lock.Lock()
    defer s.lock.Unlock()

+    if accounts != nil && len(accounts) > 0 {
        for _, task := range accounts {
            s.requesting.deleteAccountTask(task.id)
            s.retries.addAccountTask(task.id, task)
        }
+    }

+    if codes != nil && len(codes) > 0 {
        for _, code := range codes {
            s.requesting.deleteCodeTask(code.id)
            s.retries.addCodeTask(code.id, code)
        }
+    }

    if storages != nil {
        s.requesting.addStorageTaskBundle(storages.id, storages)