Mark infinite loops inside goroutines

pull/1710/head
Eugene Kim 5 years ago
parent 386a948f7b
commit 9a4468d7d5
  1. internal/hmyapi/filters/api.go (+1)
  2. internal/memprofiling/lib.go (+3)
  3. internal/profiler/profiler.go (+2)
  4. node/node_handler.go (+3)
  5. node/node_syncing.go (+3)

@@ -60,6 +60,7 @@ func NewPublicFilterAPI(backend Backend, lightMode bool) *PublicFilterAPI {
 // timeoutLoop runs every 5 minutes and deletes filters that have not been recently used.
 // It is started when the api is created.
 func (api *PublicFilterAPI) timeoutLoop() {
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 	ticker := time.NewTicker(5 * time.Minute)
 	for {
 		<-ticker.C
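
One common way the timeoutLoop TODO could later be resolved is to select on a quit channel alongside the ticker and stop the ticker on exit. A minimal sketch, assuming a hypothetical quit channel; the filterAPI stand-in type is illustrative, not part of this commit:

package filters

import "time"

// filterAPI stands in for PublicFilterAPI; the quit channel is a
// hypothetical field that a Close/Stop method would close.
type filterAPI struct {
	quit chan struct{}
}

func (api *filterAPI) timeoutLoop() {
	ticker := time.NewTicker(5 * time.Minute)
	defer ticker.Stop() // release ticker resources when the loop exits
	for {
		select {
		case <-ticker.C:
			// ... delete filters that have not been used recently ...
		case <-api.quit: // closed on shutdown
			return
		}
	}
}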

@@ -79,6 +79,7 @@ func (m *MemProfiling) Stop() {
 // PeriodicallyScanMemSize scans memsize of the observed objects every 30 seconds.
 func (m *MemProfiling) PeriodicallyScanMemSize() {
 	go func() {
+		// TODO ek – infinite loop; add shutdown/cleanup logic
 		for {
 			select {
 			case <-time.After(memSizeScanTime):
@@ -98,6 +99,7 @@ func (m *MemProfiling) PeriodicallyScanMemSize() {
 // MaybeCallGCPeriodically runs GC manually every gcTime minutes. This is one of the options to mitigate the OOM issue.
 func MaybeCallGCPeriodically() {
 	go func() {
+		// TODO ek – infinite loop; add shutdown/cleanup logic
 		for {
 			select {
 			case <-time.After(gcTime):
@@ -108,6 +110,7 @@ func MaybeCallGCPeriodically() {
 		}
 	}()
 	go func() {
+		// TODO ek – infinite loop; add shutdown/cleanup logic
 		for {
 			select {
 			case <-time.After(memStatTime):
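
Since MemProfiling already has a Stop() method (visible in the hunk header above), one way these TODOs could be resolved is a stop channel that Stop() closes. A sketch under that assumption; the stop field and lowercase names are hypothetical:

package memprofiling

import "time"

const memSizeScanTime = 30 * time.Second // 30-second interval from the doc comment above

// memProfiling stands in for MemProfiling; the stop channel is a
// hypothetical field that Stop() would close.
type memProfiling struct {
	stop chan struct{}
}

func (m *memProfiling) periodicallyScanMemSize() {
	go func() {
		for {
			select {
			case <-time.After(memSizeScanTime):
				// ... scan memsize of the observed objects ...
			case <-m.stop: // Stop() closed the channel; exit the goroutine
				return
			}
		}
	}()
}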

@@ -43,6 +43,7 @@ func (profiler *Profiler) Config(shardID uint32, metricsReportURL string) {
 // LogMemory logs memory.
 func (profiler *Profiler) LogMemory() {
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 	for {
 		// log mem usage
 		info, _ := profiler.proc.MemoryInfo()
@@ -63,6 +64,7 @@ func (profiler *Profiler) LogMemory() {
 // LogCPU logs CPU metrics.
 func (profiler *Profiler) LogCPU() {
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 	for {
 		// log cpu usage
 		percent, _ := profiler.proc.CPUPercent()
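
For the profiler loops, an alternative to a stop channel is passing a context.Context so the caller can cancel logging. A sketch only; the ctx parameter and the interval are illustrative, not the profiler's actual API:

package profiler

import (
	"context"
	"time"
)

// logMemory sketches a context-aware variant of LogMemory; the same
// shape would apply to LogCPU.
func logMemory(ctx context.Context) {
	ticker := time.NewTicker(3 * time.Second) // illustrative logging interval
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// ... read process memory info and log it ...
		case <-ctx.Done(): // caller cancelled; stop logging
			return
		}
	}
}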

@@ -40,6 +40,7 @@ const (
 func (node *Node) ReceiveGlobalMessage() {
 	ctx := context.Background()
 	for {
+		// TODO ek – infinite loop; add shutdown/cleanup logic
 		if node.globalGroupReceiver == nil {
 			time.Sleep(100 * time.Millisecond)
 			continue
@@ -59,6 +60,7 @@ func (node *Node) ReceiveGlobalMessage() {
 // ReceiveGroupMessage uses libp2p pubsub mechanism to receive broadcast messages
 func (node *Node) ReceiveGroupMessage() {
 	ctx := context.Background()
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 	for {
 		if node.shardGroupReceiver == nil {
 			time.Sleep(100 * time.Millisecond)
@@ -79,6 +81,7 @@ func (node *Node) ReceiveGroupMessage() {
 // ReceiveClientGroupMessage uses libp2p pubsub mechanism to receive broadcast messages for client
 func (node *Node) ReceiveClientGroupMessage() {
 	ctx := context.Background()
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 	for {
 		if node.clientReceiver == nil {
 			// check less frequently on client messages
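
All three Receive*Message loops construct ctx := context.Background(); deriving a cancellable context at node start would give each of them a shutdown path. A generic sketch of that shape; the function and its callback parameters are illustrative, not the node's actual API:

package node

import (
	"context"
	"time"
)

// receiveLoop sketches the shared shape of the Receive*Message loops
// with cancellation; ready reports whether the receiver is initialized,
// recv performs one blocking receive.
func receiveLoop(ctx context.Context, ready func() bool, recv func(context.Context)) {
	for {
		if !ready() {
			// receiver not initialized yet; retry shortly unless cancelled
			select {
			case <-time.After(100 * time.Millisecond):
			case <-ctx.Done():
				return
			}
			continue
		}
		select {
		case <-ctx.Done(): // shutdown requested
			return
		default:
		}
		recv(ctx) // one blocking receive; ctx can interrupt it
	}
}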

@@ -162,6 +162,7 @@ func (p *LocalSyncingPeerProvider) SyncingPeers(shardID uint32) (peers []p2p.Pee
 // DoBeaconSyncing updates received beaconchain blocks and downloads missing beacon chain blocks
 func (node *Node) DoBeaconSyncing() {
 	go func(node *Node) {
+		// TODO ek – infinite loop; add shutdown/cleanup logic
 		for {
 			select {
 			case beaconBlock := <-node.BeaconBlockChannel:
@@ -170,6 +171,7 @@ func (node *Node) DoBeaconSyncing() {
 		}
 	}(node)
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 	for {
 		if node.beaconSync == nil {
 			utils.Logger().Info().Msg("initializing beacon sync")
@@ -198,6 +200,7 @@ func (node *Node) DoBeaconSyncing() {
 // DoSyncing keeps the node in sync with other peers; willJoinConsensus means the node will try to join consensus after catching up
 func (node *Node) DoSyncing(bc *core.BlockChain, worker *worker.Worker, willJoinConsensus bool) {
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 SyncingLoop:
 	for {
 		if node.stateSync == nil {
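
The SyncingLoop label matters for any eventual exit path: inside a select, a plain break only leaves the select, so leaving the loop requires break SyncingLoop. A sketch with a hypothetical shutdown channel; the interval and names are illustrative:

package node

import "time"

// doSyncing sketches an exit path for the labeled loop; the shutdown
// channel is a hypothetical addition, not part of this commit.
func doSyncing(shutdown <-chan struct{}) {
SyncingLoop:
	for {
		select {
		case <-shutdown:
			break SyncingLoop // leaves the for loop, not just the select
		case <-time.After(5 * time.Second): // illustrative sync interval
			// ... existing syncing logic ...
		}
	}
	// ... post-loop cleanup ...
}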
