Mark infinite loops inside goroutines

Branch: pull/1710/head
Author: Eugene Kim, 5 years ago
Parent: 386a948f7b
Commit: 9a4468d7d5
Changed files:

  1. internal/hmyapi/filters/api.go (1 line added)
  2. internal/memprofiling/lib.go (3 lines added)
  3. internal/profiler/profiler.go (2 lines added)
  4. node/node_handler.go (3 lines added)
  5. node/node_syncing.go (3 lines added)

internal/hmyapi/filters/api.go

@@ -60,6 +60,7 @@ func NewPublicFilterAPI(backend Backend, lightMode bool) *PublicFilterAPI {
 // timeoutLoop runs every 5 minutes and deletes filters that have not been recently used.
 // It is started when the API is created.
 func (api *PublicFilterAPI) timeoutLoop() {
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 	ticker := time.NewTicker(5 * time.Minute)
 	for {
 		<-ticker.C
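The loop above never exits, and its ticker is never stopped. A minimal sketch of how the TODO might eventually be resolved, assuming a hypothetical quit channel that the API's owner closes on shutdown (nothing in this commit adds one):

package filters // sketch only, not part of this commit

import "time"

// timeoutLoop with shutdown support: quit is a hypothetical channel closed
// when the API is torn down.
func timeoutLoop(quit <-chan struct{}) {
	ticker := time.NewTicker(5 * time.Minute)
	defer ticker.Stop() // stop the ticker so the underlying timer is released
	for {
		select {
		case <-ticker.C:
			// delete filters that have not been recently used, as before
		case <-quit:
			return // exit cleanly instead of looping forever
		}
	}
}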

internal/memprofiling/lib.go

@@ -79,6 +79,7 @@ func (m *MemProfiling) Stop() {
 // PeriodicallyScanMemSize scans memsize of the observed objects every 30 seconds.
 func (m *MemProfiling) PeriodicallyScanMemSize() {
 	go func() {
+		// TODO ek – infinite loop; add shutdown/cleanup logic
 		for {
 			select {
 			case <-time.After(memSizeScanTime):
@@ -98,6 +99,7 @@ func (m *MemProfiling) PeriodicallyScanMemSize() {
 // MaybeCallGCPeriodically runs GC manually every gcTime minutes. This is one of the options to mitigate the OOM issue.
 func MaybeCallGCPeriodically() {
 	go func() {
+		// TODO ek – infinite loop; add shutdown/cleanup logic
 		for {
 			select {
 			case <-time.After(gcTime):
@@ -108,6 +110,7 @@ func MaybeCallGCPeriodically() {
 		}
 	}()
 	go func() {
+		// TODO ek – infinite loop; add shutdown/cleanup logic
 		for {
 			select {
 			case <-time.After(memStatTime):
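All three goroutines in this file share the same shape: an endless for/select around time.After. Since the first hunk header shows MemProfiling already has a Stop method, one sketch of a fix is a shared loop ended by a stop channel (hypothetical, closed by Stop); a single ticker also avoids allocating a fresh timer on every iteration, which the time.After pattern does:

package memprofiling // sketch only

import "time"

// runEvery calls fn every interval until stop is closed. The stop channel
// and fn callback are assumptions, not part of the committed code.
func runEvery(interval time.Duration, stop <-chan struct{}, fn func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			fn() // e.g. scan memsize, force a GC, or log runtime.MemStats
		case <-stop:
			return
		}
	}
}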

internal/profiler/profiler.go

@@ -43,6 +43,7 @@ func (profiler *Profiler) Config(shardID uint32, metricsReportURL string) {
 // LogMemory logs memory.
 func (profiler *Profiler) LogMemory() {
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 	for {
 		// log mem usage
 		info, _ := profiler.proc.MemoryInfo()
@@ -63,6 +64,7 @@ func (profiler *Profiler) LogMemory() {
 // LogCPU logs CPU metrics.
 func (profiler *Profiler) LogCPU() {
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 	for {
 		// log cpu usage
 		percent, _ := profiler.proc.CPUPercent()
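LogMemory and LogCPU sample and loop forever with no way out. A sketch using context cancellation, where the ctx parameter and the sample callback are assumptions rather than the profiler's real API:

package profiler // sketch only

import (
	"context"
	"time"
)

// logLoop samples and logs every period until ctx is canceled.
func logLoop(ctx context.Context, period time.Duration, sample func()) {
	for {
		sample() // e.g. read proc.MemoryInfo() or proc.CPUPercent() and log it
		select {
		case <-time.After(period):
		case <-ctx.Done():
			return // canceled: stop logging
		}
	}
}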

node/node_handler.go

@@ -40,6 +40,7 @@ const (
 func (node *Node) ReceiveGlobalMessage() {
 	ctx := context.Background()
 	for {
+		// TODO ek – infinite loop; add shutdown/cleanup logic
 		if node.globalGroupReceiver == nil {
 			time.Sleep(100 * time.Millisecond)
 			continue
@@ -59,6 +60,7 @@ func (node *Node) ReceiveGlobalMessage() {
 // ReceiveGroupMessage use libp2p pubsub mechanism to receive broadcast messages
 func (node *Node) ReceiveGroupMessage() {
 	ctx := context.Background()
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 	for {
 		if node.shardGroupReceiver == nil {
 			time.Sleep(100 * time.Millisecond)
@@ -79,6 +81,7 @@ func (node *Node) ReceiveGroupMessage() {
 // ReceiveClientGroupMessage use libp2p pubsub mechanism to receive broadcast messages for client
 func (node *Node) ReceiveClientGroupMessage() {
 	ctx := context.Background()
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 	for {
 		if node.clientReceiver == nil {
 			// check less frequent on client messages
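Each receive loop builds its own context.Background() and so can never be canceled. One sketch of shutdown support: hand the node a cancelable context at startup (an assumption; this commit does not change any signatures) and let the loop observe it:

package node // sketch only

import (
	"context"
	"time"
)

// receiveLoop polls until the receiver is ready, then receives messages,
// and returns once ctx is canceled. ready and recvOnce are stand-ins for
// the node's receiver fields and per-message handling.
func receiveLoop(ctx context.Context, ready func() bool, recvOnce func(context.Context)) {
	for {
		if ctx.Err() != nil {
			return // context canceled: stop receiving
		}
		if !ready() {
			time.Sleep(100 * time.Millisecond) // receiver not set up yet
			continue
		}
		recvOnce(ctx) // a blocking receive should also honor ctx
	}
}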

node/node_syncing.go

@@ -162,6 +162,7 @@ func (p *LocalSyncingPeerProvider) SyncingPeers(shardID uint32) (peers []p2p.Pee
 // DoBeaconSyncing update received beaconchain blocks and downloads missing beacon chain blocks
 func (node *Node) DoBeaconSyncing() {
 	go func(node *Node) {
+		// TODO ek – infinite loop; add shutdown/cleanup logic
 		for {
 			select {
 			case beaconBlock := <-node.BeaconBlockChannel:
@@ -170,6 +171,7 @@ func (node *Node) DoBeaconSyncing() {
 		}
 	}(node)
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 	for {
 		if node.beaconSync == nil {
 			utils.Logger().Info().Msg("initializing beacon sync")
@@ -198,6 +200,7 @@ func (node *Node) DoBeaconSyncing() {
 // DoSyncing keep the node in sync with other peers, willJoinConsensus means the node will try to join consensus after catch up
 func (node *Node) DoSyncing(bc *core.BlockChain, worker *worker.Worker, willJoinConsensus bool) {
+	// TODO ek – infinite loop; add shutdown/cleanup logic
 SyncingLoop:
 	for {
 		if node.stateSync == nil {
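DoSyncing's labeled SyncingLoop, like the loops above it, has no exit path. A sketch of how a done channel (hypothetical; nothing in this commit adds one) could end it while keeping the label:

package node // sketch only

// runSyncLoop performs one syncing pass per iteration until done is closed.
// done and syncOnce are assumptions standing in for the loop's real body.
func runSyncLoop(done <-chan struct{}, syncOnce func()) {
SyncingLoop:
	for {
		select {
		case <-done:
			break SyncingLoop // shutdown requested
		default:
		}
		syncOnce() // one pass of the existing syncing logic
	}
}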
