Replaced loggers in `core/` with custom shared logger

pull/1175/head
Kai Lee 5 years ago
parent 100d7154f6
commit 7aaa06faa3
  1. 55
      core/blockchain.go
  2. 4
      core/chain_indexer.go
  3. 13
      core/genesis.go
  4. 15
      core/headerchain.go
  5. 49
      core/rawdb/accessors_chain.go
  6. 15
      core/rawdb/accessors_indexes.go
  7. 13
      core/rawdb/accessors_metadata.go
  8. 5
      core/state_transition.go
  9. 9
      core/tx_journal.go
  10. 5
      core/tx_list.go
  11. 73
      core/tx_pool.go

@@ -32,7 +32,6 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@@ -253,20 +252,20 @@ func (bc *BlockChain) loadLastState() error {
head := rawdb.ReadHeadBlockHash(bc.db) head := rawdb.ReadHeadBlockHash(bc.db)
if head == (common.Hash{}) { if head == (common.Hash{}) {
// Corrupt or empty database, init from scratch // Corrupt or empty database, init from scratch
log.Warn("Empty database, resetting chain") utils.GetLogger().Warn("Empty database, resetting chain")
return bc.Reset() return bc.Reset()
} }
// Make sure the entire head block is available // Make sure the entire head block is available
currentBlock := bc.GetBlockByHash(head) currentBlock := bc.GetBlockByHash(head)
if currentBlock == nil { if currentBlock == nil {
// Corrupt or empty database, init from scratch // Corrupt or empty database, init from scratch
log.Warn("Head block missing, resetting chain", "hash", head) utils.GetLogger().Warn("Head block missing, resetting chain", "hash", head)
return bc.Reset() return bc.Reset()
} }
// Make sure the state associated with the block is available // Make sure the state associated with the block is available
if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil { if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
// Dangling block without a state associated, init from scratch // Dangling block without a state associated, init from scratch
log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash()) utils.GetLogger().Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
if err := bc.repair(&currentBlock); err != nil { if err := bc.repair(&currentBlock); err != nil {
return err return err
} }
@@ -298,9 +297,9 @@ func (bc *BlockChain) loadLastState() error {
blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0))) utils.GetLogger().Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0)))
log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0))) utils.GetLogger().Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0)))
log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0))) utils.GetLogger().Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0)))
return nil return nil
} }
@@ -310,7 +309,7 @@ func (bc *BlockChain) loadLastState() error {
// though, the head may be further rewound if block bodies are missing (non-archive // though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync). // nodes after a fast sync).
func (bc *BlockChain) SetHead(head uint64) error { func (bc *BlockChain) SetHead(head uint64) error {
log.Warn("Rewinding blockchain", "target", head) utils.GetLogger().Warn("Rewinding blockchain", "target", head)
bc.mu.Lock() bc.mu.Lock()
defer bc.mu.Unlock() defer bc.mu.Unlock()
@@ -376,7 +375,7 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
bc.currentBlock.Store(block) bc.currentBlock.Store(block)
bc.mu.Unlock() bc.mu.Unlock()
log.Info("Committed new head block", "number", block.Number(), "hash", hash) utils.GetLogger().Info("Committed new head block", "number", block.Number(), "hash", hash)
return nil return nil
} }
@@ -478,7 +477,7 @@ func (bc *BlockChain) repair(head **types.Block) error {
for { for {
// Abort if we've rewound to a head block that does have associated state // Abort if we've rewound to a head block that does have associated state
if _, err := state.New((*head).Root(), bc.stateCache); err == nil { if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash()) utils.GetLogger().Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
return nil return nil
} }
// Otherwise rewind one block and recheck state availability there // Otherwise rewind one block and recheck state availability there
@@ -499,7 +498,7 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
if first > last { if first > last {
return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
} }
log.Info("Exporting batch of blocks", "count", last-first+1) utils.GetLogger().Info("Exporting batch of blocks", "count", last-first+1)
start, reported := time.Now(), time.Now() start, reported := time.Now(), time.Now()
for nr := first; nr <= last; nr++ { for nr := first; nr <= last; nr++ {
@@ -511,7 +510,7 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
return err return err
} }
if time.Since(reported) >= statsReportLimit { if time.Since(reported) >= statsReportLimit {
log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start))) utils.GetLogger().Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
reported = time.Now() reported = time.Now()
} }
} }
@@ -727,9 +726,9 @@ func (bc *BlockChain) Stop() {
if number := bc.CurrentBlock().NumberU64(); number > offset { if number := bc.CurrentBlock().NumberU64(); number > offset {
recent := bc.GetBlockByNumber(number - offset) recent := bc.GetBlockByNumber(number - offset)
log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) utils.GetLogger().Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
if err := triedb.Commit(recent.Root(), true); err != nil { if err := triedb.Commit(recent.Root(), true); err != nil {
log.Error("Failed to commit recent state trie", "err", err) utils.GetLogger().Error("Failed to commit recent state trie", "err", err)
} }
} }
} }
@@ -737,10 +736,10 @@ func (bc *BlockChain) Stop() {
triedb.Dereference(bc.triegc.PopItem().(common.Hash)) triedb.Dereference(bc.triegc.PopItem().(common.Hash))
} }
if size, _ := triedb.Size(); size != 0 { if size, _ := triedb.Size(); size != 0 {
log.Error("Dangling trie nodes after full cleanup") utils.GetLogger().Error("Dangling trie nodes after full cleanup")
} }
} }
log.Info("Blockchain manager stopped") utils.GetLogger().Info("Blockchain manager stopped")
} }
func (bc *BlockChain) procFutureBlocks() { func (bc *BlockChain) procFutureBlocks() {
@@ -843,7 +842,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// Do a sanity check that the provided chain is actually ordered and linked // Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(blockChain); i++ { for i := 1; i < len(blockChain); i++ {
if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() { if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(), utils.GetLogger().Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash()) "prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(), return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
@@ -917,7 +916,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
if stats.ignored > 0 { if stats.ignored > 0 {
context = append(context, []interface{}{"ignored", stats.ignored}...) context = append(context, []interface{}{"ignored", stats.ignored}...)
} }
log.Info("Imported new block receipts", context...) utils.GetLogger().Info("Imported new block receipts", context...)
return 0, nil return 0, nil
} }
@@ -986,7 +985,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
// If we're exceeding limits but haven't reached a large enough memory gap, // If we're exceeding limits but haven't reached a large enough memory gap,
// warn the user that the system is becoming unstable. // warn the user that the system is becoming unstable.
if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory) utils.GetLogger().Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
} }
// Flush an entire trie and restart the counters // Flush an entire trie and restart the counters
triedb.Commit(header.Root, true) triedb.Commit(header.Root, true)
@@ -1082,7 +1081,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
for i := 1; i < len(chain); i++ { for i := 1; i < len(chain); i++ {
if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() { if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
// Chain broke ancestry, log a message (programming error) and skip insertion // Chain broke ancestry, log a message (programming error) and skip insertion
log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(), utils.GetLogger().Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash()) "parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(), return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
@@ -1123,7 +1122,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
for i, block := range chain { for i, block := range chain {
// If the chain is terminating, stop processing blocks // If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 { if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing") utils.GetLogger().Debug("Premature abort during blocks processing")
break break
} }
// Wait for the block's verification to complete // Wait for the block's verification to complete
@@ -1229,7 +1228,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
} }
switch status { switch status {
case CanonStatTy: case CanonStatTy:
log.Info("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()), utils.GetLogger().Info("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart))) "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
coalescedLogs = append(coalescedLogs, logs...) coalescedLogs = append(coalescedLogs, logs...)
@@ -1241,7 +1240,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
bc.gcproc += proctime bc.gcproc += proctime
case SideStatTy: case SideStatTy:
log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "elapsed", utils.GetLogger().Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "elapsed",
common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles())) common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
blockInsertTimer.UpdateSince(bstart) blockInsertTimer.UpdateSince(bstart)
@@ -1303,7 +1302,7 @@ func (st *insertStats) report(chain []*types.Block, index int, cache common.Stor
if st.ignored > 0 { if st.ignored > 0 {
context = append(context, []interface{}{"ignored", st.ignored}...) context = append(context, []interface{}{"ignored", st.ignored}...)
} }
log.Info("Imported new chain segment", context...) utils.GetLogger().Info("Imported new chain segment", context...)
*st = insertStats{startTime: now, lastIndex: index + 1} *st = insertStats{startTime: now, lastIndex: index + 1}
} }
@@ -1389,14 +1388,14 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
} }
// Ensure the user sees large reorgs // Ensure the user sees large reorgs
if len(oldChain) > 0 && len(newChain) > 0 { if len(oldChain) > 0 && len(newChain) > 0 {
logFn := log.Debug logFn := utils.GetLogger().Debug
if len(oldChain) > 63 { if len(oldChain) > 63 {
logFn = log.Warn logFn = utils.GetLogger().Warn
} }
logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(), logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
} else { } else {
log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) utils.GetLogger().Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
} }
// Insert the new chain, taking care of the proper incremental order // Insert the new chain, taking care of the proper incremental order
var addedTxs types.Transactions var addedTxs types.Transactions
@@ -1491,7 +1490,7 @@ func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, e
for _, receipt := range receipts { for _, receipt := range receipts {
receiptString += fmt.Sprintf("\t%v\n", receipt) receiptString += fmt.Sprintf("\t%v\n", receipt)
} }
log.Error(fmt.Sprintf(` utils.GetLogger().Error(fmt.Sprintf(`
########## BAD BLOCK ######### ########## BAD BLOCK #########
Chain config: %v Chain config: %v

@@ -28,8 +28,10 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/harmony-one/harmony/core/rawdb" "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
) )
// ChainIndexerBackend defines the methods needed to process chain segments in // ChainIndexerBackend defines the methods needed to process chain segments in
@@ -107,7 +109,7 @@ func NewChainIndexer(chainDb ethdb.Database, indexDb ethdb.Database, backend Cha
sectionSize: section, sectionSize: section,
confirmsReq: confirm, confirmsReq: confirm,
throttling: throttling, throttling: throttling,
log: log.New("type", kind), log: utils.GetLogInstance().New("type", kind),
} }
// Initialize database dependent fields and start the updater // Initialize database dependent fields and start the updater
c.loadValidSections() c.loadValidSections()

@@ -25,14 +25,13 @@ import (
"math/big" "math/big"
"os" "os"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/core/rawdb" "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/state" "github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
@@ -163,10 +162,10 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
stored := rawdb.ReadCanonicalHash(db, 0) stored := rawdb.ReadCanonicalHash(db, 0)
if (stored == common.Hash{}) { if (stored == common.Hash{}) {
if genesis == nil { if genesis == nil {
log.Info("Writing default main-net genesis block") utils.GetLogger().Info("Writing default main-net genesis block")
genesis = DefaultGenesisBlock() genesis = DefaultGenesisBlock()
} else { } else {
log.Info("Writing custom genesis block") utils.GetLogger().Info("Writing custom genesis block")
} }
block, err := genesis.Commit(db) block, err := genesis.Commit(db)
return genesis.Config, block.Hash(), err return genesis.Config, block.Hash(), err
@@ -184,7 +183,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
newcfg := genesis.configOrDefault(stored) newcfg := genesis.configOrDefault(stored)
storedcfg := rawdb.ReadChainConfig(db, stored) storedcfg := rawdb.ReadChainConfig(db, stored)
if storedcfg == nil { if storedcfg == nil {
log.Warn("Found genesis block without chain config") utils.GetLogger().Warn("Found genesis block without chain config")
rawdb.WriteChainConfig(db, stored, newcfg) rawdb.WriteChainConfig(db, stored, newcfg)
return newcfg, stored, nil return newcfg, stored, nil
} }
@@ -282,7 +281,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
err := rawdb.WriteShardStateBytes(db, block.Header().Epoch, block.Header().ShardState) err := rawdb.WriteShardStateBytes(db, block.Header().Epoch, block.Header().ShardState)
if err != nil { if err != nil {
log.Crit("Failed to store genesis shard state", "err", err) utils.GetLogger().Crit("Failed to store genesis shard state", "err", err)
} }
config := g.Config config := g.Config

@@ -28,12 +28,13 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
lru "github.com/hashicorp/golang-lru"
consensus_engine "github.com/harmony-one/harmony/consensus/engine" consensus_engine "github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/core/rawdb" "github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
lru "github.com/hashicorp/golang-lru" "github.com/harmony-one/harmony/internal/utils"
) )
const ( const (
@@ -143,7 +144,7 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
// Irrelevant of the canonical status, write the td and header to the database // Irrelevant of the canonical status, write the td and header to the database
//if err := hc.WriteTd(hash, number, externTd); err != nil { //if err := hc.WriteTd(hash, number, externTd); err != nil {
// // log.Crit("Failed to write header total difficulty", "err", err) // // utils.GetLogger().Crit("Failed to write header total difficulty", "err", err)
// //} // //}
//rawdb.WriteHeader(hc.chainDb, header) //rawdb.WriteHeader(hc.chainDb, header)
@@ -206,7 +207,7 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
for i := 1; i < len(chain); i++ { for i := 1; i < len(chain); i++ {
if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 || chain[i].ParentHash != chain[i-1].Hash() { if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 || chain[i].ParentHash != chain[i-1].Hash() {
// Chain broke ancestry, log a message (programming error) and skip insertion // Chain broke ancestry, log a message (programming error) and skip insertion
log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", chain[i].Hash(), utils.GetLogger().Error("Non contiguous header insert", "number", chain[i].Number, "hash", chain[i].Hash(),
"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", chain[i-1].Hash()) "parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", chain[i-1].Hash())
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number, return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number,
@@ -232,7 +233,7 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
//for i, _ := range chain { //for i, _ := range chain {
// // If the chain is terminating, stop processing blocks // // If the chain is terminating, stop processing blocks
// if hc.procInterrupt() { // if hc.procInterrupt() {
// log.Debug("Premature abort during headers verification") // utils.GetLogger().Debug("Premature abort during headers verification")
// return 0, errors.New("aborted") // return 0, errors.New("aborted")
// } // }
// //
@@ -260,7 +261,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
for i, header := range chain { for i, header := range chain {
// Short circuit insertion if shutting down // Short circuit insertion if shutting down
if hc.procInterrupt() { if hc.procInterrupt() {
log.Debug("Premature abort during headers import") utils.GetLogger().Debug("Premature abort during headers import")
return i, errors.New("aborted") return i, errors.New("aborted")
} }
// If the header's already known, skip it, otherwise store // If the header's already known, skip it, otherwise store
@@ -286,7 +287,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
if stats.ignored > 0 { if stats.ignored > 0 {
context = append(context, []interface{}{"ignored", stats.ignored}...) context = append(context, []interface{}{"ignored", stats.ignored}...)
} }
log.Info("Imported new block headers", context...) utils.GetLogger().Info("Imported new block headers", context...)
return 0, nil return 0, nil
} }

@@ -22,7 +22,6 @@ import (
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
@@ -42,14 +41,14 @@ func ReadCanonicalHash(db DatabaseReader, number uint64) common.Hash {
// WriteCanonicalHash stores the hash assigned to a canonical block number. // WriteCanonicalHash stores the hash assigned to a canonical block number.
func WriteCanonicalHash(db DatabaseWriter, hash common.Hash, number uint64) { func WriteCanonicalHash(db DatabaseWriter, hash common.Hash, number uint64) {
if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil { if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
log.Crit("Failed to store number to hash mapping", "err", err) utils.GetLogger().Crit("Failed to store number to hash mapping", "err", err)
} }
} }
// DeleteCanonicalHash removes the number to hash canonical mapping. // DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db DatabaseDeleter, number uint64) { func DeleteCanonicalHash(db DatabaseDeleter, number uint64) {
if err := db.Delete(headerHashKey(number)); err != nil { if err := db.Delete(headerHashKey(number)); err != nil {
log.Crit("Failed to delete number to hash mapping", "err", err) utils.GetLogger().Crit("Failed to delete number to hash mapping", "err", err)
} }
} }
@@ -75,7 +74,7 @@ func ReadHeadHeaderHash(db DatabaseReader) common.Hash {
// WriteHeadHeaderHash stores the hash of the current canonical head header. // WriteHeadHeaderHash stores the hash of the current canonical head header.
func WriteHeadHeaderHash(db DatabaseWriter, hash common.Hash) { func WriteHeadHeaderHash(db DatabaseWriter, hash common.Hash) {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil { if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last header's hash", "err", err) utils.GetLogger().Crit("Failed to store last header's hash", "err", err)
} }
} }
@@ -91,7 +90,7 @@ func ReadHeadBlockHash(db DatabaseReader) common.Hash {
// WriteHeadBlockHash stores the head block's hash. // WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db DatabaseWriter, hash common.Hash) { func WriteHeadBlockHash(db DatabaseWriter, hash common.Hash) {
if err := db.Put(headBlockKey, hash.Bytes()); err != nil { if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last block's hash", "err", err) utils.GetLogger().Crit("Failed to store last block's hash", "err", err)
} }
} }
@@ -107,7 +106,7 @@ func ReadHeadFastBlockHash(db DatabaseReader) common.Hash {
// WriteHeadFastBlockHash stores the hash of the current fast-sync head block. // WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
func WriteHeadFastBlockHash(db DatabaseWriter, hash common.Hash) { func WriteHeadFastBlockHash(db DatabaseWriter, hash common.Hash) {
if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil { if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last fast block's hash", "err", err) utils.GetLogger().Crit("Failed to store last fast block's hash", "err", err)
} }
} }
@@ -125,7 +124,7 @@ func ReadFastTrieProgress(db DatabaseReader) uint64 {
// retrieving it across restarts. // retrieving it across restarts.
func WriteFastTrieProgress(db DatabaseWriter, count uint64) { func WriteFastTrieProgress(db DatabaseWriter, count uint64) {
if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil { if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
log.Crit("Failed to store fast sync trie progress", "err", err) utils.GetLogger().Crit("Failed to store fast sync trie progress", "err", err)
} }
} }
@@ -151,7 +150,7 @@ func ReadHeader(db DatabaseReader, hash common.Hash, number uint64) *types.Heade
} }
header := new(types.Header) header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), header); err != nil { if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
log.Error("Invalid block header RLP", "hash", hash, "err", err) utils.GetLogger().Error("Invalid block header RLP", "hash", hash, "err", err)
return nil return nil
} }
return header return header
@@ -168,26 +167,26 @@ func WriteHeader(db DatabaseWriter, header *types.Header) {
) )
key := headerNumberKey(hash) key := headerNumberKey(hash)
if err := db.Put(key, encoded); err != nil { if err := db.Put(key, encoded); err != nil {
log.Crit("Failed to store hash to number mapping", "err", err) utils.GetLogger().Crit("Failed to store hash to number mapping", "err", err)
} }
// Write the encoded header // Write the encoded header
data, err := rlp.EncodeToBytes(header) data, err := rlp.EncodeToBytes(header)
if err != nil { if err != nil {
log.Crit("Failed to RLP encode header", "err", err) utils.GetLogger().Crit("Failed to RLP encode header", "err", err)
} }
key = headerKey(number, hash) key = headerKey(number, hash)
if err := db.Put(key, data); err != nil { if err := db.Put(key, data); err != nil {
log.Crit("Failed to store header", "err", err) utils.GetLogger().Crit("Failed to store header", "err", err)
} }
} }
// DeleteHeader removes all block header data associated with a hash. // DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) { func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) {
if err := db.Delete(headerKey(number, hash)); err != nil { if err := db.Delete(headerKey(number, hash)); err != nil {
log.Crit("Failed to delete header", "err", err) utils.GetLogger().Crit("Failed to delete header", "err", err)
} }
if err := db.Delete(headerNumberKey(hash)); err != nil { if err := db.Delete(headerNumberKey(hash)); err != nil {
log.Crit("Failed to delete hash to number mapping", "err", err) utils.GetLogger().Crit("Failed to delete hash to number mapping", "err", err)
} }
} }
@@ -200,7 +199,7 @@ func ReadBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValu
// WriteBodyRLP stores an RLP encoded block body into the database. // WriteBodyRLP stores an RLP encoded block body into the database.
func WriteBodyRLP(db DatabaseWriter, hash common.Hash, number uint64, rlp rlp.RawValue) { func WriteBodyRLP(db DatabaseWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
if err := db.Put(blockBodyKey(number, hash), rlp); err != nil { if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
log.Crit("Failed to store block body", "err", err) utils.GetLogger().Crit("Failed to store block body", "err", err)
} }
} }
@ -220,7 +219,7 @@ func ReadBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body {
} }
body := new(types.Body) body := new(types.Body)
if err := rlp.Decode(bytes.NewReader(data), body); err != nil { if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
log.Error("Invalid block body RLP", "hash", hash, "err", err) utils.GetLogger().Error("Invalid block body RLP", "hash", hash, "err", err)
return nil return nil
} }
return body return body
@ -230,7 +229,7 @@ func ReadBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body {
func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.Body) { func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.Body) {
data, err := rlp.EncodeToBytes(body) data, err := rlp.EncodeToBytes(body)
if err != nil { if err != nil {
log.Crit("Failed to RLP encode body", "err", err) utils.GetLogger().Crit("Failed to RLP encode body", "err", err)
} }
WriteBodyRLP(db, hash, number, data) WriteBodyRLP(db, hash, number, data)
} }
@ -238,7 +237,7 @@ func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.B
// DeleteBody removes all block body data associated with a hash. // DeleteBody removes all block body data associated with a hash.
func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) { func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) {
if err := db.Delete(blockBodyKey(number, hash)); err != nil { if err := db.Delete(blockBodyKey(number, hash)); err != nil {
log.Crit("Failed to delete block body", "err", err) utils.GetLogger().Crit("Failed to delete block body", "err", err)
} }
} }
@ -250,7 +249,7 @@ func ReadTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int {
} }
td := new(big.Int) td := new(big.Int)
if err := rlp.Decode(bytes.NewReader(data), td); err != nil { if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err) utils.GetLogger().Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
return nil return nil
} }
return td return td
@ -260,17 +259,17 @@ func ReadTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int {
func WriteTd(db DatabaseWriter, hash common.Hash, number uint64, td *big.Int) { func WriteTd(db DatabaseWriter, hash common.Hash, number uint64, td *big.Int) {
data, err := rlp.EncodeToBytes(td) data, err := rlp.EncodeToBytes(td)
if err != nil { if err != nil {
log.Crit("Failed to RLP encode block total difficulty", "err", err) utils.GetLogger().Crit("Failed to RLP encode block total difficulty", "err", err)
} }
if err := db.Put(headerTDKey(number, hash), data); err != nil { if err := db.Put(headerTDKey(number, hash), data); err != nil {
log.Crit("Failed to store block total difficulty", "err", err) utils.GetLogger().Crit("Failed to store block total difficulty", "err", err)
} }
} }
// DeleteTd removes all block total difficulty data associated with a hash. // DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) { func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) {
if err := db.Delete(headerTDKey(number, hash)); err != nil { if err := db.Delete(headerTDKey(number, hash)); err != nil {
log.Crit("Failed to delete block total difficulty", "err", err) utils.GetLogger().Crit("Failed to delete block total difficulty", "err", err)
} }
} }
@ -284,7 +283,7 @@ func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Rece
// Convert the receipts from their storage form to their internal representation // Convert the receipts from their storage form to their internal representation
storageReceipts := []*types.ReceiptForStorage{} storageReceipts := []*types.ReceiptForStorage{}
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil { if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
log.Error("Invalid receipt array RLP", "hash", hash, "err", err) utils.GetLogger().Error("Invalid receipt array RLP", "hash", hash, "err", err)
return nil return nil
} }
receipts := make(types.Receipts, len(storageReceipts)) receipts := make(types.Receipts, len(storageReceipts))
@ -303,18 +302,18 @@ func WriteReceipts(db DatabaseWriter, hash common.Hash, number uint64, receipts
} }
bytes, err := rlp.EncodeToBytes(storageReceipts) bytes, err := rlp.EncodeToBytes(storageReceipts)
if err != nil { if err != nil {
log.Crit("Failed to encode block receipts", "err", err) utils.GetLogger().Crit("Failed to encode block receipts", "err", err)
} }
// Store the flattened receipt slice // Store the flattened receipt slice
if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil { if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
log.Crit("Failed to store block receipts", "err", err) utils.GetLogger().Crit("Failed to store block receipts", "err", err)
} }
} }
// DeleteReceipts removes all receipt data associated with a block hash. // DeleteReceipts removes all receipt data associated with a block hash.
func DeleteReceipts(db DatabaseDeleter, hash common.Hash, number uint64) { func DeleteReceipts(db DatabaseDeleter, hash common.Hash, number uint64) {
if err := db.Delete(blockReceiptsKey(number, hash)); err != nil { if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
log.Crit("Failed to delete block receipts", "err", err) utils.GetLogger().Crit("Failed to delete block receipts", "err", err)
} }
} }

@ -18,9 +18,10 @@ package rawdb
import ( import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
) )
// ReadTxLookupEntry retrieves the positional metadata associated with a transaction // ReadTxLookupEntry retrieves the positional metadata associated with a transaction
@ -32,7 +33,7 @@ func ReadTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64
} }
var entry TxLookupEntry var entry TxLookupEntry
if err := rlp.DecodeBytes(data, &entry); err != nil { if err := rlp.DecodeBytes(data, &entry); err != nil {
log.Error("Invalid transaction lookup entry RLP", "hash", hash, "err", err) utils.GetLogger().Error("Invalid transaction lookup entry RLP", "hash", hash, "err", err)
return common.Hash{}, 0, 0 return common.Hash{}, 0, 0
} }
return entry.BlockHash, entry.BlockIndex, entry.Index return entry.BlockHash, entry.BlockIndex, entry.Index
@ -49,10 +50,10 @@ func WriteTxLookupEntries(db DatabaseWriter, block *types.Block) {
} }
data, err := rlp.EncodeToBytes(entry) data, err := rlp.EncodeToBytes(entry)
if err != nil { if err != nil {
log.Crit("Failed to encode transaction lookup entry", "err", err) utils.GetLogger().Crit("Failed to encode transaction lookup entry", "err", err)
} }
if err := db.Put(txLookupKey(tx.Hash()), data); err != nil { if err := db.Put(txLookupKey(tx.Hash()), data); err != nil {
log.Crit("Failed to store transaction lookup entry", "err", err) utils.GetLogger().Crit("Failed to store transaction lookup entry", "err", err)
} }
} }
} }
@ -71,7 +72,7 @@ func ReadTransaction(db DatabaseReader, hash common.Hash) (*types.Transaction, c
} }
body := ReadBody(db, blockHash, blockNumber) body := ReadBody(db, blockHash, blockNumber)
if body == nil || len(body.Transactions) <= int(txIndex) { if body == nil || len(body.Transactions) <= int(txIndex) {
log.Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash, "index", txIndex) utils.GetLogger().Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash, "index", txIndex)
return nil, common.Hash{}, 0, 0 return nil, common.Hash{}, 0, 0
} }
return body.Transactions[txIndex], blockHash, blockNumber, txIndex return body.Transactions[txIndex], blockHash, blockNumber, txIndex
@ -86,7 +87,7 @@ func ReadReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Ha
} }
receipts := ReadReceipts(db, blockHash, blockNumber) receipts := ReadReceipts(db, blockHash, blockNumber)
if len(receipts) <= int(receiptIndex) { if len(receipts) <= int(receiptIndex) {
log.Error("Receipt refereced missing", "number", blockNumber, "hash", blockHash, "index", receiptIndex) utils.GetLogger().Error("Receipt refereced missing", "number", blockNumber, "hash", blockHash, "index", receiptIndex)
return nil, common.Hash{}, 0, 0 return nil, common.Hash{}, 0, 0
} }
return receipts[receiptIndex], blockHash, blockNumber, receiptIndex return receipts[receiptIndex], blockHash, blockNumber, receiptIndex
@ -102,6 +103,6 @@ func ReadBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash
// section and bit index. // section and bit index.
func WriteBloomBits(db DatabaseWriter, bit uint, section uint64, head common.Hash, bits []byte) { func WriteBloomBits(db DatabaseWriter, bit uint, section uint64, head common.Hash, bits []byte) {
if err := db.Put(bloomBitsKey(bit, section, head), bits); err != nil { if err := db.Put(bloomBitsKey(bit, section, head), bits); err != nil {
log.Crit("Failed to store bloom bits", "err", err) utils.GetLogger().Crit("Failed to store bloom bits", "err", err)
} }
} }

@ -20,9 +20,10 @@ import (
"encoding/json" "encoding/json"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/internal/utils"
) )
// ReadDatabaseVersion retrieves the version number of the database. // ReadDatabaseVersion retrieves the version number of the database.
@ -39,7 +40,7 @@ func ReadDatabaseVersion(db DatabaseReader) int {
func WriteDatabaseVersion(db DatabaseWriter, version int) { func WriteDatabaseVersion(db DatabaseWriter, version int) {
enc, _ := rlp.EncodeToBytes(version) enc, _ := rlp.EncodeToBytes(version)
if err := db.Put(databaseVerisionKey, enc); err != nil { if err := db.Put(databaseVerisionKey, enc); err != nil {
log.Crit("Failed to store the database version", "err", err) utils.GetLogger().Crit("Failed to store the database version", "err", err)
} }
} }
@ -51,7 +52,7 @@ func ReadChainConfig(db DatabaseReader, hash common.Hash) *params.ChainConfig {
} }
var config params.ChainConfig var config params.ChainConfig
if err := json.Unmarshal(data, &config); err != nil { if err := json.Unmarshal(data, &config); err != nil {
log.Error("Invalid chain config JSON", "hash", hash, "err", err) utils.GetLogger().Error("Invalid chain config JSON", "hash", hash, "err", err)
return nil return nil
} }
return &config return &config
@ -64,10 +65,10 @@ func WriteChainConfig(db DatabaseWriter, hash common.Hash, cfg *params.ChainConf
} }
data, err := json.Marshal(cfg) data, err := json.Marshal(cfg)
if err != nil { if err != nil {
log.Crit("Failed to JSON encode chain config", "err", err) utils.GetLogger().Crit("Failed to JSON encode chain config", "err", err)
} }
if err := db.Put(configKey(hash), data); err != nil { if err := db.Put(configKey(hash), data); err != nil {
log.Crit("Failed to store chain config", "err", err) utils.GetLogger().Crit("Failed to store chain config", "err", err)
} }
} }
@ -82,7 +83,7 @@ func ReadPreimage(db DatabaseReader, hash common.Hash) []byte {
func WritePreimages(db DatabaseWriter, number uint64, preimages map[common.Hash][]byte) { func WritePreimages(db DatabaseWriter, number uint64, preimages map[common.Hash][]byte) {
for hash, preimage := range preimages { for hash, preimage := range preimages {
if err := db.Put(preimageKey(hash), preimage); err != nil { if err := db.Put(preimageKey(hash), preimage); err != nil {
log.Crit("Failed to store trie preimage", "err", err) utils.GetLogger().Crit("Failed to store trie preimage", "err", err)
} }
} }
preimageCounter.Inc(int64(len(preimages))) preimageCounter.Inc(int64(len(preimages)))

@ -22,9 +22,10 @@ import (
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/core/vm" "github.com/harmony-one/harmony/core/vm"
"github.com/harmony-one/harmony/internal/utils"
) )
var ( var (
@ -213,7 +214,7 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo
ret, st.gas, vmerr = evm.Call(sender, st.to(), st.data, st.gas, st.value) ret, st.gas, vmerr = evm.Call(sender, st.to(), st.data, st.gas, st.value)
} }
if vmerr != nil { if vmerr != nil {
log.Debug("VM returned with error", "err", vmerr) utils.GetLogger().Debug("VM returned with error", "err", vmerr)
// The only possible consensus-error would be if there wasn't // The only possible consensus-error would be if there wasn't
// sufficient balance to make the transfer happen. The first // sufficient balance to make the transfer happen. The first
// balance transfer may never fail. // balance transfer may never fail.

@ -22,9 +22,10 @@ import (
"os" "os"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
) )
// errNoActiveJournal is returned if a transaction is attempted to be inserted // errNoActiveJournal is returned if a transaction is attempted to be inserted
@ -82,7 +83,7 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
loadBatch := func(txs types.Transactions) { loadBatch := func(txs types.Transactions) {
for _, err := range add(txs) { for _, err := range add(txs) {
if err != nil { if err != nil {
log.Debug("Failed to add journaled transaction", "err", err) utils.GetLogger().Debug("Failed to add journaled transaction", "err", err)
dropped++ dropped++
} }
} }
@ -111,7 +112,7 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
batch = batch[:0] batch = batch[:0]
} }
} }
log.Info("Loaded local transaction journal", "transactions", total, "dropped", dropped) utils.GetLogger().Info("Loaded local transaction journal", "transactions", total, "dropped", dropped)
return failure return failure
} }
@ -160,7 +161,7 @@ func (journal *txJournal) rotate(all map[common.Address]types.Transactions) erro
return err return err
} }
journal.writer = sink journal.writer = sink
log.Info("Regenerated local transaction journal", "transactions", journaled, "accounts", len(all)) utils.GetLogger().Info("Regenerated local transaction journal", "transactions", journaled, "accounts", len(all))
return nil return nil
} }

@ -23,8 +23,9 @@ import (
"sort" "sort"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
) )
// nonceHeap is a heap.Interface implementation over 64bit unsigned integers for // nonceHeap is a heap.Interface implementation over 64bit unsigned integers for
@ -485,7 +486,7 @@ func (l *txPricedList) Underpriced(tx *types.Transaction, local *accountSet) boo
} }
// Check if the transaction is underpriced or not // Check if the transaction is underpriced or not
if len(*l.items) == 0 { if len(*l.items) == 0 {
log.Error("Pricing query for empty pool") // This cannot happen, print to catch programming errors utils.GetLogger().Error("Pricing query for empty pool") // This cannot happen, print to catch programming errors
return false return false
} }
cheapest := []*types.Transaction(*l.items)[0] cheapest := []*types.Transaction(*l.items)[0]

@ -29,11 +29,12 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/harmony-one/harmony/core/state" "github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
) )
const ( const (
@ -163,15 +164,15 @@ var DefaultTxPoolConfig = TxPoolConfig{
func (config *TxPoolConfig) sanitize() TxPoolConfig { func (config *TxPoolConfig) sanitize() TxPoolConfig {
conf := *config conf := *config
if conf.Rejournal < time.Second { if conf.Rejournal < time.Second {
log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) utils.GetLogger().Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
conf.Rejournal = time.Second conf.Rejournal = time.Second
} }
if conf.PriceLimit < 1 { if conf.PriceLimit < 1 {
log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit) utils.GetLogger().Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
conf.PriceLimit = DefaultTxPoolConfig.PriceLimit conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
} }
if conf.PriceBump < 1 { if conf.PriceBump < 1 {
log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) utils.GetLogger().Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
conf.PriceBump = DefaultTxPoolConfig.PriceBump conf.PriceBump = DefaultTxPoolConfig.PriceBump
} }
return conf return conf
@ -235,7 +236,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block
} }
pool.locals = newAccountSet(pool.signer) pool.locals = newAccountSet(pool.signer)
for _, addr := range config.Locals { for _, addr := range config.Locals {
log.Info("Setting new local account", "address", addr) utils.GetLogger().Info("Setting new local account", "address", addr)
pool.locals.add(addr) pool.locals.add(addr)
} }
pool.priced = newTxPricedList(pool.all) pool.priced = newTxPricedList(pool.all)
@ -246,10 +247,10 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block
pool.journal = newTxJournal(config.Journal) pool.journal = newTxJournal(config.Journal)
if err := pool.journal.load(pool.AddLocals); err != nil { if err := pool.journal.load(pool.AddLocals); err != nil {
log.Warn("Failed to load transaction journal", "err", err) utils.GetLogger().Warn("Failed to load transaction journal", "err", err)
} }
if err := pool.journal.rotate(pool.local()); err != nil { if err := pool.journal.rotate(pool.local()); err != nil {
log.Warn("Failed to rotate transaction journal", "err", err) utils.GetLogger().Warn("Failed to rotate transaction journal", "err", err)
} }
} }
// Subscribe events from blockchain // Subscribe events from blockchain
@ -310,7 +311,7 @@ func (pool *TxPool) loop() {
pool.mu.RUnlock() pool.mu.RUnlock()
if pending != prevPending || queued != prevQueued || stales != prevStales { if pending != prevPending || queued != prevQueued || stales != prevStales {
log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) utils.GetLogger().Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
prevPending, prevQueued, prevStales = pending, queued, stales prevPending, prevQueued, prevStales = pending, queued, stales
} }
@ -336,7 +337,7 @@ func (pool *TxPool) loop() {
if pool.journal != nil { if pool.journal != nil {
pool.mu.Lock() pool.mu.Lock()
if err := pool.journal.rotate(pool.local()); err != nil { if err := pool.journal.rotate(pool.local()); err != nil {
log.Warn("Failed to rotate local tx journal", "err", err) utils.GetLogger().Warn("Failed to rotate local tx journal", "err", err)
} }
pool.mu.Unlock() pool.mu.Unlock()
} }
@ -365,7 +366,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
newNum := newHead.Number.Uint64() newNum := newHead.Number.Uint64()
if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
log.Debug("Skipping deep transaction reorg", "depth", depth) utils.GetLogger().Debug("Skipping deep transaction reorg", "depth", depth)
} else { } else {
// Reorg seems shallow enough to pull in all transactions into memory // Reorg seems shallow enough to pull in all transactions into memory
var discarded, included types.Transactions var discarded, included types.Transactions
@ -377,26 +378,26 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
for rem.NumberU64() > add.NumberU64() { for rem.NumberU64() > add.NumberU64() {
discarded = append(discarded, rem.Transactions()...) discarded = append(discarded, rem.Transactions()...)
if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) utils.GetLogger().Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
return return
} }
} }
for add.NumberU64() > rem.NumberU64() { for add.NumberU64() > rem.NumberU64() {
included = append(included, add.Transactions()...) included = append(included, add.Transactions()...)
if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) utils.GetLogger().Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
return return
} }
} }
for rem.Hash() != add.Hash() { for rem.Hash() != add.Hash() {
discarded = append(discarded, rem.Transactions()...) discarded = append(discarded, rem.Transactions()...)
if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) utils.GetLogger().Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
return return
} }
included = append(included, add.Transactions()...) included = append(included, add.Transactions()...)
if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) utils.GetLogger().Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
return return
} }
} }
@ -409,7 +410,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
} }
statedb, err := pool.chain.StateAt(newHead.Root) statedb, err := pool.chain.StateAt(newHead.Root)
if err != nil { if err != nil {
log.Error("Failed to reset txpool state", "err", err) utils.GetLogger().Error("Failed to reset txpool state", "err", err)
return return
} }
pool.currentState = statedb pool.currentState = statedb
@ -417,7 +418,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
pool.currentMaxGas = newHead.GasLimit pool.currentMaxGas = newHead.GasLimit
// Inject any transactions discarded due to reorgs // Inject any transactions discarded due to reorgs
log.Debug("Reinjecting stale transactions", "count", len(reinject)) utils.GetLogger().Debug("Reinjecting stale transactions", "count", len(reinject))
//senderCacher.recover(pool.signer, reinject) //senderCacher.recover(pool.signer, reinject)
pool.addTxsLocked(reinject, false) pool.addTxsLocked(reinject, false)
@ -449,7 +450,7 @@ func (pool *TxPool) Stop() {
if pool.journal != nil { if pool.journal != nil {
pool.journal.close() pool.journal.close()
} }
log.Info("Transaction pool stopped") utils.GetLogger().Info("Transaction pool stopped")
} }
// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
@ -476,7 +477,7 @@ func (pool *TxPool) SetGasPrice(price *big.Int) {
for _, tx := range pool.priced.Cap(price, pool.locals) { for _, tx := range pool.priced.Cap(price, pool.locals) {
pool.removeTx(tx.Hash(), false) pool.removeTx(tx.Hash(), false)
} }
log.Info("Transaction pool price threshold updated", "price", price) utils.GetLogger().Info("Transaction pool price threshold updated", "price", price)
} }
// State returns the virtual managed state of the transaction pool. // State returns the virtual managed state of the transaction pool.
@ -622,12 +623,12 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
// If the transaction is already known, discard it // If the transaction is already known, discard it
hash := tx.Hash() hash := tx.Hash()
if pool.all.Get(hash) != nil { if pool.all.Get(hash) != nil {
log.Trace("Discarding already known transaction", "hash", hash) utils.GetLogger().Trace("Discarding already known transaction", "hash", hash)
return false, fmt.Errorf("known transaction: %x", hash) return false, fmt.Errorf("known transaction: %x", hash)
} }
// If the transaction fails basic validation, discard it // If the transaction fails basic validation, discard it
if err := pool.validateTx(tx, local); err != nil { if err := pool.validateTx(tx, local); err != nil {
log.Trace("Discarding invalid transaction", "hash", hash, "err", err) utils.GetLogger().Trace("Discarding invalid transaction", "hash", hash, "err", err)
invalidTxCounter.Inc(1) invalidTxCounter.Inc(1)
return false, err return false, err
} }
@ -635,14 +636,14 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue { if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
// If the new transaction is underpriced, don't accept it // If the new transaction is underpriced, don't accept it
if !local && pool.priced.Underpriced(tx, pool.locals) { if !local && pool.priced.Underpriced(tx, pool.locals) {
log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice()) utils.GetLogger().Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
underpricedTxCounter.Inc(1) underpricedTxCounter.Inc(1)
return false, ErrUnderpriced return false, ErrUnderpriced
} }
// New transaction is better than our worse ones, make room for it // New transaction is better than our worse ones, make room for it
drop := pool.priced.Discard(pool.all.Count()-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals) drop := pool.priced.Discard(pool.all.Count()-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals)
for _, tx := range drop { for _, tx := range drop {
log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice()) utils.GetLogger().Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
underpricedTxCounter.Inc(1) underpricedTxCounter.Inc(1)
pool.removeTx(tx.Hash(), false) pool.removeTx(tx.Hash(), false)
} }
@ -666,7 +667,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
pool.priced.Put(tx) pool.priced.Put(tx)
pool.journalTx(from, tx) pool.journalTx(from, tx)
log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) utils.GetLogger().Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
// We've directly injected a replacement transaction, notify subsystems // We've directly injected a replacement transaction, notify subsystems
// go pool.txFeed.Send(NewTxsEvent{types.Transactions{tx}}) // go pool.txFeed.Send(NewTxsEvent{types.Transactions{tx}})
@ -681,13 +682,13 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
// Mark local addresses and journal local transactions // Mark local addresses and journal local transactions
if local { if local {
if !pool.locals.contains(from) { if !pool.locals.contains(from) {
log.Info("Setting new local account", "address", from) utils.GetLogger().Info("Setting new local account", "address", from)
pool.locals.add(from) pool.locals.add(from)
} }
} }
pool.journalTx(from, tx) pool.journalTx(from, tx)
log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) utils.GetLogger().Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
return replace, nil return replace, nil
} }
@ -735,7 +736,7 @@ func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
return return
} }
if err := pool.journal.insert(tx); err != nil { if err := pool.journal.insert(tx); err != nil {
log.Warn("Failed to journal local transaction", "err", err) utils.GetLogger().Warn("Failed to journal local transaction", "err", err)
} }
} }
@ -949,7 +950,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
// Drop all transactions that are deemed too old (low nonce) // Drop all transactions that are deemed too old (low nonce)
for _, tx := range list.Forward(pool.currentState.GetNonce(addr)) { for _, tx := range list.Forward(pool.currentState.GetNonce(addr)) {
hash := tx.Hash() hash := tx.Hash()
log.Trace("Removed old queued transaction", "hash", hash) utils.GetLogger().Trace("Removed old queued transaction", "hash", hash)
pool.all.Remove(hash) pool.all.Remove(hash)
pool.priced.Removed() pool.priced.Removed()
} }
@ -957,7 +958,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
for _, tx := range drops { for _, tx := range drops {
hash := tx.Hash() hash := tx.Hash()
log.Trace("Removed unpayable queued transaction", "hash", hash) utils.GetLogger().Trace("Removed unpayable queued transaction", "hash", hash)
pool.all.Remove(hash) pool.all.Remove(hash)
pool.priced.Removed() pool.priced.Removed()
queuedNofundsCounter.Inc(1) queuedNofundsCounter.Inc(1)
@ -966,7 +967,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) { for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
hash := tx.Hash() hash := tx.Hash()
if pool.promoteTx(addr, hash, tx) { if pool.promoteTx(addr, hash, tx) {
log.Trace("Promoting queued transaction", "hash", hash) utils.GetLogger().Trace("Promoting queued transaction", "hash", hash)
promoted = append(promoted, tx) promoted = append(promoted, tx)
} }
} }
@ -977,7 +978,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
pool.all.Remove(hash) pool.all.Remove(hash)
pool.priced.Removed() pool.priced.Removed()
queuedRateLimitCounter.Inc(1) queuedRateLimitCounter.Inc(1)
log.Trace("Removed cap-exceeding queued transaction", "hash", hash) utils.GetLogger().Trace("Removed cap-exceeding queued transaction", "hash", hash)
} }
} }
// Delete the entire queue entry if it became empty. // Delete the entire queue entry if it became empty.
@ -1030,7 +1031,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
if nonce := tx.Nonce(); pool.pendingState.GetNonce(offenders[i]) > nonce { if nonce := tx.Nonce(); pool.pendingState.GetNonce(offenders[i]) > nonce {
pool.pendingState.SetNonce(offenders[i], nonce) pool.pendingState.SetNonce(offenders[i], nonce)
} }
log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) utils.GetLogger().Trace("Removed fairness-exceeding pending transaction", "hash", hash)
} }
pending-- pending--
} }
@ -1052,7 +1053,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
if nonce := tx.Nonce(); pool.pendingState.GetNonce(addr) > nonce { if nonce := tx.Nonce(); pool.pendingState.GetNonce(addr) > nonce {
pool.pendingState.SetNonce(addr, nonce) pool.pendingState.SetNonce(addr, nonce)
} }
log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) utils.GetLogger().Trace("Removed fairness-exceeding pending transaction", "hash", hash)
} }
pending-- pending--
} }
@ -1113,7 +1114,7 @@ func (pool *TxPool) demoteUnexecutables() {
// Drop all transactions that are deemed too old (low nonce) // Drop all transactions that are deemed too old (low nonce)
for _, tx := range list.Forward(nonce) { for _, tx := range list.Forward(nonce) {
hash := tx.Hash() hash := tx.Hash()
log.Trace("Removed old pending transaction", "hash", hash) utils.GetLogger().Trace("Removed old pending transaction", "hash", hash)
pool.all.Remove(hash) pool.all.Remove(hash)
pool.priced.Removed() pool.priced.Removed()
} }
@ -1121,21 +1122,21 @@ func (pool *TxPool) demoteUnexecutables() {
drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
for _, tx := range drops { for _, tx := range drops {
hash := tx.Hash() hash := tx.Hash()
log.Trace("Removed unpayable pending transaction", "hash", hash) utils.GetLogger().Trace("Removed unpayable pending transaction", "hash", hash)
pool.all.Remove(hash) pool.all.Remove(hash)
pool.priced.Removed() pool.priced.Removed()
pendingNofundsCounter.Inc(1) pendingNofundsCounter.Inc(1)
} }
for _, tx := range invalids { for _, tx := range invalids {
hash := tx.Hash() hash := tx.Hash()
log.Trace("Demoting pending transaction", "hash", hash) utils.GetLogger().Trace("Demoting pending transaction", "hash", hash)
pool.enqueueTx(hash, tx) pool.enqueueTx(hash, tx)
} }
// If there's a gap in front, alert (should never happen) and postpone all transactions // If there's a gap in front, alert (should never happen) and postpone all transactions
if list.Len() > 0 && list.txs.Get(nonce) == nil { if list.Len() > 0 && list.txs.Get(nonce) == nil {
for _, tx := range list.Cap(0) { for _, tx := range list.Cap(0) {
hash := tx.Hash() hash := tx.Hash()
log.Error("Demoting invalidated transaction", "hash", hash) utils.GetLogger().Error("Demoting invalidated transaction", "hash", hash)
pool.enqueueTx(hash, tx) pool.enqueueTx(hash, tx)
} }
} }

Loading…
Cancel
Save