Fix usages. (#4241)

Co-authored-by: Konstantin <k.potapov@softpro.com>
commit e4d008436c (pull/4246/head)
parent 2afaeb9f94
Author: Konstantin, committed via GitHub
Files changed:

  1. cmd/harmony/dumpdb.go (8 changed lines)
  2. core/blockchain_impl.go (4 changed lines)
  3. core/chain_indexer.go (20 changed lines)
  4. core/headerchain.go (12 changed lines)
  5. hmy/bloombits.go (38 changed lines)
  6. hmy/hmy.go (5 changed lines)

cmd/harmony/dumpdb.go

@@ -87,6 +87,14 @@ type KakashiDB struct {
     cache *lru.Cache
 }
 
+func (db *KakashiDB) GetCanonicalHash(number uint64) common.Hash {
+    return rawdb.ReadCanonicalHash(db, number)
+}
+
+func (db *KakashiDB) ChainDb() ethdb.Database {
+    return db
+}
+
 const (
     MB          = 1024 * 1024
     BLOCKS_DUMP = 512 // must >= 256

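The two methods added to KakashiDB above line up with the three-method core.Chain interface introduced in core/chain_indexer.go further down (GetCanonicalHash, GetHeader, ChainDb). As a rough sketch of that contract in isolation, not part of this patch and with type name and import paths assumed from the identifiers visible in the diff, a database-backed adapter could satisfy it by delegating to the rawdb helpers, much like KakashiDB does:

```go
// Sketch only: a minimal adapter satisfying the Chain surface used by the
// indexer. The type name and import paths are assumptions, not from this commit.
package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"

	"github.com/harmony-one/harmony/block"
	"github.com/harmony-one/harmony/core/rawdb"
)

// dbChain wraps a raw key-value database and exposes just the pieces the
// chain indexer needs: canonical hashes, headers, and the database itself.
type dbChain struct {
	db ethdb.Database
}

func (c *dbChain) GetCanonicalHash(number uint64) common.Hash {
	return rawdb.ReadCanonicalHash(c.db, number)
}

func (c *dbChain) GetHeader(hash common.Hash, number uint64) *block.Header {
	return rawdb.ReadHeader(c.db, hash, number)
}

func (c *dbChain) ChainDb() ethdb.Database {
	return c.db
}
```
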
core/blockchain_impl.go

@@ -633,7 +633,7 @@ func (bc *BlockChainImpl) ExportN(w io.Writer, first uint64, last uint64) error
 // writeHeadBlock writes a new head block
 func (bc *BlockChainImpl) writeHeadBlock(block *types.Block) error {
     // If the block is on a side chain or an unknown one, force other heads onto it too
-    updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
+    updateHeads := bc.GetCanonicalHash(block.NumberU64()) != block.Hash()
 
     // Add the block to the canonical chain number scheme and mark as the head
     batch := bc.ChainDb().NewBatch()
@@ -765,7 +765,7 @@ func (bc *BlockChainImpl) GetBlockByHash(hash common.Hash) *types.Block {
 }
 
 func (bc *BlockChainImpl) GetBlockByNumber(number uint64) *types.Block {
-    hash := rawdb.ReadCanonicalHash(bc.db, number)
+    hash := bc.GetCanonicalHash(number)
     if hash == (common.Hash{}) {
         return nil
     }

core/chain_indexer.go

@@ -34,6 +34,12 @@ import (
     "github.com/harmony-one/harmony/internal/utils"
 )
 
+type Chain interface {
+    GetCanonicalHash(number uint64) common.Hash
+    GetHeader(hash common.Hash, number uint64) *block.Header
+    ChainDb() ethdb.Database
+}
+
 // ChainIndexerBackend defines the methods needed to process chain segments in
 // the background and write the segment results into the database. These can be
 // used to create filter blooms or CHTs.
@@ -69,7 +75,7 @@ type ChainIndexerChain interface {
 // after an entire section has been finished or in case of rollbacks that might
 // affect already finished sections.
 type ChainIndexer struct {
-    chainDb  ethdb.Database      // Chain database to index the data from
+    chainDb  Chain               // Chain database to index the data from
     indexDb  ethdb.Database      // Prefixed table-view of the db to write index metadata into
     backend  ChainIndexerBackend // Background processor generating the index data content
     children []*ChainIndexer     // Child indexers to cascade chain updates to
@@ -99,7 +105,7 @@ type ChainIndexer struct {
 // NewChainIndexer creates a new chain indexer to do background processing on
 // chain segments of a given size after certain number of confirmations passed.
 // The throttling parameter might be used to prevent database thrashing.
-func NewChainIndexer(chainDb ethdb.Database, indexDb ethdb.Database, backend ChainIndexerBackend, section, confirm uint64, throttling time.Duration, kind string) *ChainIndexer {
+func NewChainIndexer(chainDb Chain, indexDb ethdb.Database, backend ChainIndexerBackend, section, confirm uint64, throttling time.Duration, kind string) *ChainIndexer {
     logger := utils.Logger().With().Str("type", kind).Logger()
     c := &ChainIndexer{
         chainDb: chainDb,
@@ -225,8 +231,8 @@ func (c *ChainIndexer) eventLoop(currentHeader *block.Header, events chan ChainH
                 // Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then)
                 // TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?
-                if rawdb.ReadCanonicalHash(c.chainDb, prevHeader.Number().Uint64()) != prevHash {
-                    if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
+                if c.chainDb.GetCanonicalHash(prevHeader.Number().Uint64()) != prevHash {
+                    if h := rawdb.FindCommonAncestor(c.chainDb.ChainDb(), prevHeader, header); h != nil {
                         c.newHead(h.Number().Uint64(), true)
                     }
                 }
@@ -282,7 +288,7 @@ func (c *ChainIndexer) newHead(head uint64, reorg bool) {
     if sections > c.knownSections {
         if c.knownSections < c.checkpointSections {
             // syncing reached the checkpoint, verify section head
-            syncedHead := rawdb.ReadCanonicalHash(c.chainDb, c.checkpointSections*c.sectionSize-1)
+            syncedHead := c.chainDb.GetCanonicalHash(c.checkpointSections*c.sectionSize - 1)
             if syncedHead != c.checkpointHead {
                 c.log.Error().
                     Uint64("number", c.checkpointSections*c.sectionSize-1).
@@ -397,11 +403,11 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
     }
 
     for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ {
-        hash := rawdb.ReadCanonicalHash(c.chainDb, number)
+        hash := c.chainDb.GetCanonicalHash(number)
         if hash == (common.Hash{}) {
             return common.Hash{}, fmt.Errorf("canonical block #%d unknown", number)
         }
-        header := rawdb.ReadHeader(c.chainDb, hash, number)
+        header := c.chainDb.GetHeader(hash, number)
         if header == nil {
             return common.Hash{}, fmt.Errorf("block #%d [%x…] not found", number, hash[:4])
         } else if header.ParentHash() != lastHead {

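With the signature change above, NewChainIndexer takes the chain itself rather than its backing database: canonical hashes and headers are read through the Chain interface, while index metadata still lands in a prefixed table of the underlying database. A hedged usage sketch (the wrapper function, table prefix, and tuning values below are illustrative, not taken from this patch):

```go
// Sketch only: wiring a ChainIndexer against the new core.Chain parameter.
package sketch

import (
	"time"

	ethRawDB "github.com/ethereum/go-ethereum/core/rawdb"

	"github.com/harmony-one/harmony/core"
)

func newExampleIndexer(chain core.Chain, backend core.ChainIndexerBackend) *core.ChainIndexer {
	// Index metadata goes into a prefixed view of the chain's database;
	// canonical hashes and headers are read through the Chain interface.
	table := ethRawDB.NewTable(chain.ChainDb(), "example-index-")
	// 4096-block sections, 256 confirmations, 100ms throttling: illustrative values.
	return core.NewChainIndexer(chain, table, backend, 4096, 256, 100*time.Millisecond, "example")
}
```
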
core/headerchain.go

@@ -348,9 +348,9 @@ func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, ma
         return common.Hash{}, 0
     }
     for ancestor != 0 {
-        if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
+        if hc.GetCanonicalHash(number) == hash {
             number -= ancestor
-            return rawdb.ReadCanonicalHash(hc.chainDb, number), number
+            return hc.GetCanonicalHash(number), number
         }
         if *maxNonCanonical == 0 {
             return common.Hash{}, 0
@@ -448,6 +448,10 @@ func (hc *HeaderChain) GetHeaderByNumber(number uint64) *block.Header {
 }
 
 func (hc *HeaderChain) getHashByNumber(number uint64) common.Hash {
+    return hc.GetCanonicalHash(number)
+}
+
+func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
     // Since canonical chain is immutable, it's safe to read header
     // hash by number from cache.
     if hash, ok := hc.canonicalCache.Get(number); ok {
@@ -460,10 +464,6 @@ func (hc *HeaderChain) getHashByNumber(number uint64) common.Hash {
     return hash
 }
 
-func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
-    return rawdb.ReadCanonicalHash(hc.chainDb, number)
-}
-
 // CurrentHeader retrieves the current head header of the canonical chain. The
 // header is retrieved from the HeaderChain's internal cache.
 func (hc *HeaderChain) CurrentHeader() *block.Header {

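After the rename in core/headerchain.go, GetCanonicalHash carries the cache logic and getHashByNumber simply forwards to it, so every caller switched in this commit (blockchain, chain indexer, dumpdb) goes through the same LRU instead of re-reading the database. A standalone sketch of that lookup pattern, with hypothetical type names and an illustrative cache size:

```go
// Sketch only: an LRU-fronted canonical-hash lookup, the pattern that
// GetCanonicalHash now provides to its callers. Names, the cache size, and
// import paths are illustrative, not taken from this commit.
package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	lru "github.com/hashicorp/golang-lru"

	"github.com/harmony-one/harmony/core/rawdb"
)

type canonicalReader struct {
	chainDb        ethdb.Database
	canonicalCache *lru.Cache // block number -> canonical hash
}

func newCanonicalReader(db ethdb.Database) (*canonicalReader, error) {
	cache, err := lru.New(4096) // illustrative size
	if err != nil {
		return nil, err
	}
	return &canonicalReader{chainDb: db, canonicalCache: cache}, nil
}

func (r *canonicalReader) GetCanonicalHash(number uint64) common.Hash {
	// Mirrors the comment in headerchain.go: the canonical chain is treated
	// as immutable, so a cached entry can be returned without revalidation.
	if hash, ok := r.canonicalCache.Get(number); ok {
		return hash.(common.Hash)
	}
	hash := rawdb.ReadCanonicalHash(r.chainDb, number)
	if hash != (common.Hash{}) {
		r.canonicalCache.Add(number, hash)
	}
	return hash
}
```
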
hmy/bloombits.go

@@ -49,38 +49,6 @@ const (
     bloomRetrievalWait = time.Duration(0)
 )
 
-// startBloomHandlers starts a batch of goroutines to accept bloom bit database
-// retrievals from possibly a range of filters and serving the data to satisfy.
-func (hmy *Harmony) startBloomHandlers(sectionSize uint64) {
-    for i := 0; i < bloomServiceThreads; i++ {
-        go func() {
-            for {
-                select {
-                case <-hmy.ShutdownChan:
-                    return
-
-                case request := <-hmy.BloomRequests:
-                    task := <-request
-
-                    task.Bitsets = make([][]byte, len(task.Sections))
-                    for i, section := range task.Sections {
-                        head := rawdb.ReadCanonicalHash(hmy.chainDb, (section+1)*sectionSize-1)
-                        if compVector, err := rawdb.ReadBloomBits(hmy.chainDb, task.Bit, section, head); err == nil {
-                            if blob, err := bitutil.DecompressBytes(compVector, int(sectionSize/8)); err == nil {
-                                task.Bitsets[i] = blob
-                            } else {
-                                task.Error = err
-                            }
-                        } else {
-                            task.Error = err
-                        }
-                    }
-                    request <- task
-                }
-            }
-        }()
-    }
-}
 const (
     // bloomThrottling is the time to wait between processing two consecutive index
     // sections. It's useful during chain upgrades to prevent disk overload.
@@ -99,12 +67,12 @@ type BloomIndexer struct {
 
 // NewBloomIndexer returns a chain indexer that generates bloom bits data for the
 // canonical chain for fast logs filtering.
-func NewBloomIndexer(db ethdb.Database, size, confirms uint64) *core.ChainIndexer {
+func NewBloomIndexer(db core.Chain, size, confirms uint64) *core.ChainIndexer {
     backend := &BloomIndexer{
-        db:   db,
+        db:   db.ChainDb(),
         size: size,
     }
-    table := ethRawDB.NewTable(db, string(rawdb.BloomBitsIndexPrefix))
+    table := ethRawDB.NewTable(db.ChainDb(), string(rawdb.BloomBitsIndexPrefix))
 
     return core.NewChainIndexer(db, table, backend, size, confirms, bloomThrottling, "bloombits")
 }

hmy/hmy.go

@@ -123,12 +123,11 @@ type NodeAPI interface {
 func New(
     nodeAPI NodeAPI, txPool *core.TxPool, cxPool *core.CxPool, shardID uint32,
 ) *Harmony {
-    chainDb := nodeAPI.Blockchain().ChainDb()
     leaderCache, _ := lru.New(leaderCacheSize)
     undelegationPayoutsCache, _ := lru.New(undelegationPayoutsCacheSize)
     preStakingBlockRewardsCache, _ := lru.New(preStakingBlockRewardsCacheSize)
     totalStakeCache := newTotalStakeCache(totalStakeCacheDuration)
-    bloomIndexer := NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms)
+    bloomIndexer := NewBloomIndexer(nodeAPI.Blockchain(), params.BloomBitsBlocks, params.BloomConfirms)
     bloomIndexer.Start(nodeAPI.Blockchain())
 
     backend := &Harmony{
@@ -140,7 +139,7 @@ func New(
         TxPool:   txPool,
         CxPool:   cxPool,
         eventMux: new(event.TypeMux),
-        chainDb:  chainDb,
+        chainDb:  nodeAPI.Blockchain().ChainDb(),
         NodeAPI:  nodeAPI,
         ChainID:  nodeAPI.Blockchain().Config().ChainID.Uint64(),
         EthChainID: nodeAPI.Blockchain().Config().EthCompatibleChainID.Uint64(),
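
Taken together, the wiring above only type-checks if the value returned by nodeAPI.Blockchain() satisfies the new core.Chain interface, which the blockchain picks up through the methods touched in core/headerchain.go and core/blockchain_impl.go. One hedged way to pin that requirement down at compile time, assuming the concrete type is core.BlockChainImpl as named in this diff, is an interface assertion (illustrative, not part of this patch):

```go
// Sketch only: a compile-time assertion that the blockchain implements the
// Chain interface consumed by NewBloomIndexer and NewChainIndexer. Whether
// the assertion should target *core.BlockChainImpl or the interface returned
// by NodeAPI.Blockchain() depends on code outside this diff.
package sketch

import "github.com/harmony-one/harmony/core"

var _ core.Chain = (*core.BlockChainImpl)(nil)
```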
