You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
465 lines
17 KiB
465 lines
17 KiB
2 years ago
|
// Copyright 2018 The go-ethereum Authors
|
||
|
// This file is part of the go-ethereum library.
|
||
|
//
|
||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||
|
// the Free Software Foundation, either version 3 of the License, or
|
||
|
// (at your option) any later version.
|
||
|
//
|
||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||
|
// GNU Lesser General Public License for more details.
|
||
|
//
|
||
|
// You should have received a copy of the GNU Lesser General Public License
|
||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||
|
|
||
|
package rawdb
|
||
|
|
||
|
import (
|
||
|
"bytes"
|
||
|
"errors"
|
||
|
"fmt"
|
||
|
"os"
|
||
|
"path"
|
||
|
"path/filepath"
|
||
|
"strings"
|
||
|
"time"
|
||
|
|
||
|
"github.com/ethereum/go-ethereum/common"
|
||
|
"github.com/ethereum/go-ethereum/ethdb"
|
||
|
"github.com/ethereum/go-ethereum/ethdb/leveldb"
|
||
|
"github.com/ethereum/go-ethereum/ethdb/memorydb"
|
||
|
"github.com/ethereum/go-ethereum/log"
|
||
|
"github.com/harmony-one/harmony/internal/utils"
|
||
|
"github.com/olekukonko/tablewriter"
|
||
|
)
|
||
|
|
||
|
// errNotSupported is returned by every ancient-store operation on a database
// that has no backing freezer implementation (see nofreezedb).
var errNotSupported = errors.New("not supported")
|
||
|
|
||
|
// convertLegacyFn takes a raw freezer entry in an older format and
// returns it in the new format. It is the conversion callback accepted by
// MigrateTable implementations.
type convertLegacyFn = func([]byte) ([]byte, error)
|
||
|
|
||
|
// freezerdb is a database wrapper that enables freezer data retrievals,
// combining a fast key-value store with an append-only ancient store.
type freezerdb struct {
	ancientRoot string // root ancient directory, reported by AncientDatadir
	ethdb.KeyValueStore // fast key-value backend
	ethdb.AncientStore  // append-only backend for immutable chain segments
}
|
||
|
|
||
|
// AncientDatadir returns the path of root ancient directory.
|
||
|
func (frdb *freezerdb) AncientDatadir() (string, error) {
|
||
|
return frdb.ancientRoot, nil
|
||
|
}
|
||
|
|
||
|
// Close implements io.Closer, closing both the fast key-value store as well as
|
||
|
// the slow ancient tables.
|
||
|
func (frdb *freezerdb) Close() error {
|
||
|
var errs []error
|
||
|
if err := frdb.AncientStore.Close(); err != nil {
|
||
|
errs = append(errs, err)
|
||
|
}
|
||
|
if err := frdb.KeyValueStore.Close(); err != nil {
|
||
|
errs = append(errs, err)
|
||
|
}
|
||
|
if len(errs) != 0 {
|
||
|
return fmt.Errorf("%v", errs)
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
// nofreezedb is a database wrapper that disables freezer data retrievals.
// Every ancient-store method reports errNotSupported (except ReadAncients,
// see its comment), while key-value operations pass straight through.
type nofreezedb struct {
	ethdb.KeyValueStore // the only backing store; there is no freezer
}
|
||
|
|
||
|
// HasAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
	return false, errNotSupported
}

// Ancient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
	return nil, errNotSupported
}

// AncientRange returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
	return nil, errNotSupported
}

// Ancients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancients() (uint64, error) {
	return 0, errNotSupported
}

// Tail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Tail() (uint64, error) {
	return 0, errNotSupported
}

// AncientSize returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
	return 0, errNotSupported
}

// ModifyAncients is not supported without a freezer; it always returns
// errNotSupported without invoking the write callback.
func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
	return 0, errNotSupported
}

// TruncateHead returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateHead(items uint64) error {
	return errNotSupported
}

// TruncateTail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateTail(items uint64) error {
	return errNotSupported
}

// Sync returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Sync() error {
	return errNotSupported
}
|
||
|
|
||
|
// ReadAncients runs the given read operation directly against this
// (freezer-less) database rather than failing with errNotSupported.
func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
	// Unlike other ancient-related methods, this method does not return
	// errNotSupported when invoked.
	// The reason for this is that the caller might want to do several things:
	// 1. Check if something is in freezer,
	// 2. If not, check leveldb.
	//
	// This will work, since the ancient-checks inside 'fn' will return errors,
	// and the leveldb work will continue.
	//
	// If we instead were to return errNotSupported here, then the caller would
	// have to explicitly check for that, having an extra clause to do the
	// non-ancient operations.
	return fn(db)
}
|
||
|
|
||
|
// MigrateTable processes the entries in a given table in sequence
// converting them to a new format if they're of an old format.
// Without a freezer there are no tables to migrate, so this always
// returns errNotSupported.
func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
	return errNotSupported
}

// AncientDatadir returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientDatadir() (string, error) {
	return "", errNotSupported
}
|
||
|
|
||
|
// NewDatabase creates a high level database on top of a given key-value data
|
||
|
// store without a freezer moving immutable chain segments into cold storage.
|
||
|
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
|
||
|
return &nofreezedb{KeyValueStore: db}
|
||
|
}
|
||
|
|
||
|
// resolveChainFreezerDir is a helper function which resolves the absolute path
|
||
|
// of chain freezer by considering backward compatibility.
|
||
|
// This function is NOT used, just ported over from the Ethereum
|
||
|
func resolveChainFreezerDir(ancient string) string {
|
||
|
// Check if the chain freezer is already present in the specified
|
||
|
// sub folder, if not then two possibilities:
|
||
|
// - chain freezer is not initialized
|
||
|
// - chain freezer exists in legacy location (root ancient folder)
|
||
|
freezer := path.Join(ancient, chainFreezerName)
|
||
|
if !common.FileExist(freezer) {
|
||
|
if !common.FileExist(ancient) {
|
||
|
// The entire ancient store is not initialized, still use the sub
|
||
|
// folder for initialization.
|
||
|
} else {
|
||
|
// Ancient root is already initialized, then we hold the assumption
|
||
|
// that chain freezer is also initialized and located in root folder.
|
||
|
// In this case fallback to legacy location.
|
||
|
freezer = ancient
|
||
|
log.Info("Found legacy ancient chain path", "location", ancient)
|
||
|
}
|
||
|
}
|
||
|
return freezer
|
||
|
}
|
||
|
|
||
|
// NewMemoryDatabase creates an ephemeral in-memory key-value database without a
|
||
|
// freezer moving immutable chain segments into cold storage.
|
||
|
func NewMemoryDatabase() ethdb.Database {
|
||
|
return NewDatabase(memorydb.New())
|
||
|
}
|
||
|
|
||
|
// NewMemoryDatabaseWithCap creates an ephemeral in-memory key-value database
|
||
|
// with an initial starting capacity, but without a freezer moving immutable
|
||
|
// chain segments into cold storage.
|
||
|
func NewMemoryDatabaseWithCap(size int) ethdb.Database {
|
||
|
return NewDatabase(memorydb.NewWithCap(size))
|
||
|
}
|
||
|
|
||
|
// NewLevelDBDatabase creates a persistent key-value database without a freezer
|
||
|
// moving immutable chain segments into cold storage.
|
||
|
func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
|
||
|
db, err := leveldb.New(file, cache, handles, namespace, readonly)
|
||
|
if err != nil {
|
||
|
return nil, err
|
||
|
}
|
||
|
log.Info("Using LevelDB as the backing database")
|
||
|
return NewDatabase(db), nil
|
||
|
}
|
||
|
|
||
|
const (
|
||
|
dbPebble = "pebble"
|
||
|
dbLeveldb = "leveldb"
|
||
|
)
|
||
|
|
||
|
// hasPreexistingDb checks the given data directory whether a database is already
|
||
|
// instantiated at that location, and if so, returns the type of database (or the
|
||
|
// empty string).
|
||
|
func hasPreexistingDb(path string) string {
|
||
|
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
|
||
|
return "" // No pre-existing db
|
||
|
}
|
||
|
if matches, err := filepath.Glob(filepath.Join(path, "OPTIONS*")); len(matches) > 0 || err != nil {
|
||
|
if err != nil {
|
||
|
panic(err) // only possible if the pattern is malformed
|
||
|
}
|
||
|
return dbPebble
|
||
|
}
|
||
|
return dbLeveldb
|
||
|
}
|
||
|
|
||
|
// OpenOptions contains the options to apply when opening a database.
// OBS: If AncientsDirectory is empty, it indicates that no freezer is to be used.
type OpenOptions struct {
	Type              string // database engine: "leveldb" | "pebble"; empty selects from a pre-existing db or defaults to leveldb
	Directory         string // the datadir holding the key-value store
	AncientsDirectory string // the ancients-dir; empty disables the freezer
	Namespace         string // the namespace for database relevant metrics
	Cache             int    // the capacity (in megabytes) of the data caching
	Handles           int    // number of files to be open simultaneously
	ReadOnly          bool   // open the database in read-only mode
}
|
||
|
|
||
|
// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
|
||
|
//
|
||
|
// type == null type != null
|
||
|
// +----------------------------------------
|
||
|
// db is non-existent | leveldb default | specified type
|
||
|
// db is existent | from db | specified type (if compatible)
|
||
|
func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
|
||
|
existingDb := hasPreexistingDb(o.Directory)
|
||
|
if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb {
|
||
|
return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb)
|
||
|
}
|
||
|
if o.Type == dbPebble || existingDb == dbPebble {
|
||
|
if PebbleEnabled {
|
||
|
log.Info("Using pebble as the backing database")
|
||
|
return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
|
||
|
} else {
|
||
|
return nil, errors.New("db.engine 'pebble' not supported on this platform")
|
||
|
}
|
||
|
}
|
||
|
if len(o.Type) != 0 && o.Type != dbLeveldb {
|
||
|
return nil, fmt.Errorf("unknown db.engine %v", o.Type)
|
||
|
}
|
||
|
log.Info("Using leveldb as the backing database")
|
||
|
// Use leveldb, either as default (no explicit choice), or pre-existing, or chosen explicitly
|
||
|
return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
|
||
|
}
|
||
|
|
||
|
// counter accumulates an item count during database inspection and knows
// how to render itself for tabular output.
type counter uint64

// String implements fmt.Stringer, rendering the count in decimal.
func (c counter) String() string {
	return fmt.Sprintf("%d", c)
}

// Percentage renders current as an integer percentage of the total count c.
// NOTE(review): this divides by c, so a zero counter panics — confirm
// callers never invoke it on an empty counter.
func (c counter) Percentage(current uint64) string {
	pct := current * 100 / uint64(c)
	return fmt.Sprintf("%d", pct)
}
|
||
|
|
||
|
// stat stores sizes and count for a parameter
|
||
|
type stat struct {
|
||
|
size common.StorageSize
|
||
|
count counter
|
||
|
}
|
||
|
|
||
|
// Add size to the stat and increase the counter by 1
|
||
|
func (s *stat) Add(size common.StorageSize) {
|
||
|
s.size += size
|
||
|
s.count++
|
||
|
}
|
||
|
|
||
|
func (s *stat) Size() string {
|
||
|
return s.size.String()
|
||
|
}
|
||
|
|
||
|
func (s *stat) Count() string {
|
||
|
return s.count.String()
|
||
|
}
|
||
|
|
||
|
// InspectDatabase traverses the entire database and checks the size
// of all different categories of data, printing a summary table to stdout.
// keyPrefix/keyStart bound the iteration exactly as in ethdb.NewIterator.
// This function is NOT used, just ported over from the Ethereum codebase.
func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
	it := db.NewIterator(keyPrefix, keyStart)
	defer it.Release()

	var (
		count  int64
		start  = time.Now()
		logged = time.Now() // last time a progress line was emitted

		// Key-value store statistics
		headers         stat
		bodies          stat
		receipts        stat
		tds             stat
		numHashPairings stat
		hashNumPairings stat
		tries           stat
		codes           stat
		txLookups       stat
		accountSnaps    stat
		storageSnaps    stat
		preimages       stat
		bloomBits       stat
		beaconHeaders   stat
		cliqueSnaps     stat

		// Les statistic
		chtTrieNodes   stat
		bloomTrieNodes stat

		// Meta- and unaccounted data
		metadata    stat
		unaccounted stat

		// Totals
		total common.StorageSize
	)
	// Inspect key-value database first.
	for it.Next() {
		var (
			key  = it.Key()
			size = common.StorageSize(len(key) + len(it.Value()))
		)
		total += size
		// Classify the key by prefix/length. Case order matters: the
		// bare-hash trie-node case (len == HashLength) must come after the
		// more specific prefixed cases, or it would swallow them.
		switch {
		case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
			headers.Add(size)
		case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
			bodies.Add(size)
		case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
			receipts.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
			tds.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
			numHashPairings.Add(size)
		case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
			hashNumPairings.Add(size)
		case len(key) == common.HashLength:
			tries.Add(size)
		case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
			codes.Add(size)
		case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
			txLookups.Add(size)
		case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
			accountSnaps.Add(size)
		case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
			storageSnaps.Add(size)
		case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
			preimages.Add(size)
		case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, BloomBitsIndexPrefix):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
			beaconHeaders.Add(size)
		case bytes.HasPrefix(key, CliqueSnapshotPrefix) && len(key) == 7+common.HashLength:
			cliqueSnaps.Add(size)
		case bytes.HasPrefix(key, ChtTablePrefix) ||
			bytes.HasPrefix(key, ChtIndexTablePrefix) ||
			bytes.HasPrefix(key, ChtPrefix): // Canonical hash trie
			chtTrieNodes.Add(size)
		case bytes.HasPrefix(key, BloomTrieTablePrefix) ||
			bytes.HasPrefix(key, BloomTrieIndexPrefix) ||
			bytes.HasPrefix(key, BloomTriePrefix): // Bloomtrie sub
			bloomTrieNodes.Add(size)
		default:
			// No structural prefix matched: check the known singleton
			// metadata keys before declaring the entry unaccounted.
			var accounted bool
			for _, meta := range [][]byte{
				databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, headFinalizedBlockKey,
				lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
				snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
				uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
			} {
				if bytes.Equal(key, meta) {
					metadata.Add(size)
					accounted = true
					break
				}
			}
			if !accounted {
				unaccounted.Add(size)
			}
		}
		count++
		// Emit a progress line at most every ~8s (checked every 1000 keys).
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
	}
	// Display the database statistic of key-value store.
	stats := [][]string{
		{"Key-Value store", "Headers", headers.Size(), headers.Count()},
		{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
		{"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()},
		{"Key-Value store", "Difficulties", tds.Size(), tds.Count()},
		{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
		{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
		{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
		{"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()},
		{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
		{"Key-Value store", "Trie nodes", tries.Size(), tries.Count()},
		{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
		{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
		{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
		{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
		{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
		{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
		{"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
		{"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
	}
	// Inspect all registered append-only file store then.
	ancients, err := inspectFreezers(db)
	if err != nil {
		return err
	}
	for _, ancient := range ancients {
		for _, table := range ancient.sizes {
			// NOTE(review): strings.Title is deprecated since Go 1.18;
			// left as-is to avoid a new x/text dependency and an output
			// change in this unused, ported-over function.
			stats = append(stats, []string{
				fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.name)),
				strings.Title(table.name),
				table.size.String(),
				fmt.Sprintf("%d", ancient.count()),
			})
		}
		total += ancient.size()
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Database", "Category", "Size", "Items"})
	table.SetFooter([]string{"", "Total", total.String(), " "})
	table.AppendBulk(stats)
	table.Render()

	// Surface anything we could not classify so schema drift gets noticed.
	if unaccounted.size > 0 {
		utils.Logger().Error().
			Interface("size", unaccounted.size).
			Interface("count", unaccounted.count).
			Msg("Database contains unaccounted data")
	}
	return nil
}
|