@@ -1192,6 +1192,171 @@ func (bc *BlockChainImpl) Rollback(chain []common.Hash) error {
	return bc.removeInValidatorList(valsToRemove)
}

// SetReceiptsData computes all the non-consensus fields of the receipts
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
	signer := types.MakeSigner(config, block.Epoch())
	ethSigner := types.NewEIP155Signer(config.EthCompatibleChainID)

	transactions, stakingTransactions, logIndex := block.Transactions(), block.StakingTransactions(), uint(0)
	if len(transactions)+len(stakingTransactions) != len(receipts) {
		return errors.New("transaction+stakingTransactions and receipt count mismatch")
	}

	// The used gas can be calculated based on previous receipts
	if len(receipts) > 0 && len(transactions) > 0 {
		receipts[0].GasUsed = receipts[0].CumulativeGasUsed
	}
	for j := 1; j < len(transactions); j++ {
		// The transaction hash can be retrieved from the transaction itself
		receipts[j].TxHash = transactions[j].Hash()
		receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		// The contract address can be derived from the transaction itself
		if transactions[j].To() == nil {
			// Deriving the signer is expensive, only do if it's actually needed
			var from common.Address
			if transactions[j].IsEthCompatible() {
				from, _ = types.Sender(ethSigner, transactions[j])
			} else {
				from, _ = types.Sender(signer, transactions[j])
			}
			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
		}
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j)
			receipts[j].Logs[k].Index = logIndex
			logIndex++
		}
	}

	// The used gas can be calculated based on previous receipts
	if len(receipts) > len(transactions) && len(stakingTransactions) > 0 {
		receipts[len(transactions)].GasUsed = receipts[len(transactions)].CumulativeGasUsed
	}
	// in a block, txns are processed before staking txns
	for j := len(transactions) + 1; j < len(transactions)+len(stakingTransactions); j++ {
		// The transaction hash can be retrieved from the staking transaction itself
		receipts[j].TxHash = stakingTransactions[j].Hash()
		receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j) + uint(len(transactions))
			receipts[j].Logs[k].Index = logIndex
			logIndex++
		}
	}
	return nil
}
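
As an aside, the GasUsed bookkeeping above is just a difference of running totals: each receipt stores the cumulative gas consumed up to and including its transaction, so per-transaction gas is the first receipt's cumulative value and, after that, the delta between adjacent receipts. A minimal standalone sketch of that arithmetic follows; the helper name gasUsedFromCumulative is illustrative only and is not part of this change or of the types package.

package main

import "fmt"

// gasUsedFromCumulative recovers per-transaction gas from the running
// cumulative-gas totals carried by receipts.
func gasUsedFromCumulative(cumulative []uint64) []uint64 {
	used := make([]uint64, len(cumulative))
	for i, c := range cumulative {
		if i == 0 {
			used[i] = c // first receipt: its cumulative total is its own gas
			continue
		}
		used[i] = c - cumulative[i-1] // later receipts: delta from the previous receipt
	}
	return used
}

func main() {
	// Cumulative totals for three transactions in one block.
	fmt.Println(gasUsedFromCumulative([]uint64{21000, 74000, 95000})) // [21000 53000 21000]
}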

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChainImpl) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			utils.Logger().Error().
				Str("number", blockChain[i].Number().String()).
				Str("hash", blockChain[i].Hash().Hex()).
				Str("parent", blockChain[i].ParentHash().Hex()).
				Str("prevnumber", blockChain[i-1].Number().String()).
				Str("prevhash", blockChain[i-1].Hash().Hex()).
				Msg("Non contiguous receipt insert")
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return 0, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
			return 0, fmt.Errorf("failed to set receipts data: %v", err)
		}
		// Write all the data out into the database
		if err := rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil {
			return 0, err
		}
		if err := rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
			return 0, err
		}
		if err := rawdb.WriteBlockTxLookUpEntries(batch, block); err != nil {
			return 0, err
		}
		if err := rawdb.WriteBlockStxLookUpEntries(batch, block); err != nil {
			return 0, err
		}

		stats.processed++

		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.mu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		currentFastBlock := bc.CurrentFastBlock()
		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
			bc.currentFastBlock.Store(head)
			headFastBlockGauge.Update(int64(head.NumberU64()))
		}
	}
	bc.mu.Unlock()

	utils.Logger().Info().
		Int32("count", stats.processed).
		Str("elapsed", common.PrettyDuration(time.Since(start)).String()).
		Str("age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)).String()).
		Str("head", head.Number().String()).
		Str("hash", head.Hash().Hex()).
		Str("size", common.StorageSize(bytes).String()).
		Int32("ignored", stats.ignored).
		Msg("Imported new block receipts")

	return 0, nil
}
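
The write path above follows a common batching pattern: stage the body, receipts, and lookup entries for each block in a database batch, flush whenever the pending batch crosses an ideal size, and flush once more at the end for whatever remains. Below is a rough standalone sketch of just that control flow with a toy in-memory batch; memBatch and idealBatchSize are illustrative stand-ins, not the ethdb API and not part of this change.

package main

import "fmt"

const idealBatchSize = 100 * 1024 // flush threshold, analogous in spirit to ethdb.IdealBatchSize

// memBatch is a toy stand-in for a database write batch.
type memBatch struct {
	pending map[string][]byte
	size    int
}

func newMemBatch() *memBatch { return &memBatch{pending: make(map[string][]byte)} }

// Put stages a key/value pair and tracks the approximate batch size.
func (b *memBatch) Put(key string, val []byte) {
	b.pending[key] = val
	b.size += len(key) + len(val)
}

func (b *memBatch) ValueSize() int { return b.size }

// Write "commits" the batch; here it only reports what would be flushed.
func (b *memBatch) Write() error {
	fmt.Printf("flushing %d entries (%d bytes)\n", len(b.pending), b.size)
	return nil
}

// Reset clears the batch so it can be reused after a flush.
func (b *memBatch) Reset() {
	b.pending = make(map[string][]byte)
	b.size = 0
}

func main() {
	batch := newMemBatch()
	for i := 0; i < 10000; i++ {
		batch.Put(fmt.Sprintf("key-%d", i), make([]byte, 32))
		// Flush eagerly once the batch grows past the ideal size.
		if batch.ValueSize() >= idealBatchSize {
			if err := batch.Write(); err != nil {
				panic(err)
			}
			batch.Reset()
		}
	}
	// Final flush for anything still pending.
	if batch.ValueSize() > 0 {
		if err := batch.Write(); err != nil {
			panic(err)
		}
	}
}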

var lastWrite uint64

func (bc *BlockChainImpl) WriteBlockWithoutState(block *types.Block) (err error) {