@@ -18,6 +18,7 @@
package core
import (
"bytes"
"errors"
"fmt"
"io"
@@ -58,19 +59,20 @@ var (
)
const (
bodyCacheLimit = 256
blockCacheLimit = 256
receiptsCacheLimit = 32
maxFutureBlocks = 256
maxTimeFutureBlocks = 30
badBlockLimit = 10
triesInMemory = 128
shardCacheLimit = 2
commitsCacheLimit = 10
epochCacheLimit = 10
randomnessCacheLimit = 10
stakingCacheLimit = 256
validatorListCacheLimit = 2
bodyCacheLimit = 256
blockCacheLimit = 256
receiptsCacheLimit = 32
maxFutureBlocks = 256
maxTimeFutureBlocks = 30
badBlockLimit = 10
triesInMemory = 128
shardCacheLimit = 2
commitsCacheLimit = 10
epochCacheLimit = 10
randomnessCacheLimit = 10
stakingCacheLimit = 256
validatorListCacheLimit = 2
validatorListByDelegatorCacheLimit = 256
// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
BlockChainVersion = 3
@@ -123,18 +125,19 @@ type BlockChain struct {
currentBlock atomic.Value // Current head of the block chain
currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
stateCache state.Database // State database to reuse between imports (contains state cache)
bodyCache *lru.Cache // Cache for the most recent block bodies
bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
receiptsCache *lru.Cache // Cache for the most recent receipts per block
blockCache *lru.Cache // Cache for the most recent entire blocks
futureBlocks *lru.Cache // future blocks are blocks added for later processing
shardStateCache *lru.Cache
lastCommitsCache *lru.Cache
epochCache *lru.Cache // Cache epoch number → first block number
randomnessCache *lru.Cache // Cache for vrf/vdf
stakingCache *lru.Cache // Cache for staking validator
validatorListCache *lru.Cache // Cache of validator list
stateCache state.Database // State database to reuse between imports (contains state cache)
bodyCache *lru.Cache // Cache for the most recent block bodies
bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
receiptsCache *lru.Cache // Cache for the most recent receipts per block
blockCache *lru.Cache // Cache for the most recent entire blocks
futureBlocks *lru.Cache // future blocks are blocks added for later processing
shardStateCache *lru.Cache
lastCommitsCache *lru.Cache
epochCache *lru.Cache // Cache epoch number → first block number
randomnessCache *lru.Cache // Cache for vrf/vdf
stakingCache *lru.Cache // Cache for staking validator
validatorListCache *lru.Cache // Cache of validator list
validatorListByDelegatorCache *lru.Cache // Cache of validator list by delegator
quit chan struct{} // blockchain quit channel
running int32 // running must be called atomically
@@ -173,29 +176,31 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
randomnessCache, _ := lru.New(randomnessCacheLimit)
stakingCache, _ := lru.New(stakingCacheLimit)
validatorListCache, _ := lru.New(validatorListCacheLimit)
validatorListByDelegatorCache, _ := lru.New(validatorListByDelegatorCacheLimit)
bc := &BlockChain{
chainConfig: chainConfig,
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
stateCache: state.NewDatabase(db),
quit: make(chan struct{}),
shouldPreserve: shouldPreserve,
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
receiptsCache: receiptsCache,
blockCache: blockCache,
futureBlocks: futureBlocks,
shardStateCache: shardCache,
lastCommitsCache: commitsCache,
epochCache: epochCache,
randomnessCache: randomnessCache,
stakingCache: stakingCache,
validatorListCache: validatorListCache,
engine: engine,
vmConfig: vmConfig,
badBlocks: badBlocks,
chainConfig: chainConfig,
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
stateCache: state.NewDatabase(db),
quit: make(chan struct{}),
shouldPreserve: shouldPreserve,
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
receiptsCache: receiptsCache,
blockCache: blockCache,
futureBlocks: futureBlocks,
shardStateCache: shardCache,
lastCommitsCache: commitsCache,
epochCache: epochCache,
randomnessCache: randomnessCache,
stakingCache: stakingCache,
validatorListCache: validatorListCache,
validatorListByDelegatorCache: validatorListByDelegatorCache,
engine: engine,
vmConfig: vmConfig,
badBlocks: badBlocks,
}
bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))
@@ -1068,6 +1073,8 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
}
// Write other block data using a batch.
// TODO: put following into a func
/////////////////////////// START
batch := bc.db.NewBatch()
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
@@ -1083,25 +1090,90 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
err := rawdb.WriteCXReceipts(batch, uint32(i), block.NumberU64(), block.Hash(), shardReceipts, false)
if err != nil {
utils.Logger().Debug().Err(err).Interface("shardReceipts", shardReceipts).Int("toShardID", i).Msg("WriteCXReceipts cannot write into database")
return NonStatTy, err
}
}
// Mark incomingReceipts in the block as spent
bc.WriteCXReceiptsProofSpent(block.IncomingReceipts())
}
// check non zero VRF field in header and add to local db
if len(block.Vrf()) > 0 {
vrfBlockNumbers, _ := bc.ReadEpochVrfBlockNums(block.Header().Epoch())
if (len(vrfBlockNumbers) > 0) && (vrfBlockNumbers[len(vrfBlockNumbers)-1] == block.NumberU64()) {
utils.Logger().Error().
Str("number", block.Number().String()).
Str("epoch", block.Header().Epoch().String()).
Msg("VRF block number is already in local db")
} else {
vrfBlockNumbers = append(vrfBlockNumbers, block.NumberU64())
err = bc.WriteEpochVrfBlockNums(block.Header().Epoch(), vrfBlockNumbers)
if err != nil {
utils.Logger().Error().
Str("number", block.Number().String()).
Str("epoch", block.Header().Epoch().String()).
Msg("failed to write VRF block number to local db")
return NonStatTy, err
}
}
}
// check non zero Vdf in header and add to local db
if len(block.Vdf()) > 0 {
err = bc.WriteEpochVdfBlockNum(block.Header().Epoch(), block.Number())
if err != nil {
utils.Logger().Error().
Str("number", block.Number().String()).
Str("epoch", block.Header().Epoch().String()).
Msg("failed to write VDF block number to local db")
return NonStatTy, err
}
}
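// If the header proposes a new shard state, persist it under the next epoch.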
header := block.Header()
if header.ShardStateHash() != (common.Hash{}) {
epoch := new(big.Int).Add(header.Epoch(), common.Big1)
err = bc.WriteShardStateBytes(batch, epoch, header.ShardState())
if err != nil {
header.Logger(utils.Logger()).Warn().Err(err).Msg("cannot store shard state")
return NonStatTy, err
}
}
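// Decode the cross links carried in the header, verify they are sorted, then persist each one and record the latest cross link per shard.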
if len(header.CrossLinks()) > 0 {
crossLinks := &types.CrossLinks{}
err = rlp.DecodeBytes(header.CrossLinks(), crossLinks)
if err != nil {
header.Logger(utils.Logger()).Warn().Err(err).Msg("[insertChain] cannot parse cross links")
return NonStatTy, err
}
if !crossLinks.IsSorted() {
header.Logger(utils.Logger()).Warn().Err(err).Msg("[insertChain] cross links are not sorted")
return NonStatTy, errors.New("proposed cross links are not sorted")
}
for _, crossLink := range *crossLinks {
if err := bc.WriteCrossLinks(types.CrossLinks{crossLink}, false); err == nil {
utils.Logger().Info().Uint64("blockNum", crossLink.BlockNum().Uint64()).Uint32("shardID", crossLink.ShardID()).Msg("[InsertChain] Cross Link Added to Beaconchain")
}
bc.DeleteCrossLinks(types.CrossLinks{crossLink}, true)
bc.WriteShardLastCrossLink(crossLink.ShardID(), crossLink)
}
}
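// Once staking is enabled for this epoch, apply each staking transaction to the off-chain staking metadata.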
if bc.chainConfig.IsStaking(block.Epoch()) {
for _, tx := range block.StakingTransactions() {
err = bc.UpdateValidatorList(tx)
err = bc.UpdateStakingMetaData(tx)
// To keep the off-chain database consistent with the on-chain state we would need to revert here,
// but this should not happen unless the local database is corrupted.
if err != nil {
utils.Logger().Debug().Msgf("oops, UpdateValidatorList failed, err: %+v", err)
utils.Logger().Debug().Msgf("oops, UpdateStakingMetaData failed, err: %+v", err)
return NonStatTy, err
}
}
}
/////////////////////////// END
// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
@@ -1145,49 +1217,6 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) {
n, events, logs, err := bc.insertChain(chain, verifyHeaders)
if err == nil {
// TODO: incorporate these into insertChain
for idx, block := range chain {
header := block.Header()
header.Logger(utils.Logger()).Info().
Int("segmentIndex", idx).
Str("parentHash", header.ParentHash().Hex()).
Msg("added block to chain")
// TODO: move into WriteBlockWithState
if header.ShardStateHash() != (common.Hash{}) {
epoch := new(big.Int).Add(header.Epoch(), common.Big1)
err = bc.WriteShardStateBytes(epoch, header.ShardState())
if err != nil {
header.Logger(utils.Logger()).Warn().Err(err).Msg("cannot store shard state")
return n, err
}
}
// TODO: move into WriteBlockWithState
if len(header.CrossLinks()) > 0 {
crossLinks := &types.CrossLinks{}
err = rlp.DecodeBytes(header.CrossLinks(), crossLinks)
if err != nil {
header.Logger(utils.Logger()).Warn().Err(err).Msg("[insertChain] cannot parse cross links")
return n, err
}
if !crossLinks.IsSorted() {
header.Logger(utils.Logger()).Warn().Err(err).Msg("[insertChain] cross links are not sorted")
return n, errors.New("proposed cross links are not sorted")
}
for _, crossLink := range *crossLinks {
if err := bc.WriteCrossLinks(types.CrossLinks{crossLink}, false); err == nil {
utils.Logger().Info().Uint64("blockNum", crossLink.BlockNum().Uint64()).Uint32("shardID", crossLink.ShardID()).Msg("[InsertChain] Cross Link Added to Beaconchain")
}
bc.DeleteCrossLinks(types.CrossLinks{crossLink}, true)
bc.WriteShardLastCrossLink(crossLink.ShardID(), crossLink)
}
}
}
}
// This should be done after everything about adding a block is done.
bc.PostChainEvents(events, logs)
return n, err
}
@@ -1396,37 +1425,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifyHeaders bool) (int,
cache, _ := bc.stateCache.TrieDB().Size()
stats.report(chain, i, cache)
// check non zero VRF field in header and add to local db
if len(block.Vrf()) > 0 {
vrfBlockNumbers, _ := bc.ReadEpochVrfBlockNums(block.Header().Epoch())
if (len(vrfBlockNumbers) > 0) && (vrfBlockNumbers[len(vrfBlockNumbers)-1] == block.NumberU64()) {
utils.Logger().Error().
Str("number", chain[i].Number().String()).
Str("epoch", block.Header().Epoch().String()).
Msg("VRF block number is already in local db")
} else {
vrfBlockNumbers = append(vrfBlockNumbers, block.NumberU64())
err = bc.WriteEpochVrfBlockNums(block.Header().Epoch(), vrfBlockNumbers)
if err != nil {
utils.Logger().Error().
Str("number", chain[i].Number().String()).
Str("epoch", block.Header().Epoch().String()).
Msg("failed to write VRF block number to local db")
}
}
}
// check non zero Vdf in header and add to local db
if len(block.Vdf()) > 0 {
err = bc.WriteEpochVdfBlockNum(block.Header().Epoch(), block.Number())
if err != nil {
utils.Logger().Error().
Str("number", chain[i].Number().String()).
Str("epoch", block.Header().Epoch().String()).
Msg("failed to write VDF block number to local db")
}
}
}
// Append a single chain head event if we've progressed the chain
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
@@ -1870,14 +1868,14 @@ func (bc *BlockChain) WriteShardState(
}
// WriteShardStateBytes saves the given sharding state under the given epoch number.
func (bc *BlockChain) WriteShardStateBytes(
func (bc *BlockChain) WriteShardStateBytes(db rawdb.DatabaseWriter,
epoch *big.Int, shardState []byte,
) error {
decodeShardState := shard.State{}
if err := rlp.DecodeBytes(shardState, &decodeShardState); err != nil {
return err
}
err := rawdb.WriteShardStateBytes(bc.db, epoch, shardState)
err := rawdb.WriteShardStateBytes(db, epoch, shardState)
if err != nil {
return err
}
@@ -2325,16 +2323,41 @@ func (bc *BlockChain) WriteValidatorList(addrs []common.Address) error {
if err != nil {
return err
}
by, err := rlp.EncodeToBytes(addrs)
bytes, err := rlp.EncodeToBytes(addrs)
if err == nil {
bc.validatorListCache.Add("validatorList", bytes)
}
return nil
}
// ReadValidatorListByDelegator reads the addresses of the validators that a delegator has delegated to
func (bc *BlockChain) ReadValidatorListByDelegator(delegator common.Address) ([]common.Address, error) {
if cached, ok := bc.validatorListByDelegatorCache.Get(delegator.Bytes()); ok {
by := cached.([]byte)
m := []common.Address{}
if err := rlp.DecodeBytes(by, &m); err != nil {
return nil, err
}
return m, nil
}
return rawdb.ReadValidatorListByDelegator(bc.db, delegator)
}
// WriteValidatorListByDelegator writes the given delegator's list of validator addresses to the database
func (bc *BlockChain) WriteValidatorListByDelegator(delegator common.Address, addrs []common.Address) error {
err := rawdb.WriteValidatorListByDelegator(bc.db, delegator, addrs)
if err != nil {
return err
}
bc.validatorListCache.Add("validatorList", by)
bytes, err := rlp.EncodeToBytes(addrs)
if err == nil {
bc.validatorListByDelegatorCache.Add(delegator.Bytes(), bytes)
}
return nil
}
// UpdateValidatorList updates the validator map according to staking transaction
func (bc *BlockChain) UpdateValidatorList(tx *staking.StakingTransaction) error {
// UpdateStakingMetaData updates the validator's and the delegator's metadata according to the staking transaction
func (bc *BlockChain) UpdateStakingMetaData(tx *staking.StakingTransaction) error {
// TODO: simplify the logic here in staking/types/transaction.go
payload, err := tx.RLPEncodeStakeMsg()
if err != nil {
@@ -2365,8 +2388,26 @@ func (bc *BlockChain) UpdateValidatorList(tx *staking.StakingTransaction) error
// the following cases are placeholders for now
case staking.DirectiveEditValidator:
case staking.DirectiveDelegate:
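// Record the delegation off-chain: append the validator to the delegator's validator list unless it is already present.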
delegate := decodePayload.(*staking.Delegate)
validators, err := bc.ReadValidatorListByDelegator(delegate.DelegatorAddress)
if err != nil {
return err
}
found := false
for _, validator := range validators {
if bytes.Compare(validator.Bytes(), delegate.ValidatorAddress.Bytes()) == 0 {
found = true
break
}
}
if !found {
validators = append(validators, delegate.ValidatorAddress)
}
err = bc.WriteValidatorListByDelegator(delegate.DelegatorAddress, validators)
return err
case staking.DirectiveUndelegate:
case staking.DirectiveCollectRewards:
// TODO: Check whether the delegation reward can be cleared after reward is collected
default:
}
return nil