fix overflow in max big.Int transaction-amount limit comparison

pull/1319/head
Dennis Won 5 years ago
parent c48f0bea46
commit 9da6f5624c
  1. 6
      internal/configs/sharding/fixedschedule.go
  2. 8
      internal/configs/sharding/localnet.go
  3. 8
      internal/configs/sharding/mainnet.go
  4. 4
      internal/configs/sharding/shardingconfig.go
  5. 8
      internal/configs/sharding/testnet.go
  6. 9
      node/node.go
  7. 24
      node/worker/worker.go

@ -34,8 +34,8 @@ func (s fixedSchedule) IsLastBlock(blockNum uint64) bool {
return blockNum%blocks == blocks-1 return blockNum%blocks == blocks-1
} }
func (s fixedSchedule) MaxTxAmountLimit() *big.Int { func (s fixedSchedule) MaxTxAmountNanoLimit() *big.Int {
amountBigInt := big.NewInt(int64(mainnetMaxTxAmountLimit * denominations.Nano)) amountBigInt := big.NewInt(mainnetMaxTxAmountNanoLimit)
amountBigInt = amountBigInt.Mul(amountBigInt, big.NewInt(denominations.Nano)) amountBigInt = amountBigInt.Mul(amountBigInt, big.NewInt(denominations.Nano))
return amountBigInt return amountBigInt
} }
@ -50,7 +50,7 @@ func (s fixedSchedule) MaxTxsPerBlockLimit() int {
func (s fixedSchedule) TxsThrottleConfig() *TxsThrottleConfig { func (s fixedSchedule) TxsThrottleConfig() *TxsThrottleConfig {
return &TxsThrottleConfig{ return &TxsThrottleConfig{
MaxTxAmountLimit: s.MaxTxAmountLimit(), MaxTxAmountNanoLimit: s.MaxTxAmountNanoLimit(),
MaxNumRecentTxsPerAccountLimit: s.MaxNumRecentTxsPerAccountLimit(), MaxNumRecentTxsPerAccountLimit: s.MaxNumRecentTxsPerAccountLimit(),
MaxTxsPerBlockLimit: s.MaxTxsPerBlockLimit(), MaxTxsPerBlockLimit: s.MaxTxsPerBlockLimit(),
} }

@ -20,7 +20,7 @@ const (
localnetEpochBlock1 = 20 localnetEpochBlock1 = 20
twoOne = 5 twoOne = 5
localnetMaxTxAmountLimit = 1e2 // unit is in One localnetMaxTxAmountNanoLimit = 1e2 // unit is in One
localnetMaxNumRecentTxsPerAccountLimit = 2 localnetMaxNumRecentTxsPerAccountLimit = 2
localnetMaxTxsPerBlockLimit = 8000 localnetMaxTxsPerBlockLimit = 8000
) )
@ -62,8 +62,8 @@ func (ls localnetSchedule) IsLastBlock(blockNum uint64) bool {
} }
} }
func (ls localnetSchedule) MaxTxAmountLimit() *big.Int { func (ls localnetSchedule) MaxTxAmountNanoLimit() *big.Int {
amountBigInt := big.NewInt(int64(localnetMaxTxAmountLimit * denominations.Nano)) amountBigInt := big.NewInt(localnetMaxTxAmountNanoLimit)
amountBigInt = amountBigInt.Mul(amountBigInt, big.NewInt(denominations.Nano)) amountBigInt = amountBigInt.Mul(amountBigInt, big.NewInt(denominations.Nano))
return amountBigInt return amountBigInt
} }
@ -78,7 +78,7 @@ func (ls localnetSchedule) MaxTxsPerBlockLimit() int {
func (ls localnetSchedule) TxsThrottleConfig() *TxsThrottleConfig { func (ls localnetSchedule) TxsThrottleConfig() *TxsThrottleConfig {
return &TxsThrottleConfig{ return &TxsThrottleConfig{
MaxTxAmountLimit: ls.MaxTxAmountLimit(), MaxTxAmountNanoLimit: ls.MaxTxAmountNanoLimit(),
MaxNumRecentTxsPerAccountLimit: ls.MaxNumRecentTxsPerAccountLimit(), MaxNumRecentTxsPerAccountLimit: ls.MaxNumRecentTxsPerAccountLimit(),
MaxTxsPerBlockLimit: ls.MaxTxsPerBlockLimit(), MaxTxsPerBlockLimit: ls.MaxTxsPerBlockLimit(),
} }

@ -13,7 +13,7 @@ const (
mainnetV1Epoch = 1 mainnetV1Epoch = 1
mainnetV2Epoch = 5 mainnetV2Epoch = 5
mainnetMaxTxAmountLimit = 1e3 // unit is in One mainnetMaxTxAmountNanoLimit = 1e3 // unit is in One
mainnetMaxNumRecentTxsPerAccountLimit = 10 mainnetMaxNumRecentTxsPerAccountLimit = 10
mainnetMaxTxsPerBlockLimit = 8000 mainnetMaxTxsPerBlockLimit = 8000
) )
@ -62,8 +62,8 @@ func (ms mainnetSchedule) IsLastBlock(blockNum uint64) bool {
} }
} }
func (ms mainnetSchedule) MaxTxAmountLimit() *big.Int { func (ms mainnetSchedule) MaxTxAmountNanoLimit() *big.Int {
amountBigInt := big.NewInt(int64(mainnetMaxTxAmountLimit * denominations.Nano)) amountBigInt := big.NewInt(mainnetMaxTxAmountNanoLimit)
amountBigInt = amountBigInt.Mul(amountBigInt, big.NewInt(denominations.Nano)) amountBigInt = amountBigInt.Mul(amountBigInt, big.NewInt(denominations.Nano))
return amountBigInt return amountBigInt
} }
@ -78,7 +78,7 @@ func (ms mainnetSchedule) MaxTxsPerBlockLimit() int {
func (ms mainnetSchedule) TxsThrottleConfig() *TxsThrottleConfig { func (ms mainnetSchedule) TxsThrottleConfig() *TxsThrottleConfig {
return &TxsThrottleConfig{ return &TxsThrottleConfig{
MaxTxAmountLimit: ms.MaxTxAmountLimit(), MaxTxAmountNanoLimit: ms.MaxTxAmountNanoLimit(),
MaxNumRecentTxsPerAccountLimit: ms.MaxNumRecentTxsPerAccountLimit(), MaxNumRecentTxsPerAccountLimit: ms.MaxNumRecentTxsPerAccountLimit(),
MaxTxsPerBlockLimit: ms.MaxTxsPerBlockLimit(), MaxTxsPerBlockLimit: ms.MaxTxsPerBlockLimit(),
} }

@ -23,7 +23,7 @@ type Schedule interface {
IsLastBlock(blockNum uint64) bool IsLastBlock(blockNum uint64) bool
// Max amount limit for a valid transaction // Max amount limit for a valid transaction
MaxTxAmountLimit() *big.Int MaxTxAmountNanoLimit() *big.Int
// Max number of transactions of a particular account per block level // Max number of transactions of a particular account per block level
MaxNumRecentTxsPerAccountLimit() uint64 MaxNumRecentTxsPerAccountLimit() uint64
@ -85,7 +85,7 @@ func (result TxThrottleFlag) String() string {
// TxsThrottleConfig contains configuration for throttling pending transactions per node block // TxsThrottleConfig contains configuration for throttling pending transactions per node block
type TxsThrottleConfig struct { type TxsThrottleConfig struct {
// Max amount limit for a valid transaction // Max amount limit for a valid transaction
MaxTxAmountLimit *big.Int MaxTxAmountNanoLimit *big.Int
// Max number of transactions of a particular account for the past hour // Max number of transactions of a particular account for the past hour
MaxNumRecentTxsPerAccountLimit uint64 MaxNumRecentTxsPerAccountLimit uint64

@ -20,7 +20,7 @@ const (
testnetEpochBlock1 = 78 testnetEpochBlock1 = 78
threeOne = 111 threeOne = 111
testnetMaxTxAmountLimit = 1e3 // unit is in One testnetMaxTxAmountNanoLimit = 1e3 // unit is in One
testnetMaxNumRecentTxsPerAccountLimit = 10 testnetMaxNumRecentTxsPerAccountLimit = 10
testnetMaxTxsPerBlockLimit = 8000 testnetMaxTxsPerBlockLimit = 8000
) )
@ -63,8 +63,8 @@ func (ts testnetSchedule) IsLastBlock(blockNum uint64) bool {
} }
} }
func (ts testnetSchedule) MaxTxAmountLimit() *big.Int { func (ts testnetSchedule) MaxTxAmountNanoLimit() *big.Int {
amountBigInt := big.NewInt(int64(testnetMaxTxAmountLimit * denominations.Nano)) amountBigInt := big.NewInt(testnetMaxTxAmountNanoLimit)
amountBigInt = amountBigInt.Mul(amountBigInt, big.NewInt(denominations.Nano)) amountBigInt = amountBigInt.Mul(amountBigInt, big.NewInt(denominations.Nano))
return amountBigInt return amountBigInt
} }
@ -79,7 +79,7 @@ func (ts testnetSchedule) MaxTxsPerBlockLimit() int {
func (ts testnetSchedule) TxsThrottleConfig() *TxsThrottleConfig { func (ts testnetSchedule) TxsThrottleConfig() *TxsThrottleConfig {
return &TxsThrottleConfig{ return &TxsThrottleConfig{
MaxTxAmountLimit: ts.MaxTxAmountLimit(), MaxTxAmountNanoLimit: ts.MaxTxAmountNanoLimit(),
MaxNumRecentTxsPerAccountLimit: ts.MaxNumRecentTxsPerAccountLimit(), MaxNumRecentTxsPerAccountLimit: ts.MaxNumRecentTxsPerAccountLimit(),
MaxTxsPerBlockLimit: ts.MaxTxsPerBlockLimit(), MaxTxsPerBlockLimit: ts.MaxTxsPerBlockLimit(),
} }

@ -271,8 +271,7 @@ func (node *Node) getTransactionsForNewBlock(coinbase common.Address) types.Tran
node.pendingTransactions = unselected node.pendingTransactions = unselected
node.reducePendingTransactions() node.reducePendingTransactions()
utils.GetLogInstance().Info( utils.GetLogInstance().Info("Selecting Transactions",
"msg", "Selecting Transactions",
"newBlockNum", newBlockNum, "newBlockNum", newBlockNum,
"remainPending", len(node.pendingTransactions), "remainPending", len(node.pendingTransactions),
"invalidDiscarded", len(invalid)) "invalidDiscarded", len(invalid))
@ -506,17 +505,17 @@ func (node *Node) initNodeConfiguration() (service.NodeConfig, chan p2p.Peer) {
var err error var err error
node.shardGroupReceiver, err = node.host.GroupReceiver(node.NodeConfig.GetShardGroupID()) node.shardGroupReceiver, err = node.host.GroupReceiver(node.NodeConfig.GetShardGroupID())
if err != nil { if err != nil {
utils.GetLogInstance().Error("Failed to create shard receiver", "msg", err) utils.GetLogInstance().Error("Failed to create shard receiver", "err", err)
} }
node.globalGroupReceiver, err = node.host.GroupReceiver(p2p.GroupIDBeaconClient) node.globalGroupReceiver, err = node.host.GroupReceiver(p2p.GroupIDBeaconClient)
if err != nil { if err != nil {
utils.GetLogInstance().Error("Failed to create global receiver", "msg", err) utils.GetLogInstance().Error("Failed to create global receiver", "err", err)
} }
node.clientReceiver, err = node.host.GroupReceiver(node.NodeConfig.GetClientGroupID()) node.clientReceiver, err = node.host.GroupReceiver(node.NodeConfig.GetClientGroupID())
if err != nil { if err != nil {
utils.GetLogInstance().Error("Failed to create client receiver", "msg", err) utils.GetLogInstance().Error("Failed to create client receiver", "err", err)
} }
return nodeConfig, chanPeer return nodeConfig, chanPeer
} }

@ -59,24 +59,25 @@ func (w *Worker) throttleTxs(selected types.Transactions, recentTxsStats types.R
var sender common.Address var sender common.Address
msg, err := tx.AsMessage(s) msg, err := tx.AsMessage(s)
if err != nil { if err != nil {
utils.GetLogInstance().Error("Error when parsing tx into message", "msg", err) utils.GetLogInstance().Error("Error when parsing tx into message",
"tx Id", tx.Hash().Hex(), "err", err)
} else { } else {
sender = msg.From() sender = msg.From()
} }
// already selected max num txs // already selected max num txs
if len(selected) > (*txsThrottleConfig).MaxTxsPerBlockLimit { if len(selected) > txsThrottleConfig.MaxTxsPerBlockLimit {
utils.GetLogInstance().Info( utils.GetLogInstance().Info("Throttling tx with max txs per block limit",
"msg", "Throttling tx with max txs per block limit", "tx Id", tx.Hash().Hex(),
"MaxTxsPerBlockLimit", txsThrottleConfig.MaxTxsPerBlockLimit) "MaxTxsPerBlockLimit", txsThrottleConfig.MaxTxsPerBlockLimit)
return sender, shardingconfig.TxUnselect return sender, shardingconfig.TxUnselect
} }
// throttle a single sender sending too many transactions in one block // throttle a single sender sending too many transactions in one block
if (txsThrottleConfig.MaxTxAmountLimit).Cmp(tx.Value()) < 0 { if txsThrottleConfig.MaxTxAmountNanoLimit.Cmp(tx.Value()) < 0 {
utils.GetLogInstance().Info( utils.GetLogInstance().Info("Throttling tx with max amount limit",
"msg", "Throttling tx with max amount limit", "tx Id", tx.Hash().Hex(),
"MaxTxAmountLimit", txsThrottleConfig.MaxTxAmountLimit.Uint64(), "MaxTxAmountNanoLimit", txsThrottleConfig.MaxTxAmountNanoLimit.Uint64(),
"Tx amount", tx.Value().Uint64()) "Tx amount", tx.Value().Uint64())
return sender, shardingconfig.TxInvalid return sender, shardingconfig.TxInvalid
} }
@ -87,8 +88,8 @@ func (w *Worker) throttleTxs(selected types.Transactions, recentTxsStats types.R
numTxsPastHour += blockTxsCounts[sender] numTxsPastHour += blockTxsCounts[sender]
} }
if numTxsPastHour >= txsThrottleConfig.MaxNumRecentTxsPerAccountLimit { if numTxsPastHour >= txsThrottleConfig.MaxNumRecentTxsPerAccountLimit {
utils.GetLogInstance().Info( utils.GetLogInstance().Info("Throttling tx with max txs per account in a single block limit",
"msg", "Throttling tx with max txs per account in a single block limit", "tx Id", tx.Hash().Hex(),
"MaxNumRecentTxsPerAccountLimit", txsThrottleConfig.MaxNumRecentTxsPerAccountLimit) "MaxNumRecentTxsPerAccountLimit", txsThrottleConfig.MaxNumRecentTxsPerAccountLimit)
return sender, shardingconfig.TxUnselect return sender, shardingconfig.TxUnselect
} }
@ -133,8 +134,7 @@ func (w *Worker) SelectTransactionsForNewBlock(newBlockNum uint64, txs types.Tra
// log invalid or unselected txs // log invalid or unselected txs
if flag == shardingconfig.TxUnselect || flag == shardingconfig.TxInvalid { if flag == shardingconfig.TxUnselect || flag == shardingconfig.TxInvalid {
utils.GetLogInstance().Info( utils.GetLogInstance().Info("Transaction Throttle flag",
"msg", "Transaction Throttle flag",
"Transaction Id", tx.Hash().Hex(), "Transaction Id", tx.Hash().Hex(),
"txThrottleFlag", flag.String()) "txThrottleFlag", flag.String())
} }

Loading…
Cancel
Save