@@ -33,6 +33,7 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
   alias Indexer.Fetcher.Arbitrum.DA.Common, as: DataAvailabilityInfo
   alias Indexer.Fetcher.Arbitrum.DA.{Anytrust, Celestia}
   alias Indexer.Fetcher.Arbitrum.Utils.{Db, Logging, Rpc}
+  alias Indexer.Fetcher.Arbitrum.Utils.Helper, as: ArbitrumHelper
   alias Indexer.Helper, as: IndexerHelper
 
   alias Explorer.Chain
@@ -44,8 +45,6 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
 
   # keccak256("SequencerBatchDelivered(uint256,bytes32,bytes32,bytes32,uint256,(uint64,uint64,uint64,uint64),uint8)")
   @event_sequencer_batch_delivered "0x7394f4a19a13c7b92b5bb71033245305946ef78452f7b4986ac1390b5df4ebd7"
 
-  @max_depth_for_safe_block 1000
-
   @doc """
     Discovers and imports new batches of rollup transactions within the current L1 block range.
@@ -115,31 +114,15 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
        ) do
     # Requesting the "latest" block instead of "safe" allows to catch new batches
     # without latency.
-    {:ok, latest_block} =
-      IndexerHelper.get_block_number_by_tag(
-        "latest",
-        l1_rpc_config.json_rpc_named_arguments,
-        Rpc.get_resend_attempts()
-      )
-
-    {safe_chain_block, _} = IndexerHelper.get_safe_block(l1_rpc_config.json_rpc_named_arguments)
-
-    # max() cannot be used here since l1_rpc_config.logs_block_range must not
-    # be taken into account to identify if it is L3 or not
-    safe_block =
-      if safe_chain_block < latest_block + 1 - @max_depth_for_safe_block do
-        # The case of L3, the safe block is too far behind the latest block,
-        # therefore it is assumed that there is no so deep re-orgs there.
-        latest_block + 1 - min(@max_depth_for_safe_block, l1_rpc_config.logs_block_range)
-      else
-        safe_chain_block
-      end
-
     # It is necessary to re-visit some amount of the previous blocks to ensure that
     # no batches are missed due to reorgs. The amount of blocks to re-visit depends
-    # either on the current safe block but must not exceed @max_depth_for_safe_block
-    # (or L1 RPC max block range for getting logs) since on L3 chains the safe block
-    # could be too far behind the latest block.
+    # on the current safe block or the block which is considered as safest in case
+    # of L3 (where the safe block could be too far behind the latest block) or if
+    # RPC does not support "safe" block.
+    {safe_block, latest_block} =
+      Rpc.get_safe_and_latest_l1_blocks(l1_rpc_config.json_rpc_named_arguments, l1_rpc_config.logs_block_range)
+
     # At the same time it does not make sense to re-visit blocks that will be
     # re-visited by the historical batches discovery process.
     # If the new batches discovery process does not reach the chain head previously
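For reference, a minimal sketch of what a combined `Rpc.get_safe_and_latest_l1_blocks/2` helper could look like, reconstructed from the inline logic removed above (latest block by tag, "safe" block with a bounded fallback for the L3 case or when the RPC node does not support "safe"). This is an assumption about its behavior, not the actual implementation in `Indexer.Fetcher.Arbitrum.Utils.Rpc`:

```elixir
# Hypothetical sketch only; assumes the worker's IndexerHelper and Rpc aliases
# and that the re-visit depth is bounded by the configured logs block range.
defp get_safe_and_latest_l1_blocks_sketch(json_rpc_named_arguments, hard_limit) do
  {:ok, latest_block} =
    IndexerHelper.get_block_number_by_tag("latest", json_rpc_named_arguments, Rpc.get_resend_attempts())

  safe_block =
    case IndexerHelper.get_safe_block(json_rpc_named_arguments) do
      # "safe" is close enough to the head to be used directly
      {safe_chain_block, _} when latest_block - safe_chain_block < hard_limit ->
        safe_chain_block

      # The L3 case (or "safe" unsupported): fall back to a bounded depth behind the head
      _ ->
        max(latest_block + 1 - hard_limit, 0)
    end

  {safe_block, latest_block}
end
```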
@@ -154,30 +137,23 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
       # Since with taking the safe block into account, the range safe_start_block..end_block
       # could be larger than L1 RPC max block range for getting logs, it is necessary to
       # divide the range into the chunks
-      safe_start_block
-      |> Stream.unfold(fn
-        current when current > end_block ->
-          nil
-
-        current ->
-          next = min(current + l1_rpc_config.logs_block_range - 1, end_block)
-          {current, next + 1}
-      end)
-      |> Stream.each(fn chunk_start ->
-        chunk_end = min(chunk_start + l1_rpc_config.logs_block_range - 1, end_block)
-
-        discover(
-          sequencer_inbox_address,
-          chunk_start,
-          chunk_end,
-          new_batches_limit,
-          messages_to_blocks_shift,
-          l1_rpc_config,
-          node_interface_address,
-          rollup_rpc_config
-        )
-      end)
-      |> Stream.run()
+      ArbitrumHelper.execute_for_block_range_in_chunks(
+        safe_start_block,
+        end_block,
+        l1_rpc_config.logs_block_range,
+        fn chunk_start, chunk_end ->
+          discover(
+            sequencer_inbox_address,
+            chunk_start,
+            chunk_end,
+            new_batches_limit,
+            messages_to_blocks_shift,
+            l1_rpc_config,
+            node_interface_address,
+            rollup_rpc_config
+          )
+        end
+      )
 
       {:ok, end_block}
     else
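The refactor above relies on a generic chunking helper in `Indexer.Fetcher.Arbitrum.Utils.Helper`. A minimal sketch of the shape `execute_for_block_range_in_chunks/4` could take, assuming it produces the same inclusive chunk boundaries as the removed `Stream`-based pipeline; the real helper may differ, e.g. in logging or return value:

```elixir
# Hypothetical sketch: walk the inclusive range in steps of chunk_size and hand
# each chunk's inclusive boundaries to the callback.
def execute_for_block_range_in_chunks(start_block, end_block, chunk_size, func) do
  start_block..end_block//chunk_size
  |> Enum.each(fn chunk_start ->
    chunk_end = min(chunk_start + chunk_size - 1, end_block)
    func.(chunk_start, chunk_end)
  end)
end
```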
@@ -532,25 +508,25 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
          rollup_rpc_config
        ) do
     Enum.each(l1_block_ranges, fn {start_block, end_block} ->
-      Enum.each(0..div(end_block - start_block, l1_rpc_config.logs_block_range), fn i ->
-        start_block = start_block + i * l1_rpc_config.logs_block_range
-        end_block = min(start_block + l1_rpc_config.logs_block_range - 1, end_block)
-
-        log_info("Block range for missing batches discovery: #{start_block}..#{end_block}")
-
-        # `do_discover` is not used here to demonstrate the need to fetch batches
-        # which are already historical
-        discover_historical(
-          sequencer_inbox_address,
-          start_block,
-          end_block,
-          new_batches_limit,
-          messages_to_blocks_shift,
-          l1_rpc_config,
-          node_interface_address,
-          rollup_rpc_config
-        )
-      end)
+      ArbitrumHelper.execute_for_block_range_in_chunks(
+        start_block,
+        end_block,
+        l1_rpc_config.logs_block_range,
+        fn chunk_start, chunk_end ->
+          # `do_discover` is not used here to demonstrate the need to fetch batches
+          # which are already historical
+          discover_historical(
+            sequencer_inbox_address,
+            chunk_start,
+            chunk_end,
+            new_batches_limit,
+            messages_to_blocks_shift,
+            l1_rpc_config,
+            node_interface_address,
+            rollup_rpc_config
+          )
+        end
+      )
     end)
   end
 
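Under the same assumption about the helper, a quick illustration of how a single `{start_block, end_block}` pair from `l1_block_ranges` would be split before `discover_historical/8` is invoked (the numbers are made up for the example):

```elixir
# With start_block = 1_000, end_block = 3_250 and logs_block_range = 1_000,
# the callback would receive these inclusive chunk boundaries:
1_000..3_250//1_000
|> Enum.map(fn chunk_start -> {chunk_start, min(chunk_start + 1_000 - 1, 3_250)} end)
# => [{1_000, 1_999}, {2_000, 2_999}, {3_000, 3_250}]
```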
@@ -699,9 +675,11 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
   # This function analyzes SequencerBatchDelivered event logs to identify new batches
   # and retrieves their details, avoiding the reprocessing of batches already known
   # in the database. It enriches the details of new batches with data from corresponding
-  # L1 transactions and blocks, including timestamps and block ranges. The function
-  # then prepares batches, associated rollup blocks and transactions, lifecycle
-  # transactions and Data Availability related records for database import.
+  # L1 transactions and blocks, including timestamps and block ranges. The lifecycle
+  # transactions for already known batches are updated with actual block numbers and
+  # timestamps. The function then prepares batches, associated rollup blocks and
+  # transactions, lifecycle transactions and Data Availability related records for
+  # database import.
   # Additionally, L2-to-L1 messages initiated in the rollup blocks associated with the
   # discovered batches are retrieved from the database, marked as `:sent`, and prepared
   # for database import.
@@ -1341,21 +1319,12 @@ defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
       block_num = existing_commitment_txs[tx.hash]
       ts = block_to_ts[block_num]
 
-      if tx.block_number == block_num and DateTime.compare(tx.timestamp, ts) == :eq do
-        txs
-      else
-        log_info(
-          "The commitment transaction 0x#{tx.hash |> Base.encode16(case: :lower)} will be updated with the new block number and timestamp"
-        )
-
-        Map.put(
-          txs,
-          tx.hash,
-          Map.merge(tx, %{
-            block_number: block_num,
-            timestamp: ts
-          })
-        )
-      end
+      case ArbitrumHelper.compare_lifecycle_tx_and_update(tx, {block_num, ts}, "commitment") do
+        {:updated, updated_tx} ->
+          Map.put(txs, tx.hash, updated_tx)
+
+        _ ->
+          txs
+      end
     end)
   end
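The `case` above assumes `ArbitrumHelper.compare_lifecycle_tx_and_update/3` encapsulates the comparison and logging that previously lived inline. A hedged sketch of that behavior, reconstructed from the removed branch; the return shape of the unchanged path is an assumption (the caller only matches on `{:updated, _}`):

```elixir
# Hypothetical sketch: return {:updated, tx} with the refreshed block number and
# timestamp when they differ from the indexed values, otherwise leave tx as is.
# Assumes the same log_info/1 helper that the worker imports from Utils.Logging.
def compare_lifecycle_tx_and_update(tx, {block_num, ts}, label) do
  if tx.block_number == block_num and DateTime.compare(tx.timestamp, ts) == :eq do
    {:unchanged, tx}
  else
    log_info(
      "The #{label} transaction 0x#{tx.hash |> Base.encode16(case: :lower)} will be updated with the new block number and timestamp"
    )

    {:updated, Map.merge(tx, %{block_number: block_num, timestamp: ts})}
  end
end
```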