feat: indexer for cross level messages on Arbitrum (#9312)

* Initial version of x-level messages indexer

* fixes for cspell and credo

* new state of x-level messages

* Monitoring of new L1-to-L2 messages on L1

* new batches discovery

* fetcher workers in separate modules

* proper name

* Fix for responses without "id", e.g. "Too Many Requests"

* update DB with new batches and corresponding data

* update DB with confirmed blocks

* fixes for cspell and credo

* tracking commitments confirmations for L1 to L2 messages

* Proper use of max function

* tracking completion of L2 to L1 messages

* catchup historical messages to L2

* incorrect version of committed file

* catchup historical messages from L2 and completion of L1-to-L2 messages

* historical batches catchup

* status for historical l2-to-l1 messages

* address matching issue

* catchup historical executions of L2-to-L1 messages

* db query to find unconfirmed blocks gaps

* first changes to catchup historical confirmations

* finalized catchup of historical confirmations

* 4844 blobs support

* fix for the issue with multiple confirmations

* limit the number of batches to handle at once

* Use latest L1 block by fetchers if start block is not configured

* merge issue fix

* missed file

* historical messages discovery

* reduce logs severity

* first iteration to improve documentation for new functionality

* second iteration to improve documentation for new functionality

* third iteration to improve documentation for new functionality

* fourth iteration to improve documentation for new functionality

* fifth iteration to improve documentation for new functionality

* final iteration to improve documentation for new functionality

* merge issues addressed

* code review issues addressed

* code review issues addressed

* fix merge issue

* raising exception in the case of DB inconsistency

* fix formatting issue

* termination case for RollupMessagesCatchup

* code review comments addressed

* code review comments addressed

* consistency in primary keys

* dialyzer fix

* code review comments addressed

* missed doc comment

* code review comments addressed

* updated indices creation as per code review comments

* fix merge issue

* configuration of intervals as time variables

* TODO added to reflect improvement ability

* database fields refactoring

* association renaming

* feat: APIv2 endpoints for Arbitrum messages and batches (#9963)

* Arbitrum related info in Transaction and Block views

* Views to get info about batches and messages

* usage of committed for batches instead of confirmed

* merge issues addressed

* changes after merge

* formatting issue fix

* code review comment addressed

* associations and fields in api response renamed

* format issue addressed

* feat: Arbitrum-specific fields in the block and transaction API endpoints (#10067)

* Arbitrum related info in Transaction and Block views

* Views to get info about batches and messages

* usage of committed for batches instead of confirmed

* merge issues addressed

* changes after merge

* formatting issue fix

* block and transaction views extended

* code review comment addressed

* associations and fields in api response renamed

* format issue addressed

* fix credo issue

* fix tests issues

* ethereumjsonrpc test fail investigation

* test issues fixes
Alexander Kolotov committed via GitHub
parent bf3f32137d
commit 35c885def5
Changed files (changed line counts in parentheses):
  1. apps/block_scout_web/lib/block_scout_web/api_router.ex (24)
  2. apps/block_scout_web/lib/block_scout_web/chain.ex (12)
  3. apps/block_scout_web/lib/block_scout_web/controllers/api/v2/arbitrum_controller.ex (163)
  4. apps/block_scout_web/lib/block_scout_web/controllers/api/v2/block_controller.ex (50)
  5. apps/block_scout_web/lib/block_scout_web/controllers/api/v2/transaction_controller.ex (47)
  6. apps/block_scout_web/lib/block_scout_web/views/api/v2/arbitrum_view.ex (425)
  7. apps/block_scout_web/lib/block_scout_web/views/api/v2/block_view.ex (10)
  8. apps/block_scout_web/lib/block_scout_web/views/api/v2/helper.ex (40)
  9. apps/block_scout_web/lib/block_scout_web/views/api/v2/transaction_view.ex (14)
  10. apps/block_scout_web/lib/block_scout_web/views/api/v2/zksync_view.ex (54)
  11. apps/block_scout_web/mix.exs (3)
  12. apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/block.ex (63)
  13. apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/blocks.ex (5)
  14. apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/http.ex (48)
  15. apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/receipt.ex (91)
  16. apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/receipts.ex (3)
  17. apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/transaction.ex (14)
  18. apps/ethereum_jsonrpc/test/ethereum_jsonrpc/block_test.exs (60)
  19. apps/explorer/config/dev.exs (3)
  20. apps/explorer/config/prod.exs (4)
  21. apps/explorer/config/test.exs (1)
  22. apps/explorer/lib/explorer/application.ex (1)
  23. apps/explorer/lib/explorer/chain.ex (63)
  24. apps/explorer/lib/explorer/chain/arbitrum/batch_block.ex (53)
  25. apps/explorer/lib/explorer/chain/arbitrum/batch_transaction.ex (52)
  26. apps/explorer/lib/explorer/chain/arbitrum/l1_batch.ex (62)
  27. apps/explorer/lib/explorer/chain/arbitrum/l1_execution.ex (46)
  28. apps/explorer/lib/explorer/chain/arbitrum/lifecycle_transaction.ex (54)
  29. apps/explorer/lib/explorer/chain/arbitrum/message.ex (57)
  30. apps/explorer/lib/explorer/chain/arbitrum/reader.ex (913)
  31. apps/explorer/lib/explorer/chain/block.ex (59)
  32. apps/explorer/lib/explorer/chain/cache/helper.ex (16)
  33. apps/explorer/lib/explorer/chain/import/runner/arbitrum/batch_blocks.ex (104)
  34. apps/explorer/lib/explorer/chain/import/runner/arbitrum/batch_transactions.ex (79)
  35. apps/explorer/lib/explorer/chain/import/runner/arbitrum/l1_batches.ex (112)
  36. apps/explorer/lib/explorer/chain/import/runner/arbitrum/l1_executions.ex (102)
  37. apps/explorer/lib/explorer/chain/import/runner/arbitrum/lifecycle_transactions.ex (107)
  38. apps/explorer/lib/explorer/chain/import/runner/arbitrum/messages.ex (117)
  39. apps/explorer/lib/explorer/chain/import/runner/transactions.ex (88)
  40. apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex (16)
  41. apps/explorer/lib/explorer/chain/transaction.ex (81)
  42. apps/explorer/lib/explorer/chain_spec/genesis_data.ex (1)
  43. apps/explorer/lib/explorer/repo.ex (10)
  44. apps/explorer/lib/explorer/utility/missing_block_range.ex (34)
  45. apps/explorer/priv/arbitrum/migrations/20240201125730_create_arbitrum_tables.exs (124)
  46. apps/explorer/priv/arbitrum/migrations/20240510184858_extend_transaction_and_block_tables.exs (15)
  47. apps/explorer/test/support/factory.ex (28)
  48. apps/indexer/lib/indexer/block/fetcher.ex (12)
  49. apps/indexer/lib/indexer/fetcher/arbitrum/messaging.ex (295)
  50. apps/indexer/lib/indexer/fetcher/arbitrum/rollup_messages_catchup.ex (365)
  51. apps/indexer/lib/indexer/fetcher/arbitrum/tracking_batches_statuses.ex (459)
  52. apps/indexer/lib/indexer/fetcher/arbitrum/tracking_messages_on_l1.ex (223)
  53. apps/indexer/lib/indexer/fetcher/arbitrum/utils/db.ex (787)
  54. apps/indexer/lib/indexer/fetcher/arbitrum/utils/helper.ex (86)
  55. apps/indexer/lib/indexer/fetcher/arbitrum/utils/logging.ex (162)
  56. apps/indexer/lib/indexer/fetcher/arbitrum/utils/rpc.ex (391)
  57. apps/indexer/lib/indexer/fetcher/arbitrum/workers/historical_messages_on_l2.ex (284)
  58. apps/indexer/lib/indexer/fetcher/arbitrum/workers/l1_finalization.ex (74)
  59. apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_batches.ex (975)
  60. apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_confirmations.ex (1034)
  61. apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_l1_executions.ex (413)
  62. apps/indexer/lib/indexer/fetcher/arbitrum/workers/new_messages_to_l2.ex (346)
  63. apps/indexer/lib/indexer/fetcher/optimism.ex (1)
  64. apps/indexer/lib/indexer/fetcher/polygon_edge.ex (1)
  65. apps/indexer/lib/indexer/fetcher/polygon_zkevm/bridge.ex (52)
  66. apps/indexer/lib/indexer/fetcher/zksync/utils/db.ex (2)
  67. apps/indexer/lib/indexer/fetcher/zksync/utils/rpc.ex (10)
  68. apps/indexer/lib/indexer/helper.ex (259)
  69. apps/indexer/lib/indexer/supervisor.ex (12)
  70. apps/indexer/lib/indexer/transform/arbitrum/messaging.ex (44)
  71. apps/indexer/lib/indexer/transform/transaction_actions.ex (74)
  72. config/config_helper.exs (1)
  73. config/runtime.exs (43)
  74. config/runtime/dev.exs (9)
  75. config/runtime/prod.exs (8)
  76. cspell.json (13)

@ -196,6 +196,10 @@ defmodule BlockScoutWeb.ApiRouter do
get("/zksync-batch/:batch_number", V2.TransactionController, :zksync_batch)
end
if Application.compile_env(:explorer, :chain_type) == :arbitrum do
get("/arbitrum-batch/:batch_number", V2.TransactionController, :arbitrum_batch)
end
if Application.compile_env(:explorer, :chain_type) == :suave do
get("/execution-node/:execution_node_hash_param", V2.TransactionController, :execution_node)
end
@ -219,6 +223,10 @@ defmodule BlockScoutWeb.ApiRouter do
get("/:block_hash_or_number/transactions", V2.BlockController, :transactions)
get("/:block_hash_or_number/internal-transactions", V2.BlockController, :internal_transactions)
get("/:block_hash_or_number/withdrawals", V2.BlockController, :withdrawals)
if Application.compile_env(:explorer, :chain_type) == :arbitrum do
get("/arbitrum-batch/:batch_number", V2.BlockController, :arbitrum_batch)
end
end
scope "/addresses" do
@ -277,6 +285,12 @@ defmodule BlockScoutWeb.ApiRouter do
get("/zksync/batches/confirmed", V2.ZkSyncController, :batches_confirmed)
get("/zksync/batches/latest-number", V2.ZkSyncController, :batch_latest_number)
end
if Application.compile_env(:explorer, :chain_type) == :arbitrum do
get("/arbitrum/messages/to-rollup", V2.ArbitrumController, :recent_messages_to_l2)
get("/arbitrum/batches/committed", V2.ArbitrumController, :batches_committed)
get("/arbitrum/batches/latest-number", V2.ArbitrumController, :batch_latest_number)
end
end
scope "/stats" do
@ -402,6 +416,16 @@ defmodule BlockScoutWeb.ApiRouter do
get("/worlds/:world/tables/:table_id/records/:record_id", V2.MudController, :world_table_record)
end
end
scope "/arbitrum" do
if Application.compile_env(:explorer, :chain_type) == :arbitrum do
get("/messages/:direction", V2.ArbitrumController, :messages)
get("/messages/:direction/count", V2.ArbitrumController, :messages_count)
get("/batches", V2.ArbitrumController, :batches)
get("/batches/count", V2.ArbitrumController, :batches_count)
get("/batches/:batch_number", V2.ArbitrumController, :batch)
end
end
end
scope "/v1/graphql" do

@ -433,7 +433,11 @@ defmodule BlockScoutWeb.Chain do
end
end
# clause for Polygon Edge Deposits and Withdrawals and for account's entities pagination
# clause for pagination of entities:
# - Account's entities
# - Polygon Edge Deposits
# - Polygon Edge Withdrawals
# - Arbitrum cross chain messages
def paging_options(%{"id" => id_string}) when is_binary(id_string) do
case Integer.parse(id_string) do
{id, ""} ->
@ -444,7 +448,11 @@ defmodule BlockScoutWeb.Chain do
end
end
# clause for Polygon Edge Deposits and Withdrawals and for account's entities pagination
# clause for pagination of entities:
# - Account's entities
# - Polygon Edge Deposits
# - Polygon Edge Withdrawals
# - Arbitrum cross chain messages
def paging_options(%{"id" => id}) when is_integer(id) do
[paging_options: %{@default_paging_options | key: {id}}]
end
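A quick sketch of what the new clause yields (assuming the module's usual @default_paging_options, i.e. a page size of 50):

iex> BlockScoutWeb.Chain.paging_options(%{"id" => "42"})
[paging_options: %Explorer.PagingOptions{key: {42}, page_size: 50}]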

@ -0,0 +1,163 @@
defmodule BlockScoutWeb.API.V2.ArbitrumController do
use BlockScoutWeb, :controller
import BlockScoutWeb.Chain,
only: [
next_page_params: 4,
paging_options: 1,
split_list_by_page: 1
]
alias Explorer.PagingOptions
alias Explorer.Chain.Arbitrum.{L1Batch, Message, Reader}
action_fallback(BlockScoutWeb.API.V2.FallbackController)
@batch_necessity_by_association %{:commitment_transaction => :optional}
@doc """
Function to handle GET requests to `/api/v2/arbitrum/messages/:direction` endpoint.
"""
@spec messages(Plug.Conn.t(), map()) :: Plug.Conn.t()
def messages(conn, %{"direction" => direction} = params) do
options =
params
|> paging_options()
|> Keyword.put(:api?, true)
{messages, next_page} =
direction
|> Reader.messages(options)
|> split_list_by_page()
next_page_params =
next_page_params(
next_page,
messages,
params,
fn %Message{message_id: msg_id} -> %{"id" => msg_id} end
)
conn
|> put_status(200)
|> render(:arbitrum_messages, %{
messages: messages,
next_page_params: next_page_params
})
end
@doc """
Function to handle GET requests to `/api/v2/arbitrum/messages/:direction/count` endpoint.
"""
@spec messages_count(Plug.Conn.t(), map()) :: Plug.Conn.t()
def messages_count(conn, %{"direction" => direction} = _params) do
conn
|> put_status(200)
|> render(:arbitrum_messages_count, %{count: Reader.messages_count(direction, api?: true)})
end
@doc """
Function to handle GET requests to `/api/v2/arbitrum/batches/:batch_number` endpoint.
"""
@spec batch(Plug.Conn.t(), map()) :: Plug.Conn.t()
def batch(conn, %{"batch_number" => batch_number} = _params) do
case Reader.batch(
batch_number,
necessity_by_association: @batch_necessity_by_association,
api?: true
) do
{:ok, batch} ->
conn
|> put_status(200)
|> render(:arbitrum_batch, %{batch: batch})
{:error, :not_found} = res ->
res
end
end
@doc """
Function to handle GET requests to `/api/v2/arbitrum/batches/count` endpoint.
"""
@spec batches_count(Plug.Conn.t(), map()) :: Plug.Conn.t()
def batches_count(conn, _params) do
conn
|> put_status(200)
|> render(:arbitrum_batches_count, %{count: Reader.batches_count(api?: true)})
end
@doc """
Function to handle GET requests to `/api/v2/arbitrum/batches` endpoint.
"""
@spec batches(Plug.Conn.t(), map()) :: Plug.Conn.t()
def batches(conn, params) do
{batches, next_page} =
params
|> paging_options()
|> Keyword.put(:necessity_by_association, @batch_necessity_by_association)
|> Keyword.put(:api?, true)
|> Reader.batches()
|> split_list_by_page()
next_page_params =
next_page_params(
next_page,
batches,
params,
fn %L1Batch{number: number} -> %{"number" => number} end
)
conn
|> put_status(200)
|> render(:arbitrum_batches, %{
batches: batches,
next_page_params: next_page_params
})
end
@doc """
Function to handle GET requests to `/api/v2/main-page/arbitrum/batches/committed` endpoint.
"""
@spec batches_committed(Plug.Conn.t(), map()) :: Plug.Conn.t()
def batches_committed(conn, _params) do
batches =
[]
|> Keyword.put(:necessity_by_association, @batch_necessity_by_association)
|> Keyword.put(:api?, true)
|> Keyword.put(:committed?, true)
|> Reader.batches()
conn
|> put_status(200)
|> render(:arbitrum_batches, %{batches: batches})
end
@doc """
Function to handle GET requests to `/api/v2/main-page/arbitrum/batches/latest-number` endpoint.
"""
@spec batch_latest_number(Plug.Conn.t(), map()) :: Plug.Conn.t()
def batch_latest_number(conn, _params) do
conn
|> put_status(200)
|> render(:arbitrum_batch_latest_number, %{number: batch_latest_number()})
end
defp batch_latest_number do
case Reader.batch(:latest, api?: true) do
{:ok, batch} -> batch.number
{:error, :not_found} -> 0
end
end
@doc """
Function to handle GET requests to `/api/v2/main-page/arbitrum/messages/to-rollup` endpoint.
"""
@spec recent_messages_to_l2(Plug.Conn.t(), map()) :: Plug.Conn.t()
def recent_messages_to_l2(conn, _params) do
messages = Reader.relayed_l1_to_l2_messages(paging_options: %PagingOptions{page_size: 6}, api?: true)
conn
|> put_status(200)
|> render(:arbitrum_messages, %{messages: messages})
end
end
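To make the wire format concrete, a hypothetical rendered payload for GET /api/v2/arbitrum/messages/:direction (field names from ArbitrumView below; every value here is invented for illustration):

%{
  items: [
    %{
      "id" => 181,
      "origination_address" => "0x36bde71c97b33cc4729cf772ae268934f7ab70b2",
      "origination_transaction_hash" => "0x5a2b...",
      "origination_timestamp" => ~U[2024-05-01 12:00:00Z],
      "origination_transaction_block_number" => 19_828_534,
      "completion_transaction_hash" => nil,
      "status" => "confirmed"
    }
  ],
  next_page_params: %{"id" => 181}
}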

@ -19,6 +19,7 @@ defmodule BlockScoutWeb.API.V2.BlockController do
alias BlockScoutWeb.API.V2.{TransactionView, WithdrawalView}
alias Explorer.Chain
alias Explorer.Chain.Arbitrum.Reader, as: ArbitrumReader
alias Explorer.Chain.InternalTransaction
case Application.compile_env(:explorer, :chain_type) do
@ -39,6 +40,14 @@ defmodule BlockScoutWeb.API.V2.BlockController do
:zksync_execute_transaction => :optional
}
:arbitrum ->
@chain_type_transaction_necessity_by_association %{}
@chain_type_block_necessity_by_association %{
:arbitrum_batch => :optional,
:arbitrum_commitment_transaction => :optional,
:arbitrum_confirmation_transaction => :optional
}
_ ->
@chain_type_transaction_necessity_by_association %{}
@chain_type_block_necessity_by_association %{}
@ -85,20 +94,6 @@ defmodule BlockScoutWeb.API.V2.BlockController do
api?: true
]
@block_params [
necessity_by_association:
%{
[miner: :names] => :optional,
:uncles => :optional,
:nephews => :optional,
:rewards => :optional,
:transactions => :optional,
:withdrawals => :optional
}
|> Map.merge(@chain_type_block_necessity_by_association),
api?: true
]
action_fallback(BlockScoutWeb.API.V2.FallbackController)
@doc """
@ -155,6 +150,33 @@ defmodule BlockScoutWeb.API.V2.BlockController do
})
end
@doc """
Function to handle GET requests to `/api/v2/blocks/arbitrum-batch/:batch_number` endpoint.
It renders the list of L2 blocks bound to the specified batch.
"""
@spec arbitrum_batch(Plug.Conn.t(), any()) :: Plug.Conn.t()
def arbitrum_batch(conn, %{"batch_number" => batch_number} = params) do
full_options =
params
|> select_block_type()
|> Keyword.merge(paging_options(params))
|> Keyword.merge(@api_true)
{blocks, next_page} =
batch_number
|> ArbitrumReader.batch_blocks(full_options)
|> split_list_by_page()
next_page_params = next_page |> next_page_params(blocks, delete_parameters_from_next_page_params(params))
conn
|> put_status(200)
|> render(:blocks, %{
blocks: blocks |> maybe_preload_ens() |> maybe_preload_metadata(),
next_page_params: next_page_params
})
end
@doc """
Function to handle GET requests to `/api/v2/blocks/:block_hash_or_number/transactions` endpoint.
"""

@ -32,6 +32,7 @@ defmodule BlockScoutWeb.API.V2.TransactionController do
alias BlockScoutWeb.MicroserviceInterfaces.TransactionInterpretation, as: TransactionInterpretationService
alias BlockScoutWeb.Models.TransactionStateHelper
alias Explorer.Chain
alias Explorer.Chain.Arbitrum.Reader, as: ArbitrumReader
alias Explorer.Chain.Beacon.Reader, as: BeaconReader
alias Explorer.Chain.{Hash, InternalTransaction, Transaction}
alias Explorer.Chain.PolygonZkevm.Reader, as: PolygonZkevmReader
@ -114,6 +115,14 @@ defmodule BlockScoutWeb.API.V2.TransactionController do
|> Map.put(:zksync_prove_transaction, :optional)
|> Map.put(:zksync_execute_transaction, :optional)
:arbitrum ->
necessity_by_association_with_actions
|> Map.put(:arbitrum_batch, :optional)
|> Map.put(:arbitrum_commitment_transaction, :optional)
|> Map.put(:arbitrum_confirmation_transaction, :optional)
|> Map.put(:arbitrum_message_to_l2, :optional)
|> Map.put(:arbitrum_message_from_l2, :optional)
:suave ->
necessity_by_association_with_actions
|> Map.put(:logs, :optional)
@ -194,7 +203,35 @@ defmodule BlockScoutWeb.API.V2.TransactionController do
It renders the list of L2 transactions bound to the specified batch.
"""
@spec zksync_batch(Plug.Conn.t(), map()) :: Plug.Conn.t()
def zksync_batch(conn, %{"batch_number" => batch_number} = params) do
def zksync_batch(conn, params) do
handle_batch_transactions(conn, params, &ZkSyncReader.batch_transactions/2)
end
@doc """
Function to handle GET requests to `/api/v2/transactions/arbitrum-batch/:batch_number` endpoint.
It renders the list of L2 transactions bound to the specified batch.
"""
@spec arbitrum_batch(Plug.Conn.t(), map()) :: Plug.Conn.t()
def arbitrum_batch(conn, params) do
handle_batch_transactions(conn, params, &ArbitrumReader.batch_transactions/2)
end
# Processes and renders transactions for a specified batch into an HTTP response.
#
# This function retrieves a list of transactions for a given batch using a specified function,
# then extracts the transaction hashes. These hashes are used to retrieve the corresponding
# `Explorer.Chain.Transaction` records according to the given pagination options. It formats
# these transactions into an HTTP response.
#
# ## Parameters
# - `conn`: The connection object.
# - `params`: Parameters from the request, including the batch number.
# - `batch_transactions_fun`: A function to fetch transaction descriptions for the given batch.
#
# ## Returns
# - Updated connection object with the transactions data rendered.
@spec handle_batch_transactions(Plug.Conn.t(), map(), function()) :: Plug.Conn.t()
defp handle_batch_transactions(conn, %{"batch_number" => batch_number} = params, batch_transactions_fun) do
full_options =
[
necessity_by_association: @transaction_necessity_by_association
@ -206,13 +243,13 @@ defmodule BlockScoutWeb.API.V2.TransactionController do
# it would require re-implementing all pagination logic existing in Explorer.Chain.Transaction.
# In order to simplify the code, all transactions are requested from the batch and then
# only a subset of them is returned from `hashes_to_transactions`.
raw_transactions_list =
transactions_plus_one =
batch_number
|> ZkSyncReader.batch_transactions(api?: true)
|> Enum.map(fn tx -> tx.hash end)
|> batch_transactions_fun.(@api_true)
|> Enum.map(fn tx -> tx.tx_hash end)
|> Chain.hashes_to_transactions(full_options)
{transactions, next_page} = split_list_by_page(raw_transactions_list)
{transactions, next_page} = split_list_by_page(transactions_plus_one)
next_page_params = next_page |> next_page_params(transactions, delete_parameters_from_next_page_params(params))
conn
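The flow that handle_batch_transactions/3 implements can be condensed to the following hedged sketch (names as in the diff; pagination options elided):

# Condensed sketch of the batch-transactions flow, not verbatim PR code:
batch_number
|> ArbitrumReader.batch_transactions(api?: true)  # batch-bound descriptions
|> Enum.map(fn tx -> tx.tx_hash end)              # keep only the hashes
|> Chain.hashes_to_transactions(api?: true)       # full Transaction records, paginated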

@ -0,0 +1,425 @@
defmodule BlockScoutWeb.API.V2.ArbitrumView do
use BlockScoutWeb, :view
alias BlockScoutWeb.API.V2.Helper, as: APIV2Helper
alias Explorer.Chain.{Block, Hash, Transaction, Wei}
alias Explorer.Chain.Arbitrum.{L1Batch, LifecycleTransaction}
@doc """
Function to render GET requests to `/api/v2/arbitrum/messages/:direction` endpoint.
"""
@spec render(binary(), map()) :: map() | non_neg_integer()
def render("arbitrum_messages.json", %{
messages: messages,
next_page_params: next_page_params
}) do
messages_out =
messages
|> Enum.map(fn msg ->
%{
"id" => msg.message_id,
"origination_address" => msg.originator_address,
"origination_transaction_hash" => msg.originating_transaction_hash,
"origination_timestamp" => msg.origination_timestamp,
"origination_transaction_block_number" => msg.originating_transaction_block_number,
"completion_transaction_hash" => msg.completion_transaction_hash,
"status" => msg.status
}
end)
%{
items: messages_out,
next_page_params: next_page_params
}
end
@doc """
Function to render GET requests to `/api/v2/main-page/arbitrum/messages/to-rollup` endpoint.
"""
def render("arbitrum_messages.json", %{messages: messages}) do
messages_out =
messages
|> Enum.map(fn msg ->
%{
"origination_transaction_hash" => msg.originating_transaction_hash,
"origination_timestamp" => msg.origination_timestamp,
"origination_transaction_block_number" => msg.originating_transaction_block_number,
"completion_transaction_hash" => msg.completion_transaction_hash
}
end)
%{items: messages_out}
end
@doc """
Function to render GET requests to `/api/v2/arbitrum/messages/:direction/count` endpoint.
"""
def render("arbitrum_messages_count.json", %{count: count}) do
count
end
@doc """
Function to render GET requests to `/api/v2/arbitrum/batches/:batch_number` endpoint.
"""
def render("arbitrum_batch.json", %{batch: batch}) do
%{
"number" => batch.number,
"transactions_count" => batch.transactions_count,
"start_block" => batch.start_block,
"end_block" => batch.end_block,
"before_acc" => batch.before_acc,
"after_acc" => batch.after_acc
}
|> add_l1_tx_info(batch)
end
@doc """
Function to render GET requests to `/api/v2/arbitrum/batches` endpoint.
"""
def render("arbitrum_batches.json", %{
batches: batches,
next_page_params: next_page_params
}) do
%{
items: render_arbitrum_batches(batches),
next_page_params: next_page_params
}
end
@doc """
Function to render GET requests to `/api/v2/main-page/arbitrum/batches/committed` endpoint.
"""
def render("arbitrum_batches.json", %{batches: batches}) do
%{items: render_arbitrum_batches(batches)}
end
@doc """
Function to render GET requests to `/api/v2/arbitrum/batches/count` endpoint.
"""
def render("arbitrum_batches_count.json", %{count: count}) do
count
end
@doc """
Function to render GET requests to `/api/v2/main-page/arbitrum/batches/latest-number` endpoint.
"""
def render("arbitrum_batch_latest_number.json", %{number: number}) do
number
end
# Transforms a list of L1 batches into a map format for HTTP response.
#
# This function processes a list of Arbitrum L1 batches and converts each batch into
# a map that includes basic batch information and details of the associated
# transaction that committed the batch to L1.
#
# ## Parameters
# - `batches`: A list of `Explorer.Chain.Arbitrum.L1Batch` entries.
#
# ## Returns
# - A list of maps with detailed information about each batch, formatted for use
# in JSON HTTP responses.
@spec render_arbitrum_batches([L1Batch]) :: [map()]
defp render_arbitrum_batches(batches) do
Enum.map(batches, fn batch ->
%{
"number" => batch.number,
"transactions_count" => batch.transactions_count,
"block_count" => batch.end_block - batch.start_block + 1
}
|> add_l1_tx_info(batch)
end)
end
@doc """
Extends the json output with a sub-map containing information related to Arbitrum.
## Parameters
- `out_json`: a map defining output json which will be extended
- `transaction`: transaction structure containing Arbitrum related data
## Returns
A map extended with data related to the Arbitrum rollup
"""
@spec extend_transaction_json_response(map(), %{
:__struct__ => Transaction,
:arbitrum_batch => any(),
:arbitrum_commitment_transaction => any(),
:arbitrum_confirmation_transaction => any(),
:arbitrum_message_to_l2 => any(),
:arbitrum_message_from_l2 => any(),
:gas_used_for_l1 => Decimal.t(),
:gas_used => Decimal.t(),
:gas_price => Wei.t(),
optional(any()) => any()
}) :: map()
def extend_transaction_json_response(out_json, %Transaction{} = transaction) do
arbitrum_info =
%{}
|> extend_with_settlement_info(transaction)
|> extend_if_message(transaction)
|> extend_with_transaction_info(transaction)
Map.put(out_json, "arbitrum", arbitrum_info)
end
@doc """
Extends the json output with a sub-map containing information related to Arbitrum.
## Parameters
- `out_json`: a map defining output json which will be extended
- `block`: block structure containing Arbitrum related data
## Returns
A map extended with data related to the Arbitrum rollup
"""
@spec extend_block_json_response(map(), %{
:__struct__ => Block,
:arbitrum_batch => any(),
:arbitrum_commitment_transaction => any(),
:arbitrum_confirmation_transaction => any(),
:nonce => Hash.Nonce.t(),
:send_count => non_neg_integer(),
:send_root => Hash.Full.t(),
:l1_block_number => non_neg_integer(),
optional(any()) => any()
}) :: map()
def extend_block_json_response(out_json, %Block{} = block) do
arbitrum_info =
%{}
|> extend_with_settlement_info(block)
|> extend_with_block_info(block)
Map.put(out_json, "arbitrum", arbitrum_info)
end
# Augments an output JSON with settlement-related information such as batch number and L1 transaction details.
@spec extend_with_settlement_info(map(), %{
:__struct__ => Block | Transaction,
:arbitrum_batch => any(),
:arbitrum_commitment_transaction => any(),
:arbitrum_confirmation_transaction => any(),
optional(any()) => any()
}) :: map()
defp extend_with_settlement_info(out_json, arbitrum_entity) do
out_json
|> add_l1_txs_info_and_status(%{
batch_number: get_batch_number(arbitrum_entity),
commitment_transaction: arbitrum_entity.arbitrum_commitment_transaction,
confirmation_transaction: arbitrum_entity.arbitrum_confirmation_transaction
})
|> Map.put("batch_number", get_batch_number(arbitrum_entity))
end
# Retrieves the batch number from an Arbitrum block or transaction if the batch
# data is loaded.
@spec get_batch_number(%{
:__struct__ => Block | Transaction,
:arbitrum_batch => any(),
optional(any()) => any()
}) :: nil | non_neg_integer()
defp get_batch_number(arbitrum_entity) do
case Map.get(arbitrum_entity, :arbitrum_batch) do
nil -> nil
%Ecto.Association.NotLoaded{} -> nil
value -> value.number
end
end
# Augments an output JSON with commit transaction details and its status.
@spec add_l1_tx_info(map(), %{
:__struct__ => L1Batch,
:commitment_transaction => any(),
optional(any()) => any()
}) :: map()
defp add_l1_tx_info(out_json, %L1Batch{} = batch) do
l1_tx = %{commitment_transaction: handle_associated_l1_txs_properly(batch.commitment_transaction)}
out_json
|> Map.merge(%{
"commitment_transaction" => %{
"hash" => APIV2Helper.get_2map_data(l1_tx, :commitment_transaction, :hash),
"block_number" => APIV2Helper.get_2map_data(l1_tx, :commitment_transaction, :block),
"timestamp" => APIV2Helper.get_2map_data(l1_tx, :commitment_transaction, :ts),
"status" => APIV2Helper.get_2map_data(l1_tx, :commitment_transaction, :status)
}
})
end
# Augments an output JSON with commit and confirm transaction details and their statuses.
@spec add_l1_txs_info_and_status(map(), %{
:commitment_transaction => any(),
:confirmation_transaction => any(),
optional(:batch_number) => any()
}) :: map()
defp add_l1_txs_info_and_status(out_json, arbitrum_item)
when is_map(arbitrum_item) and
is_map_key(arbitrum_item, :commitment_transaction) and
is_map_key(arbitrum_item, :confirmation_transaction) do
l1_txs = get_associated_l1_txs(arbitrum_item)
out_json
|> Map.merge(%{
"status" => block_or_transaction_status(arbitrum_item),
"commitment_transaction" => %{
"hash" => APIV2Helper.get_2map_data(l1_txs, :commitment_transaction, :hash),
"timestamp" => APIV2Helper.get_2map_data(l1_txs, :commitment_transaction, :ts),
"status" => APIV2Helper.get_2map_data(l1_txs, :commitment_transaction, :status)
},
"confirmation_transaction" => %{
"hash" => APIV2Helper.get_2map_data(l1_txs, :confirmation_transaction, :hash),
"timestamp" => APIV2Helper.get_2map_data(l1_txs, :confirmation_transaction, :ts),
"status" => APIV2Helper.get_2map_data(l1_txs, :confirmation_transaction, :status)
}
})
end
# Extracts the transaction hash, block number, timestamp, and finalization status of
# the L1 transactions associated with an Arbitrum rollup entity: a transaction or a block.
#
# ## Parameters
# - `arbitrum_item`: a map describing a rollup transaction or block.
#
# ## Returns
# A map containing nested maps describing the corresponding L1 transactions
@spec get_associated_l1_txs(%{
:commitment_transaction => any(),
:confirmation_transaction => any(),
optional(any()) => any()
}) :: %{
:commitment_transaction =>
nil
| %{
:hash => nil | binary(),
:block_number => nil | non_neg_integer(),
:ts => nil | DateTime.t(),
:status => nil | :finalized | :unfinalized
},
:confirmation_transaction =>
nil
| %{
:hash => nil | binary(),
:block_number => nil | non_neg_integer(),
:ts => nil | DateTime.t(),
:status => nil | :finalized | :unfinalized
}
}
defp get_associated_l1_txs(arbitrum_item) do
[:commitment_transaction, :confirmation_transaction]
|> Enum.reduce(%{}, fn key, l1_txs ->
Map.put(l1_txs, key, handle_associated_l1_txs_properly(Map.get(arbitrum_item, key)))
end)
end
# Returns details of an associated L1 transaction or nil if not loaded or not available.
@spec handle_associated_l1_txs_properly(LifecycleTransaction | Ecto.Association.NotLoaded.t() | nil) ::
nil
| %{
:hash => nil | binary(),
:block => nil | non_neg_integer(),
:ts => nil | DateTime.t(),
:status => nil | :finalized | :unfinalized
}
defp handle_associated_l1_txs_properly(associated_l1_tx) do
case associated_l1_tx do
nil -> nil
%Ecto.Association.NotLoaded{} -> nil
value -> %{hash: value.hash, block: value.block_number, ts: value.timestamp, status: value.status}
end
end
# Inspects L1 transactions of a rollup block or transaction to determine its status.
#
# ## Parameters
# - `arbitrum_item`: An Arbitrum transaction or block.
#
# ## Returns
# A string with one of predefined statuses
@spec block_or_transaction_status(%{
:commitment_transaction => any(),
:confirmation_transaction => any(),
optional(:batch_number) => any()
}) :: String.t()
defp block_or_transaction_status(arbitrum_item) do
cond do
APIV2Helper.specified?(arbitrum_item.confirmation_transaction) -> "Confirmed on base"
APIV2Helper.specified?(arbitrum_item.commitment_transaction) -> "Sent to base"
not is_nil(arbitrum_item.batch_number) -> "Sealed on rollup"
true -> "Processed on rollup"
end
end
# Determines if an Arbitrum transaction contains a cross-chain message and extends
# the incoming map with the `contains_message` field to reflect the direction of
# the message.
#
# ## Parameters
# - `arbitrum_tx`: An Arbitrum transaction.
#
# ## Returns
# - A map extended with a field indicating the direction of the message.
@spec extend_if_message(map(), %{
:__struct__ => Transaction,
:arbitrum_message_to_l2 => any(),
:arbitrum_message_from_l2 => any(),
optional(any()) => any()
}) :: map()
defp extend_if_message(arbitrum_json, %Transaction{} = arbitrum_tx) do
message_type =
case {APIV2Helper.specified?(arbitrum_tx.arbitrum_message_to_l2),
APIV2Helper.specified?(arbitrum_tx.arbitrum_message_from_l2)} do
{true, false} -> "incoming"
{false, true} -> "outcoming"
_ -> nil
end
Map.put(arbitrum_json, "contains_message", message_type)
end
# Extends the output JSON with information from Arbitrum-specific fields of the transaction.
@spec extend_with_transaction_info(map(), %{
:__struct__ => Transaction,
:gas_used_for_l1 => Decimal.t(),
:gas_used => Decimal.t(),
:gas_price => Wei.t(),
optional(any()) => any()
}) :: map()
defp extend_with_transaction_info(out_json, %Transaction{} = arbitrum_tx) do
gas_used_for_l2 =
arbitrum_tx.gas_used
|> Decimal.sub(arbitrum_tx.gas_used_for_l1)
poster_fee =
arbitrum_tx.gas_price
|> Wei.to(:wei)
|> Decimal.mult(arbitrum_tx.gas_used_for_l1)
network_fee =
arbitrum_tx.gas_price
|> Wei.to(:wei)
|> Decimal.mult(gas_used_for_l2)
out_json
|> Map.put("gas_used_for_l1", arbitrum_tx.gas_used_for_l1)
|> Map.put("gas_used_for_l2", gas_used_for_l2)
|> Map.put("poster_fee", poster_fee)
|> Map.put("network_fee", network_fee)
end
# Extends the output JSON with information from the Arbitrum-specific fields of the block.
@spec extend_with_block_info(map(), %{
:__struct__ => Block,
:nonce => Hash.Nonce.t(),
:send_count => non_neg_integer(),
:send_root => Hash.Full.t(),
:l1_block_number => non_neg_integer(),
optional(any()) => any()
}) :: map()
defp extend_with_block_info(out_json, %Block{} = arbitrum_block) do
out_json
|> Map.put("delayed_messages", Hash.to_integer(arbitrum_block.nonce))
|> Map.put("l1_block_height", arbitrum_block.l1_block_number)
|> Map.put("send_count", arbitrum_block.send_count)
|> Map.put("send_root", arbitrum_block.send_root)
end
end
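A worked example of the fee split computed by extend_with_transaction_info/2 (all numbers hypothetical):

gas_used        = Decimal.new(700_000)
gas_used_for_l1 = Decimal.new(200_000)
gas_price_wei   = Decimal.new(100_000_000)  # 0.1 gwei expressed in wei

gas_used_for_l2 = Decimal.sub(gas_used, gas_used_for_l1)    # 500_000
poster_fee  = Decimal.mult(gas_price_wei, gas_used_for_l1)  # 2.0e13 wei to the L1 poster
network_fee = Decimal.mult(gas_price_wei, gas_used_for_l2)  # 5.0e13 wei to the L2 network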

@ -120,6 +120,16 @@ defmodule BlockScoutWeb.API.V2.BlockView do
end
end
:arbitrum ->
defp chain_type_fields(result, block, single_block?) do
if single_block? do
# credo:disable-for-next-line Credo.Check.Design.AliasUsage
BlockScoutWeb.API.V2.ArbitrumView.extend_block_json_response(result, block)
else
result
end
end
:ethereum ->
defp chain_type_fields(result, block, single_block?) do
# credo:disable-for-next-line Credo.Check.Design.AliasUsage

@ -160,4 +160,44 @@ defmodule BlockScoutWeb.API.V2.Helper do
x_days_back = Date.add(latest, -1 * (num_days - 1))
%{earliest: x_days_back, latest: latest}
end
@doc """
Checks if an item associated with a DB entity has an actual value
## Parameters
- `associated_item`: an item associated with a DB entity
## Returns
- `false`: if the item is nil or not loaded
- `true`: if the item has an actual value
"""
@spec specified?(any()) :: boolean()
def specified?(associated_item) do
case associated_item do
nil -> false
%Ecto.Association.NotLoaded{} -> false
_ -> true
end
end
@doc """
Gets the value of an element nested in a map using two keys.
Clarification: Returns `map[key1][key2]`
## Parameters
- `map`: The high-level map.
- `key1`: The key of the element in `map`.
- `key2`: The key of the element in the map accessible by `map[key1]`.
## Returns
The value of the element, or `nil` if the map accessible by `key1` does not exist.
"""
@spec get_2map_data(map(), any(), any()) :: any()
def get_2map_data(map, key1, key2) do
case Map.get(map, key1) do
nil -> nil
inner_map -> Map.get(inner_map, key2)
end
end
end
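Doctest-style usage of the two helpers moved into this module (values hypothetical):

iex> alias BlockScoutWeb.API.V2.Helper, as: APIV2Helper
iex> APIV2Helper.specified?(%Ecto.Association.NotLoaded{})
false
iex> APIV2Helper.get_2map_data(%{commitment_transaction: %{hash: "0xabc"}}, :commitment_transaction, :hash)
"0xabc"
iex> APIV2Helper.get_2map_data(%{commitment_transaction: nil}, :commitment_transaction, :hash)
nil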

@ -837,6 +837,20 @@ defmodule BlockScoutWeb.API.V2.TransactionView do
end
end
:arbitrum ->
defp chain_type_transformations(transactions) do
transactions
end
defp chain_type_fields(result, transaction, single_tx?, _conn, _watchlist_names) do
if single_tx? do
# credo:disable-for-next-line Credo.Check.Design.AliasUsage
BlockScoutWeb.API.V2.ArbitrumView.extend_transaction_json_response(result, transaction)
else
result
end
end
:optimism ->
defp chain_type_transformations(transactions) do
transactions

@ -4,6 +4,8 @@ defmodule BlockScoutWeb.API.V2.ZkSyncView do
alias Explorer.Chain.{Block, Transaction}
alias Explorer.Chain.ZkSync.TransactionBatch
alias BlockScoutWeb.API.V2.Helper, as: APIV2Helper
@doc """
Function to render GET requests to `/api/v2/zksync/batches/:batch_number` endpoint.
"""
@ -146,12 +148,12 @@ defmodule BlockScoutWeb.API.V2.ZkSyncView do
out_json
|> Map.merge(%{
"status" => batch_status(zksync_item),
"commit_transaction_hash" => get_2map_data(l1_txs, :commit_transaction, :hash),
"commit_transaction_timestamp" => get_2map_data(l1_txs, :commit_transaction, :ts),
"prove_transaction_hash" => get_2map_data(l1_txs, :prove_transaction, :hash),
"prove_transaction_timestamp" => get_2map_data(l1_txs, :prove_transaction, :ts),
"execute_transaction_hash" => get_2map_data(l1_txs, :execute_transaction, :hash),
"execute_transaction_timestamp" => get_2map_data(l1_txs, :execute_transaction, :ts)
"commit_transaction_hash" => APIV2Helper.get_2map_data(l1_txs, :commit_transaction, :hash),
"commit_transaction_timestamp" => APIV2Helper.get_2map_data(l1_txs, :commit_transaction, :ts),
"prove_transaction_hash" => APIV2Helper.get_2map_data(l1_txs, :prove_transaction, :hash),
"prove_transaction_timestamp" => APIV2Helper.get_2map_data(l1_txs, :prove_transaction, :ts),
"execute_transaction_hash" => APIV2Helper.get_2map_data(l1_txs, :execute_transaction, :hash),
"execute_transaction_timestamp" => APIV2Helper.get_2map_data(l1_txs, :execute_transaction, :ts)
})
end
@ -183,47 +185,13 @@ defmodule BlockScoutWeb.API.V2.ZkSyncView do
# A string with one of predefined statuses
defp batch_status(zksync_item) do
cond do
specified?(zksync_item.execute_transaction) -> "Executed on L1"
specified?(zksync_item.prove_transaction) -> "Validated on L1"
specified?(zksync_item.commit_transaction) -> "Sent to L1"
APIV2Helper.specified?(zksync_item.execute_transaction) -> "Executed on L1"
APIV2Helper.specified?(zksync_item.prove_transaction) -> "Validated on L1"
APIV2Helper.specified?(zksync_item.commit_transaction) -> "Sent to L1"
# Batch entity itself has no batch_number
not Map.has_key?(zksync_item, :batch_number) -> "Sealed on L2"
not is_nil(zksync_item.batch_number) -> "Sealed on L2"
true -> "Processed on L2"
end
end
# Checks if an item associated with a DB entity has actual value
#
# ## Parameters
# - `associated_item`: an item associated with a DB entity
#
# ## Returns
# - `false`: if the item is nil or not loaded
# - `true`: if the item has actual value
defp specified?(associated_item) do
case associated_item do
nil -> false
%Ecto.Association.NotLoaded{} -> false
_ -> true
end
end
# Gets the value of an element nested in a map using two keys.
#
# Clarification: Returns `map[key1][key2]`
#
# ## Parameters
# - `map`: The high-level map.
# - `key1`: The key of the element in `map`.
# - `key2`: The key of the element in the map accessible by `map[key1]`.
#
# ## Returns
# The value of the element, or `nil` if the map accessible by `key1` does not exist.
defp get_2map_data(map, key1, key2) do
case Map.get(map, key1) do
nil -> nil
inner_map -> Map.get(inner_map, key2)
end
end
end

@ -31,7 +31,8 @@ defmodule BlockScoutWeb.Mixfile do
Explorer.Chain.Cache.OptimismFinalizationPeriod,
Explorer.Chain.Optimism.OutputRoot,
Explorer.Chain.Optimism.WithdrawalEvent,
Explorer.Chain.ZkSync.Reader
Explorer.Chain.ZkSync.Reader,
Explorer.Chain.Arbitrum.Reader
]
]
]

@ -29,6 +29,15 @@ defmodule EthereumJSONRPC.Block do
]
)
:arbitrum ->
@chain_type_fields quote(
do: [
send_count: non_neg_integer(),
send_root: EthereumJSONRPC.hash(),
l1_block_number: non_neg_integer()
]
)
_ ->
@chain_type_fields quote(do: [])
end
@ -172,6 +181,11 @@ defmodule EthereumJSONRPC.Block do
"blobGasUsed" => 262144,\
"excessBlobGas" => 79429632,\
"""
:arbitrum -> """
"sendRoot" => "0xc71ee2cf4201f65590aa6c052270dc41e926e628f213e268a58d9a8d8f739f82",\
"sendCount" => 91,\
"l1BlockNumber" => 19828534,\
"""
_ -> ""
end}
...> "uncles" => []
@ -209,6 +223,11 @@ defmodule EthereumJSONRPC.Block do
blob_gas_used: 262144,\
excess_blob_gas: 79429632,\
"""
:arbitrum -> """
send_root: "0xc71ee2cf4201f65590aa6c052270dc41e926e628f213e268a58d9a8d8f739f82",\
send_count: 91,\
l1_block_number: 19828534,\
"""
_ -> ""
end}
uncles: []
@ -272,6 +291,11 @@ defmodule EthereumJSONRPC.Block do
blob_gas_used: 0,\
excess_blob_gas: 0,\
"""
:arbitrum -> """
send_root: nil,\
send_count: nil,\
l1_block_number: nil,\
"""
_ -> ""
end}
uncles: []
@ -461,9 +485,9 @@ defmodule EthereumJSONRPC.Block do
}
end
defp chain_type_fields(params, elixir) do
case Application.get_env(:explorer, :chain_type) do
:rsk ->
case Application.compile_env(:explorer, :chain_type) do
:rsk ->
defp chain_type_fields(params, elixir) do
params
|> Map.merge(%{
minimum_gas_price: Map.get(elixir, "minimumGasPrice"),
@ -472,8 +496,10 @@ defmodule EthereumJSONRPC.Block do
bitcoin_merged_mining_merkle_proof: Map.get(elixir, "bitcoinMergedMiningMerkleProof"),
hash_for_merged_mining: Map.get(elixir, "hashForMergedMining")
})
end
:ethereum ->
:ethereum ->
defp chain_type_fields(params, elixir) do
params
|> Map.merge(%{
withdrawals_root:
@ -481,10 +507,20 @@ defmodule EthereumJSONRPC.Block do
blob_gas_used: Map.get(elixir, "blobGasUsed", 0),
excess_blob_gas: Map.get(elixir, "excessBlobGas", 0)
})
end
_ ->
:arbitrum ->
defp chain_type_fields(params, elixir) do
params
end
|> Map.merge(%{
send_count: Map.get(elixir, "sendCount"),
send_root: Map.get(elixir, "sendRoot"),
l1_block_number: Map.get(elixir, "l1BlockNumber")
})
end
_ ->
defp chain_type_fields(params, _), do: params
end
@doc """
@ -790,7 +826,9 @@ defmodule EthereumJSONRPC.Block do
end
defp entry_to_elixir({key, quantity}, _block)
when key in ~w(difficulty gasLimit gasUsed minimumGasPrice baseFeePerGas number size cumulativeDifficulty totalDifficulty paidFees minimumGasPrice blobGasUsed excessBlobGas) and
when key in ~w(difficulty gasLimit gasUsed minimumGasPrice baseFeePerGas number size
cumulativeDifficulty totalDifficulty paidFees minimumGasPrice blobGasUsed
excessBlobGas l1BlockNumber sendCount) and
not is_nil(quantity) do
{key, quantity_to_integer(quantity)}
end
@ -804,8 +842,10 @@ defmodule EthereumJSONRPC.Block do
# `t:EthereumJSONRPC.address/0` and `t:EthereumJSONRPC.hash/0` pass through as `Explorer.Chain` can verify correct
# hash format
defp entry_to_elixir({key, _} = entry, _block)
when key in ~w(author extraData hash logsBloom miner mixHash nonce parentHash receiptsRoot sealFields sha3Uncles
signature stateRoot step transactionsRoot uncles withdrawalsRoot bitcoinMergedMiningHeader bitcoinMergedMiningCoinbaseTransaction bitcoinMergedMiningMerkleProof hashForMergedMining),
when key in ~w(author extraData hash logsBloom miner mixHash nonce parentHash receiptsRoot
sealFields sha3Uncles signature stateRoot step transactionsRoot uncles
withdrawalsRoot bitcoinMergedMiningHeader bitcoinMergedMiningCoinbaseTransaction
bitcoinMergedMiningMerkleProof hashForMergedMining sendRoot),
do: entry
defp entry_to_elixir({"timestamp" = key, timestamp}, _block) do
@ -825,11 +865,6 @@ defmodule EthereumJSONRPC.Block do
{key, Withdrawals.to_elixir(withdrawals, block_hash, quantity_to_integer(block_number))}
end
# Arbitrum fields
defp entry_to_elixir({"l1BlockNumber", _}, _block) do
{:ignore, :ignore}
end
# bitcoinMergedMiningCoinbaseTransaction bitcoinMergedMiningHeader bitcoinMergedMiningMerkleProof hashForMergedMining - RSK https://github.com/blockscout/blockscout/pull/2934
# committedSeals committee pastCommittedSeals proposerSeal round - Autonity network https://github.com/blockscout/blockscout/pull/3480
# blockGasCost extDataGasUsed - sgb/ava https://github.com/blockscout/blockscout/pull/5301

@ -129,6 +129,11 @@ defmodule EthereumJSONRPC.Blocks do
blob_gas_used: 0,\
excess_blob_gas: 0,\
"""
:arbitrum -> """
send_root: nil,\
send_count: nil,\
l1_block_number: nil,\
"""
_ -> ""
end}
uncles: ["0xe670ec64341771606e55d6b4ca35a1a6b75ee3d5145a99d05921026d15273311"]

@ -163,11 +163,34 @@ defmodule EthereumJSONRPC.HTTP do
{:error, resp}
end
# restrict response to only those fields supported by the JSON-RPC 2.0 standard, which means that level of keys is
# validated, so we can indicate that with switch to atom keys.
def standardize_response(%{"jsonrpc" => "2.0" = jsonrpc, "id" => id} = unstandardized) do
@doc """
Standardizes responses to adhere to the JSON-RPC 2.0 standard.
This function adjusts responses to conform to JSON-RPC 2.0, ensuring the keys are atom-based
and that 'id', 'jsonrpc', 'result', and 'error' fields meet the protocol's requirements.
It also validates the mutual exclusivity of 'result' and 'error' fields within a response.
## Parameters
- `unstandardized`: A map representing the response with string keys.
## Returns
- A standardized map with atom keys and fields aligned with the JSON-RPC 2.0 standard, including
handling of possible mutual exclusivity errors between 'result' and 'error' fields.
"""
@spec standardize_response(map()) :: %{
:id => nil | non_neg_integer(),
:jsonrpc => binary(),
optional(:error) => %{:code => integer(), :message => binary(), optional(:data) => any()},
optional(:result) => any()
}
def standardize_response(%{"jsonrpc" => "2.0" = jsonrpc} = unstandardized) do
# Avoid extracting `id` directly in the function declaration. Some endpoints
# do not adhere to standards and may omit the `id` in responses related to
# error scenarios. Consequently, the function call would fail during input
# argument matching.
# Nethermind returns string ids
id = quantity_to_integer(id)
id = quantity_to_integer(unstandardized["id"])
standardized = %{jsonrpc: jsonrpc, id: id}
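This is the "Too Many Requests" case from the commit log: some endpoints omit "id" on error responses. A hedged sketch of the resulting behavior (assumes quantity_to_integer/1 maps a missing id to nil):

EthereumJSONRPC.HTTP.standardize_response(%{
  "jsonrpc" => "2.0",
  "error" => %{"code" => -32005, "message" => "Too Many Requests"}
})
# => %{jsonrpc: "2.0", id: nil, error: %{code: -32005, message: "Too Many Requests"}}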
@ -187,8 +210,21 @@ defmodule EthereumJSONRPC.HTTP do
end
end
# restrict error to only those fields supported by the JSON-RPC 2.0 standard, which means that level of keys is
# validated, so we can indicate that with switch to atom keys.
@doc """
Standardizes error responses to adhere to the JSON-RPC 2.0 standard.
This function converts a map containing error information into a format compliant
with the JSON-RPC 2.0 specification. It ensures the keys are atom-based and checks
for the presence of optional 'data' field, incorporating it if available.
## Parameters
- `unstandardized`: A map representing the error with string keys: "code", "message"
and "data" (optional).
## Returns
- A standardized map with keys as atoms and fields aligned with the JSON-RPC 2.0 standard.
"""
@spec standardize_error(map()) :: %{:code => integer(), :message => binary(), optional(:data) => any()}
def standardize_error(%{"code" => code, "message" => message} = unstandardized)
when is_integer(code) and is_binary(message) do
standardized = %{code: code, message: message}

@ -8,6 +8,36 @@ defmodule EthereumJSONRPC.Receipt do
alias EthereumJSONRPC.Logs
case Application.compile_env(:explorer, :chain_type) do
:ethereum ->
@chain_type_fields quote(
do: [
blob_gas_price: non_neg_integer(),
blob_gas_used: non_neg_integer()
]
)
:optimism ->
@chain_type_fields quote(
do: [
l1_fee: non_neg_integer(),
l1_fee_scalar: non_neg_integer(),
l1_gas_price: non_neg_integer(),
l1_gas_used: non_neg_integer()
]
)
:arbitrum ->
@chain_type_fields quote(
do: [
gas_used_for_l1: non_neg_integer()
]
)
_ ->
@chain_type_fields quote(do: [])
end
@type elixir :: %{String.t() => String.t() | non_neg_integer}
@typedoc """
@ -38,11 +68,16 @@ defmodule EthereumJSONRPC.Receipt do
| nil
}
@typedoc """
* `:ok` - transaction succeeded
* `:error` - transaction failed
"""
@type status :: :ok | :error
@type params :: %{
unquote_splicing(@chain_type_fields),
optional(:gas_price) => non_neg_integer(),
cumulative_gas_used: non_neg_integer(),
gas_used: non_neg_integer(),
created_contract_address_hash: EthereumJSONRPC.hash(),
status: :ok | :error,
transaction_hash: EthereumJSONRPC.hash(),
transaction_index: non_neg_integer()
}
@doc """
Get `t:EthereumJSONRPC.Logs.elixir/0` from `t:elixir/0`
@ -86,6 +121,9 @@ defmodule EthereumJSONRPC.Receipt do
l1_gas_price: 0,\
l1_gas_used: 0\
"""
:arbitrum -> """
gas_used_for_l1: nil\
"""
_ -> ""
end}
}
@ -132,20 +170,15 @@ defmodule EthereumJSONRPC.Receipt do
l1_gas_price: 0,\
l1_gas_used: 0\
"""
:arbitrum -> """
gas_used_for_l1: nil\
"""
_ -> ""
end}
}
"""
@spec elixir_to_params(elixir) :: %{
optional(:gas_price) => non_neg_integer(),
cumulative_gas_used: non_neg_integer,
gas_used: non_neg_integer,
created_contract_address_hash: String.t() | nil,
status: status(),
transaction_hash: String.t(),
transaction_index: non_neg_integer()
}
@spec elixir_to_params(elixir) :: params
def elixir_to_params(elixir) do
elixir
|> do_elixir_to_params()
@ -184,16 +217,18 @@ defmodule EthereumJSONRPC.Receipt do
defp maybe_append_gas_price(params, _), do: params
defp chain_type_fields(params, elixir) do
case Application.get_env(:explorer, :chain_type) do
:ethereum ->
case Application.compile_env(:explorer, :chain_type) do
:ethereum ->
defp chain_type_fields(params, elixir) do
params
|> Map.merge(%{
blob_gas_price: Map.get(elixir, "blobGasPrice", 0),
blob_gas_used: Map.get(elixir, "blobGasUsed", 0)
})
end
:optimism ->
:optimism ->
defp chain_type_fields(params, elixir) do
params
|> Map.merge(%{
l1_fee: Map.get(elixir, "l1Fee", 0),
@ -201,10 +236,18 @@ defmodule EthereumJSONRPC.Receipt do
l1_gas_price: Map.get(elixir, "l1GasPrice", 0),
l1_gas_used: Map.get(elixir, "l1GasUsed", 0)
})
end
_ ->
:arbitrum ->
defp chain_type_fields(params, elixir) do
params
end
|> Map.merge(%{
gas_used_for_l1: Map.get(elixir, "gasUsedForL1")
})
end
_ ->
defp chain_type_fields(params, _), do: params
end
@doc """
@ -320,11 +363,13 @@ defmodule EthereumJSONRPC.Receipt do
# hash format
# gas is passed in from the `t:EthereumJSONRPC.Transaction.params/0` to allow pre-Byzantium status to be derived
defp entry_to_elixir({key, _} = entry)
when key in ~w(blockHash contractAddress from gas logsBloom root to transactionHash revertReason type l1FeeScalar),
when key in ~w(blockHash contractAddress from gas logsBloom root to transactionHash
revertReason type l1FeeScalar),
do: {:ok, entry}
defp entry_to_elixir({key, quantity})
when key in ~w(blockNumber cumulativeGasUsed gasUsed transactionIndex blobGasUsed blobGasPrice l1Fee l1GasPrice l1GasUsed effectiveGasPrice) do
when key in ~w(blockNumber cumulativeGasUsed gasUsed transactionIndex blobGasUsed
blobGasPrice l1Fee l1GasPrice l1GasUsed effectiveGasPrice gasUsedForL1) do
result =
if is_nil(quantity) do
nil
@ -367,7 +412,7 @@ defmodule EthereumJSONRPC.Receipt do
end
# Arbitrum fields
defp entry_to_elixir({key, _}) when key in ~w(returnData returnCode feeStats l1BlockNumber gasUsedForL1) do
defp entry_to_elixir({key, _}) when key in ~w(returnData returnCode feeStats l1BlockNumber) do
:ignore
end

@ -111,6 +111,9 @@ defmodule EthereumJSONRPC.Receipts do
l1_gas_price: 0,\
l1_gas_used: 0\
"""
:arbitrum -> """
gas_used_for_l1: nil\
"""
_ -> ""
end}
}

@ -48,6 +48,13 @@ defmodule EthereumJSONRPC.Transaction do
]
)
:arbitrum ->
@chain_type_fields quote(
do: [
request_id: non_neg_integer()
]
)
_ ->
@chain_type_fields quote(do: [])
end
@ -509,6 +516,11 @@ defmodule EthereumJSONRPC.Transaction do
})
end
:arbitrum ->
put_if_present(params, elixir, [
{"requestId", :request_id}
])
_ ->
params
end
@ -631,7 +643,7 @@ defmodule EthereumJSONRPC.Transaction do
do: {"input", value}
defp entry_to_elixir({key, quantity})
when key in ~w(gas gasPrice nonce r s standardV v value type maxPriorityFeePerGas maxFeePerGas maxFeePerBlobGas) and
when key in ~w(gas gasPrice nonce r s standardV v value type maxPriorityFeePerGas maxFeePerGas maxFeePerBlobGas requestId) and
quantity != nil do
{key, quantity_to_integer(quantity)}
end

@ -6,7 +6,7 @@ defmodule EthereumJSONRPC.BlockTest do
alias EthereumJSONRPC.Block
describe "elixir_to_params/1" do
test "sets totalDifficulty to nil if it's empty" do
test "sets default values for params (incl. nil)" do
result =
Block.elixir_to_params(%{
"difficulty" => 17_561_410_778,
@ -55,32 +55,38 @@ defmodule EthereumJSONRPC.BlockTest do
transactions_root: "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
uncles: []
}
|> (&if(Application.get_env(:explorer, :chain_type) == :rsk,
do:
Map.merge(
&1,
%{
bitcoin_merged_mining_coinbase_transaction: nil,
bitcoin_merged_mining_header: nil,
bitcoin_merged_mining_merkle_proof: nil,
hash_for_merged_mining: nil,
minimum_gas_price: nil
}
),
else: &1
)).()
|> (&if(Application.get_env(:explorer, :chain_type) == :ethereum,
do:
Map.merge(
&1,
%{
withdrawals_root: "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
blob_gas_used: 0,
excess_blob_gas: 0
}
),
else: &1
)).()
|> Map.merge(chain_type_fields())
end
case Application.compile_env(:explorer, :chain_type) do
:rsk ->
defp chain_type_fields,
do: %{
bitcoin_merged_mining_coinbase_transaction: nil,
bitcoin_merged_mining_header: nil,
bitcoin_merged_mining_merkle_proof: nil,
hash_for_merged_mining: nil,
minimum_gas_price: nil
}
:ethereum ->
defp chain_type_fields,
do: %{
withdrawals_root: "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
blob_gas_used: 0,
excess_blob_gas: 0
}
:arbitrum ->
defp chain_type_fields,
do: %{
send_root: nil,
send_count: nil,
l1_block_number: nil
}
_ ->
defp chain_type_fields, do: %{}
end
end

@ -31,6 +31,9 @@ config :explorer, Explorer.Repo.Suave, timeout: :timer.seconds(80)
config :explorer, Explorer.Repo.Beacon, timeout: :timer.seconds(80)
# Configure Arbitrum database
config :explorer, Explorer.Repo.Arbitrum, timeout: :timer.seconds(80)
config :explorer, Explorer.Repo.BridgedTokens, timeout: :timer.seconds(80)
config :explorer, Explorer.Repo.Filecoin, timeout: :timer.seconds(80)

@ -48,6 +48,10 @@ config :explorer, Explorer.Repo.Beacon,
prepare: :unnamed,
timeout: :timer.seconds(60)
config :explorer, Explorer.Repo.Arbitrum,
prepare: :unnamed,
timeout: :timer.seconds(60)
config :explorer, Explorer.Repo.BridgedTokens,
prepare: :unnamed,
timeout: :timer.seconds(60)

@ -62,6 +62,7 @@ for repo <- [
Explorer.Repo.RSK,
Explorer.Repo.Shibarium,
Explorer.Repo.Suave,
Explorer.Repo.Arbitrum,
Explorer.Repo.BridgedTokens,
Explorer.Repo.Filecoin,
Explorer.Repo.Stability,

@ -155,6 +155,7 @@ defmodule Explorer.Application do
Explorer.Repo.RSK,
Explorer.Repo.Shibarium,
Explorer.Repo.Suave,
Explorer.Repo.Arbitrum,
Explorer.Repo.BridgedTokens,
Explorer.Repo.Filecoin,
Explorer.Repo.Stability

@ -1731,6 +1731,20 @@ defmodule Explorer.Chain do
|> Enum.into(%{})
end
@doc """
Retrieves the total row count for a given table.
This function estimates the row count using system catalogs. If the estimate
is unavailable, it performs an exact count using an aggregate query.
## Parameters
- `module`: The module representing the table schema.
- `options`: An optional keyword list of options, such as selecting a specific repository.
## Returns
- The total row count as a non-negative integer.
"""
@spec get_table_rows_total_count(atom(), keyword()) :: non_neg_integer()
def get_table_rows_total_count(module, options) do
table_name = module.__schema__(:source)
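A hedged sketch of the two-step strategy described above (catalog estimate first, exact count as a fallback; the SQL is the standard PostgreSQL idiom, not necessarily the PR's exact query):

# Estimate from the system catalogs:
#   SELECT reltuples::bigint FROM pg_class WHERE relname = 'arbitrum_l1_batches';
# Exact fallback when the estimate is unavailable:
Explorer.Repo.aggregate(Explorer.Chain.Arbitrum.L1Batch, :count)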
@ -2468,6 +2482,21 @@ defmodule Explorer.Chain do
end
end
@doc """
Finds the block number closest to a given timestamp, with a one-minute buffer, optionally
adjusting based on whether the block should be before or after the timestamp.
## Parameters
- `given_timestamp`: The timestamp for which the closest block number is being sought.
- `closest`: A direction indicator (`:before` or `:after`) specifying whether the block number
returned should be before or after the given timestamp.
- `from_api`: A boolean flag indicating whether to use the replica database or the primary one
for the query.
## Returns
- `{:ok, block_number}` where `block_number` is the block number closest to the specified timestamp.
- `{:error, :not_found}` if no block is found within the specified criteria.
"""
@spec timestamp_to_block_number(DateTime.t(), :before | :after, boolean()) ::
{:ok, Block.block_number()} | {:error, :not_found}
def timestamp_to_block_number(given_timestamp, closest, from_api) do
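Usage sketch (hypothetical timestamp; `false` selects the primary database rather than a replica):

# Raises a MatchError if no block satisfies the criteria; a sketch, not PR code.
{:ok, block_number} =
  Explorer.Chain.timestamp_to_block_number(~U[2024-05-01 00:00:00Z], :before, false)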
@ -3311,6 +3340,22 @@ defmodule Explorer.Chain do
def limit_showing_transactions, do: @limit_showing_transactions
@doc """
Dynamically joins and preloads associations in a query based on necessity.
This function adjusts the provided Ecto query to include joins for associations. It supports
both optional and required joins. Optional joins use the `preload` function to fetch associations
without enforcing their presence. Required joins ensure the association exists.
## Parameters
- `query`: The initial Ecto query.
- `associations`: A single association or a tuple with nested association preloads.
- `necessity`: Specifies if the association is `:optional` or `:required`.
## Returns
- The modified query with the specified associations joined according to the defined necessity.
"""
@spec join_association(atom() | Ecto.Query.t(), [{atom(), atom()}], :optional | :required) :: Ecto.Query.t()
def join_association(query, [{association, nested_preload}], necessity)
when is_atom(association) and is_atom(nested_preload) do
case necessity do
@ -3328,6 +3373,7 @@ defmodule Explorer.Chain do
end
end
@spec join_association(atom() | Ecto.Query.t(), atom(), :optional | :required) :: Ecto.Query.t()
def join_association(query, association, necessity) do
case necessity do
:optional ->
@ -3338,10 +3384,23 @@ defmodule Explorer.Chain do
end
end
@doc """
Applies dynamic joins to a query based on provided association necessities.
This function iterates over a map of associations with their required join types, either
`:optional` or `:required`, and applies the corresponding joins to the given query.
More info is available on https://hexdocs.pm/ecto/Ecto.Query.html#preload/3
## Parameters
- `query`: The base query to which associations will be joined.
- `necessity_by_association`: A map specifying each association and its necessity
(`:optional` or `:required`).
## Returns
- The query with all specified associations joined according to their necessity.
"""
@spec join_associations(atom() | Ecto.Query.t(), %{any() => :optional | :required}) :: Ecto.Query.t()
def join_associations(query, necessity_by_association) when is_map(necessity_by_association) do
Enum.reduce(necessity_by_association, query, fn {association, join}, acc_query ->
join_association(acc_query, association, join)

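A short sketch of how the Arbitrum reader below uses these helpers (assumes at least one indexed batch; `Explorer.Repo` stands in for the selected repo):

    import Ecto.Query, only: [from: 2]

    # Load the latest batch together with its mandatory commitment transaction.
    query = from(batch in Explorer.Chain.Arbitrum.L1Batch, order_by: [desc: batch.number], limit: 1)

    query
    |> Explorer.Chain.join_associations(%{:commitment_transaction => :required})
    |> Explorer.Repo.one()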
@ -0,0 +1,53 @@
defmodule Explorer.Chain.Arbitrum.BatchBlock do
@moduledoc """
Models a list of blocks related to a batch for Arbitrum.
Changes in the schema should be reflected in the bulk import module:
- Explorer.Chain.Import.Runner.Arbitrum.BatchBlocks
Migrations:
- Explorer.Repo.Arbitrum.Migrations.CreateArbitrumTables
"""
use Explorer.Schema
alias Explorer.Chain.Arbitrum.{L1Batch, LifecycleTransaction}
@optional_attrs ~w(confirmation_id)a
@required_attrs ~w(batch_number block_number)a
@type t :: %__MODULE__{
batch_number: non_neg_integer(),
batch: %Ecto.Association.NotLoaded{} | L1Batch.t() | nil,
block_number: non_neg_integer(),
confirmation_id: non_neg_integer() | nil,
confirmation_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil
}
@primary_key {:block_number, :integer, autogenerate: false}
schema "arbitrum_batch_l2_blocks" do
belongs_to(:batch, L1Batch, foreign_key: :batch_number, references: :number, type: :integer)
belongs_to(:confirmation_transaction, LifecycleTransaction,
foreign_key: :confirmation_id,
references: :id,
type: :integer
)
timestamps()
end
@doc """
Validates that the `attrs` are valid.
"""
@spec changeset(Ecto.Schema.t(), map()) :: Ecto.Changeset.t()
def changeset(%__MODULE__{} = items, attrs \\ %{}) do
items
|> cast(attrs, @required_attrs ++ @optional_attrs)
|> validate_required(@required_attrs)
|> foreign_key_constraint(:batch_number)
|> foreign_key_constraint(:confirmation_id)
|> unique_constraint(:block_number)
end
end
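For illustration, a minimal changeset round-trip with made-up values; `confirmation_id` is optional and is typically backfilled once the confirmation transaction is indexed:

    changeset =
      Explorer.Chain.Arbitrum.BatchBlock.changeset(
        %Explorer.Chain.Arbitrum.BatchBlock{},
        %{batch_number: 42, block_number: 123_456}
      )

    changeset.valid?
    # => true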

@ -0,0 +1,52 @@
defmodule Explorer.Chain.Arbitrum.BatchTransaction do
@moduledoc """
Models a list of transactions related to a batch for Arbitrum.
Changes in the schema should be reflected in the bulk import module:
- Explorer.Chain.Import.Runner.Arbitrum.BatchTransactions
Migrations:
- Explorer.Repo.Arbitrum.Migrations.CreateArbitrumTables
"""
use Explorer.Schema
alias Explorer.Chain.Arbitrum.L1Batch
alias Explorer.Chain.{Hash, Transaction}
@required_attrs ~w(batch_number tx_hash)a
@type t :: %__MODULE__{
batch_number: non_neg_integer(),
batch: %Ecto.Association.NotLoaded{} | L1Batch.t() | nil,
tx_hash: Hash.t(),
l2_transaction: %Ecto.Association.NotLoaded{} | Transaction.t() | nil
}
@primary_key false
schema "arbitrum_batch_l2_transactions" do
belongs_to(:batch, L1Batch, foreign_key: :batch_number, references: :number, type: :integer)
belongs_to(:l2_transaction, Transaction,
foreign_key: :tx_hash,
primary_key: true,
references: :hash,
type: Hash.Full
)
timestamps()
end
@doc """
Validates that the `attrs` are valid.
"""
@spec changeset(Ecto.Schema.t(), map()) :: Ecto.Changeset.t()
def changeset(%__MODULE__{} = transactions, attrs \\ %{}) do
transactions
|> cast(attrs, @required_attrs)
|> validate_required(@required_attrs)
|> foreign_key_constraint(:batch_number)
|> foreign_key_constraint(:tx_hash)
|> unique_constraint(:tx_hash)
end
end

@ -0,0 +1,62 @@
defmodule Explorer.Chain.Arbitrum.L1Batch do
@moduledoc """
Models an L1 batch for Arbitrum.
Changes in the schema should be reflected in the bulk import module:
- Explorer.Chain.Import.Runner.Arbitrum.L1Batches
Migrations:
- Explorer.Repo.Arbitrum.Migrations.CreateArbitrumTables
"""
use Explorer.Schema
alias Explorer.Chain.{
Block,
Hash
}
alias Explorer.Chain.Arbitrum.LifecycleTransaction
@required_attrs ~w(number transactions_count start_block end_block before_acc after_acc commitment_id)a
@type t :: %__MODULE__{
number: non_neg_integer(),
transactions_count: non_neg_integer(),
start_block: Block.block_number(),
end_block: Block.block_number(),
before_acc: Hash.t(),
after_acc: Hash.t(),
commitment_id: non_neg_integer(),
commitment_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil
}
@primary_key {:number, :integer, autogenerate: false}
schema "arbitrum_l1_batches" do
field(:transactions_count, :integer)
field(:start_block, :integer)
field(:end_block, :integer)
field(:before_acc, Hash.Full)
field(:after_acc, Hash.Full)
belongs_to(:commitment_transaction, LifecycleTransaction,
foreign_key: :commitment_id,
references: :id,
type: :integer
)
timestamps()
end
@doc """
Validates that the `attrs` are valid.
"""
@spec changeset(Ecto.Schema.t(), map()) :: Ecto.Changeset.t()
def changeset(%__MODULE__{} = batches, attrs \\ %{}) do
batches
|> cast(attrs, @required_attrs)
|> validate_required(@required_attrs)
|> foreign_key_constraint(:commitment_id)
|> unique_constraint(:number)
end
end

@ -0,0 +1,46 @@
defmodule Explorer.Chain.Arbitrum.L1Execution do
@moduledoc """
Models a list of transactions executing L2-to-L1 messages on Arbitrum.
Changes in the schema should be reflected in the bulk import module:
- Explorer.Chain.Import.Runner.Arbitrum.L1Executions
Migrations:
- Explorer.Repo.Arbitrum.Migrations.CreateArbitrumTables
"""
use Explorer.Schema
alias Explorer.Chain.Arbitrum.LifecycleTransaction
@required_attrs ~w(message_id execution_id)a
@type t :: %__MODULE__{
message_id: non_neg_integer(),
execution_id: non_neg_integer(),
execution_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil
}
@primary_key {:message_id, :integer, autogenerate: false}
schema "arbitrum_l1_executions" do
belongs_to(:execution_transaction, LifecycleTransaction,
foreign_key: :execution_id,
references: :id,
type: :integer
)
timestamps()
end
@doc """
Validates that the `attrs` are valid.
"""
@spec changeset(Ecto.Schema.t(), map()) :: Ecto.Changeset.t()
def changeset(%__MODULE__{} = items, attrs \\ %{}) do
items
|> cast(attrs, @required_attrs)
|> validate_required(@required_attrs)
|> foreign_key_constraint(:execution_id)
|> unique_constraint(:message_id)
end
end

@ -0,0 +1,54 @@
defmodule Explorer.Chain.Arbitrum.LifecycleTransaction do
@moduledoc """
Models an L1 lifecycle transaction for Arbitrum.
Changes in the schema should be reflected in the bulk import module:
- Explorer.Chain.Import.Runner.Arbitrum.LifecycleTransactions
Migrations:
- Explorer.Repo.Arbitrum.Migrations.CreateArbitrumTables
"""
use Explorer.Schema
alias Explorer.Chain.{
Block,
Hash
}
alias Explorer.Chain.Arbitrum.{BatchBlock, L1Batch}
@required_attrs ~w(id hash block_number timestamp status)a
@type t :: %__MODULE__{
id: non_neg_integer(),
hash: Hash.t(),
block_number: Block.block_number(),
timestamp: DateTime.t(),
status: :unfinalized | :finalized
}
@primary_key {:id, :integer, autogenerate: false}
schema "arbitrum_lifecycle_l1_transactions" do
field(:hash, Hash.Full)
field(:block_number, :integer)
field(:timestamp, :utc_datetime_usec)
field(:status, Ecto.Enum, values: [:unfinalized, :finalized])
has_many(:committed_batches, L1Batch, foreign_key: :commitment_id)
has_many(:confirmed_blocks, BatchBlock, foreign_key: :confirmation_id)
timestamps()
end
@doc """
Validates that the `attrs` are valid.
"""
@spec changeset(Ecto.Schema.t(), map()) :: Ecto.Changeset.t()
def changeset(%__MODULE__{} = txn, attrs \\ %{}) do
txn
|> cast(attrs, @required_attrs)
|> validate_required(@required_attrs)
|> unique_constraint([:id, :hash])
end
end
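A hedged sketch of a valid lifecycle-transaction changeset (all values made up; `Hash.Full` casts 0x-prefixed hex strings):

    changeset =
      Explorer.Chain.Arbitrum.LifecycleTransaction.changeset(
        %Explorer.Chain.Arbitrum.LifecycleTransaction{},
        %{
          id: 1,
          hash: "0x" <> String.duplicate("00", 32),
          block_number: 19_000_000,
          timestamp: DateTime.utc_now(),
          status: :unfinalized
        }
      )

    changeset.valid?
    # => true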

@ -0,0 +1,57 @@
defmodule Explorer.Chain.Arbitrum.Message do
@moduledoc """
Models L1<->L2 messages on Arbitrum.
Changes in the schema should be reflected in the bulk import module:
- Explorer.Chain.Import.Runner.Arbitrum.Messages
Migrations:
- Explorer.Repo.Arbitrum.Migrations.CreateArbitrumTables
"""
use Explorer.Schema
alias Explorer.Chain.{Block, Hash}
@optional_attrs ~w(originator_address originating_transaction_hash origination_timestamp originating_transaction_block_number completion_transaction_hash)a
@required_attrs ~w(direction message_id status)a
@allowed_attrs @optional_attrs ++ @required_attrs
@type t :: %__MODULE__{
direction: :to_l2 | :from_l2,
message_id: non_neg_integer(),
originator_address: Hash.Address.t() | nil,
originating_transaction_hash: Hash.t() | nil,
origination_timestamp: DateTime.t() | nil,
originating_transaction_block_number: Block.block_number() | nil,
completion_transaction_hash: Hash.t() | nil,
status: :initiated | :sent | :confirmed | :relayed
}
@primary_key false
schema "arbitrum_crosslevel_messages" do
field(:direction, Ecto.Enum, values: [:to_l2, :from_l2], primary_key: true)
field(:message_id, :integer, primary_key: true)
field(:originator_address, Hash.Address)
field(:originating_transaction_hash, Hash.Full)
field(:origination_timestamp, :utc_datetime_usec)
field(:originating_transaction_block_number, :integer)
field(:completion_transaction_hash, Hash.Full)
field(:status, Ecto.Enum, values: [:initiated, :sent, :confirmed, :relayed])
timestamps()
end
@doc """
Validates that the `attrs` are valid.
"""
@spec changeset(Ecto.Schema.t(), map()) :: Ecto.Changeset.t()
def changeset(%__MODULE__{} = txn, attrs \\ %{}) do
txn
|> cast(attrs, @allowed_attrs)
|> validate_required(@required_attrs)
|> unique_constraint([:direction, :message_id])
end
end
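A sketch of the minimal message row (made-up id); the optional origination and completion fields stay `nil` until later discovery passes fill them in:

    changeset =
      Explorer.Chain.Arbitrum.Message.changeset(
        %Explorer.Chain.Arbitrum.Message{},
        %{direction: :to_l2, message_id: 7, status: :initiated}
      )

    changeset.valid?
    # => true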

@ -0,0 +1,913 @@
defmodule Explorer.Chain.Arbitrum.Reader do
@moduledoc """
Contains read functions for Arbitrum modules.
"""
import Ecto.Query, only: [from: 2, limit: 2, order_by: 2, subquery: 1, where: 2, where: 3]
import Explorer.Chain, only: [select_repo: 1]
alias Explorer.Chain.Arbitrum.{BatchBlock, BatchTransaction, L1Batch, L1Execution, LifecycleTransaction, Message}
alias Explorer.{Chain, PagingOptions, Repo}
alias Explorer.Chain.Block, as: FullBlock
alias Explorer.Chain.{Hash, Transaction}
@doc """
Retrieves the number of the latest L1 block where an L1-to-L2 message was discovered.
## Returns
- The number of the L1 block, or `nil` if no L1-to-L2 messages are found.
"""
@spec l1_block_of_latest_discovered_message_to_l2() :: FullBlock.block_number() | nil
def l1_block_of_latest_discovered_message_to_l2 do
query =
from(msg in Message,
select: msg.originating_transaction_block_number,
where: msg.direction == :to_l2 and not is_nil(msg.originating_transaction_block_number),
order_by: [desc: msg.message_id],
limit: 1
)
query
|> Repo.one()
end
@doc """
Retrieves the number of the earliest L1 block where an L1-to-L2 message was discovered.
## Returns
- The number of the L1 block, or `nil` if no L1-to-L2 messages are found.
"""
@spec l1_block_of_earliest_discovered_message_to_l2() :: FullBlock.block_number() | nil
def l1_block_of_earliest_discovered_message_to_l2 do
query =
from(msg in Message,
select: msg.originating_transaction_block_number,
where: msg.direction == :to_l2 and not is_nil(msg.originating_transaction_block_number),
order_by: [asc: msg.message_id],
limit: 1
)
query
|> Repo.one()
end
@doc """
Retrieves the number of the earliest rollup block where an L2-to-L1 message was discovered.
## Returns
- The number of the rollup block, or `nil` if no L2-to-L1 messages are found.
"""
@spec rollup_block_of_earliest_discovered_message_from_l2() :: FullBlock.block_number() | nil
def rollup_block_of_earliest_discovered_message_from_l2 do
query =
from(msg in Message,
select: msg.originating_transaction_block_number,
where: msg.direction == :from_l2 and not is_nil(msg.originating_transaction_block_number),
order_by: [asc: msg.originating_transaction_block_number],
limit: 1
)
query
|> Repo.one()
end
@doc """
Retrieves the number of the earliest rollup block where a completed L1-to-L2 message was discovered.
## Returns
- The block number of the rollup block, or `nil` if no completed L1-to-L2 messages are found,
or if the rollup transaction that emitted the corresponding message has not been indexed yet.
"""
@spec rollup_block_of_earliest_discovered_message_to_l2() :: FullBlock.block_number() | nil
def rollup_block_of_earliest_discovered_message_to_l2 do
completion_tx_subquery =
from(msg in Message,
select: msg.completion_transaction_hash,
where: msg.direction == :to_l2 and not is_nil(msg.completion_transaction_hash),
order_by: [asc: msg.message_id],
limit: 1
)
query =
from(tx in Transaction,
select: tx.block_number,
where: tx.hash == subquery(completion_tx_subquery),
limit: 1
)
query
|> Repo.one()
end
@doc """
Retrieves the number of the latest L1 block where the commitment transaction with a batch was included.
Due to the nature of the Arbitrum rollup, from the indexer's point of view, a batch does not exist until
the commitment transaction is submitted to L1. Therefore, the situation where a batch exists but
there is no commitment transaction is not possible.
## Returns
- The number of the L1 block, or `nil` if no rollup batches are found, or if the association between the batch
and the commitment transaction has been broken due to database inconsistency.
"""
@spec l1_block_of_latest_committed_batch() :: FullBlock.block_number() | nil
def l1_block_of_latest_committed_batch do
query =
from(batch in L1Batch,
order_by: [desc: batch.number],
limit: 1
)
case query
# :required is used since the situation when commit transaction is not found is not possible
|> Chain.join_associations(%{:commitment_transaction => :required})
|> Repo.one() do
nil -> nil
batch -> batch.commitment_transaction.block_number
end
end
@doc """
Retrieves the number of the earliest L1 block where the commitment transaction with a batch was included.
Due to the nature of the Arbitrum rollup, from the indexer's point of view, a batch does not exist until
the commitment transaction is submitted to L1. Therefore, the situation where a batch exists but
there is no commitment transaction is not possible.
## Returns
- The number of the L1 block, or `nil` if no rollup batches are found, or if the association between the batch
and the commitment transaction has been broken due to database inconsistency.
"""
@spec l1_block_of_earliest_committed_batch() :: FullBlock.block_number() | nil
def l1_block_of_earliest_committed_batch do
query =
from(batch in L1Batch,
order_by: [asc: batch.number],
limit: 1
)
case query
# :required is used since the situation when commit transaction is not found is not possible
|> Chain.join_associations(%{:commitment_transaction => :required})
|> Repo.one() do
nil -> nil
batch -> batch.commitment_transaction.block_number
end
end
@doc """
Retrieves the block number of the highest rollup block that has been included in a batch.
## Returns
- The number of the highest rollup block included in a batch, or `nil` if no rollup batches are found.
"""
@spec highest_committed_block() :: FullBlock.block_number() | nil
def highest_committed_block do
query =
from(batch in L1Batch,
select: batch.end_block,
order_by: [desc: batch.number],
limit: 1
)
query
|> Repo.one()
end
@doc """
Reads a list of L1 transactions by their hashes from the `arbitrum_lifecycle_l1_transactions` table.
## Parameters
- `l1_tx_hashes`: A list of hashes to retrieve L1 transactions for.
## Returns
- A list of `{hash, id}` pairs for the L1 transactions matching the hashes from
the input list. The output list may be smaller than the input list.
"""
@spec lifecycle_transactions(maybe_improper_list(Hash.t(), [])) :: [{Hash.t(), non_neg_integer()}]
def lifecycle_transactions(l1_tx_hashes) when is_list(l1_tx_hashes) do
query =
from(
lt in LifecycleTransaction,
select: {lt.hash, lt.id},
where: lt.hash in ^l1_tx_hashes
)
Repo.all(query, timeout: :infinity)
end
@doc """
Reads a list of transactions executing L2-to-L1 messages by their IDs.
## Parameters
- `message_ids`: A list of IDs to retrieve executing transactions for.
## Returns
- A list of `Explorer.Chain.Arbitrum.L1Execution` corresponding to the message IDs from
the input list. The output list may be smaller than the input list if some IDs do not
correspond to any existing transactions.
"""
@spec l1_executions(maybe_improper_list(non_neg_integer(), [])) :: [L1Execution]
def l1_executions(message_ids) when is_list(message_ids) do
query =
from(
ex in L1Execution,
where: ex.message_id in ^message_ids
)
query
# :required is used since execution records in the table are created only when
# the corresponding execution transaction is indexed
|> Chain.join_associations(%{:execution_transaction => :required})
|> Repo.all(timeout: :infinity)
end
@doc """
Determines the next available index for an L1 transaction in the `arbitrum_lifecycle_l1_transactions` table.
## Returns
- The next available index. If there are no L1 transactions imported yet, it will return `1`.
"""
@spec next_lifecycle_transaction_id() :: non_neg_integer
def next_lifecycle_transaction_id do
query =
from(lt in LifecycleTransaction,
select: lt.id,
order_by: [desc: lt.id],
limit: 1
)
last_id =
query
|> Repo.one()
|> Kernel.||(0)
last_id + 1
end
@doc """
Retrieves unfinalized L1 transactions from the `LifecycleTransaction` table that are
involved in changing the statuses of rollup blocks or transactions.
An L1 transaction is considered unfinalized if it has not yet reached a state where
it is permanently included in the blockchain, meaning it is still susceptible to
potential reorganization or change. Transactions are evaluated against the `finalized_block`
parameter to determine their finalized status.
## Parameters
- `finalized_block`: The highest L1 block number up to which transactions are considered finalized.
Transactions in blocks above this number are not included in the results.
## Returns
- A list of `Explorer.Chain.Arbitrum.LifecycleTransaction` representing unfinalized transactions,
or `[]` if no unfinalized transactions are found.
"""
@spec lifecycle_unfinalized_transactions(FullBlock.block_number()) :: [LifecycleTransaction]
def lifecycle_unfinalized_transactions(finalized_block)
when is_integer(finalized_block) and finalized_block >= 0 do
query =
from(
lt in LifecycleTransaction,
where: lt.block_number <= ^finalized_block and lt.status == :unfinalized
)
Repo.all(query, timeout: :infinity)
end
@doc """
Gets the rollup block number by the hash of the block. Lookup is performed only
for blocks explicitly included in a batch, i.e., the batch has been identified by
the corresponding fetcher. The function may return `nil` as a successful response
if the batch containing the rollup block has not been indexed yet.
## Parameters
- `block_hash`: The hash of a block included in the batch.
## Returns
- The number of the rollup block corresponding to the given hash, or `nil` if the
block or batch is not indexed yet.
"""
@spec rollup_block_hash_to_num(binary()) :: FullBlock.block_number() | nil
def rollup_block_hash_to_num(block_hash) when is_binary(block_hash) do
query =
from(
fb in FullBlock,
inner_join: rb in BatchBlock,
on: rb.block_number == fb.number,
select: fb.number,
where: fb.hash == ^block_hash
)
query
|> Repo.one()
end
@doc """
Checks if the numbers from the provided list correspond to the numbers of indexed batches.
## Parameters
- `batches_numbers`: The list of batch numbers.
## Returns
- A list of batch numbers that are indexed and match the provided list, or `[]`
if none of the batch numbers in the provided list exist in the database. The output list
may be smaller than the input list.
"""
@spec batches_exist(maybe_improper_list(non_neg_integer(), [])) :: [non_neg_integer]
def batches_exist(batches_numbers) when is_list(batches_numbers) do
query =
from(
batch in L1Batch,
select: batch.number,
where: batch.number in ^batches_numbers
)
query
|> Repo.all(timeout: :infinity)
end
@doc """
Retrieves the batch in which the rollup block, identified by the given block number, was included.
## Parameters
- `number`: The number of a rollup block.
## Returns
- An instance of `Explorer.Chain.Arbitrum.L1Batch` representing the batch containing
the specified rollup block number, or `nil` if no corresponding batch is found.
"""
@spec get_batch_by_rollup_block_number(FullBlock.block_number()) :: L1Batch | nil
def get_batch_by_rollup_block_number(number)
when is_integer(number) and number >= 0 do
query =
from(batch in L1Batch,
# end_block has higher number than start_block
where: batch.end_block >= ^number and batch.start_block <= ^number
)
query
# :required is used since the situation when commit transaction is not found is not possible
|> Chain.join_associations(%{:commitment_transaction => :required})
|> Repo.one()
end
@doc """
Retrieves the L1 block number where the confirmation transaction of the highest confirmed rollup block was included.
## Returns
- The L1 block number if a confirmed rollup block is found and the confirmation transaction is indexed;
`nil` if no confirmed rollup blocks are found or if there is a database inconsistency.
"""
@spec l1_block_of_latest_confirmed_block() :: FullBlock.block_number() | nil
def l1_block_of_latest_confirmed_block do
query =
from(
rb in BatchBlock,
where: not is_nil(rb.confirmation_id),
order_by: [desc: rb.block_number],
limit: 1
)
case query
# :required is used since existence of the confirmation id is checked above
|> Chain.join_associations(%{:confirmation_transaction => :required})
|> Repo.one() do
nil ->
nil
block ->
case block.confirmation_transaction do
# `nil` and `%Ecto.Association.NotLoaded{}` indicate DB inconsistency
nil -> nil
%Ecto.Association.NotLoaded{} -> nil
confirmation_transaction -> confirmation_transaction.block_number
end
end
end
@doc """
Retrieves the number of the highest confirmed rollup block.
## Returns
- The number of the highest confirmed rollup block, or `nil` if no confirmed rollup blocks are found.
"""
@spec highest_confirmed_block() :: FullBlock.block_number() | nil
def highest_confirmed_block do
query =
from(
rb in BatchBlock,
where: not is_nil(rb.confirmation_id),
select: rb.block_number,
order_by: [desc: rb.block_number],
limit: 1
)
query
|> Repo.one()
end
@doc """
Retrieves the number of the latest L1 block where a transaction executing an L2-to-L1 message was discovered.
## Returns
- The number of the latest L1 block with an executing transaction for an L2-to-L1 message, or `nil` if no such transactions are found.
"""
@spec l1_block_of_latest_execution() :: FullBlock.block_number() | nil
def l1_block_of_latest_execution do
query =
from(
tx in LifecycleTransaction,
inner_join: ex in L1Execution,
on: tx.id == ex.execution_id,
select: tx.block_number,
order_by: [desc: tx.block_number],
limit: 1
)
query
|> Repo.one()
end
@doc """
Retrieves the number of the earliest L1 block where a transaction executing an L2-to-L1 message was discovered.
## Returns
- The number of the earliest L1 block with an executing transaction for an L2-to-L1 message, or `nil` if no such transactions are found.
"""
@spec l1_block_of_earliest_execution() :: FullBlock.block_number() | nil
def l1_block_of_earliest_execution do
query =
from(
tx in LifecycleTransaction,
inner_join: ex in L1Execution,
on: tx.id == ex.execution_id,
select: tx.block_number,
order_by: [asc: tx.block_number],
limit: 1
)
query
|> Repo.one()
end
@doc """
Retrieves all unconfirmed rollup blocks within the specified range from `first_block` to `last_block`,
inclusive, where `first_block` is less than or equal to `last_block`.
Since the function relies on the block data generated by the block fetcher, the returned list
may contain fewer blocks than actually exist if some of the blocks have not been indexed by the fetcher yet.
## Parameters
- `first_block`: The rollup block number starting the lookup range.
- `last_block`: The rollup block number ending the lookup range.
## Returns
- A list of maps containing the batch number, rollup block number and hash for each
unconfirmed block within the range. Returns `[]` if no unconfirmed blocks are found
within the range, or if the block fetcher has not indexed them.
"""
@spec unconfirmed_rollup_blocks(FullBlock.block_number(), FullBlock.block_number()) :: [BatchBlock]
def unconfirmed_rollup_blocks(first_block, last_block)
when is_integer(first_block) and first_block >= 0 and
is_integer(last_block) and first_block <= last_block do
query =
from(
rb in BatchBlock,
where: rb.block_number >= ^first_block and rb.block_number <= ^last_block and is_nil(rb.confirmation_id),
order_by: [asc: rb.block_number]
)
Repo.all(query, timeout: :infinity)
end
@doc """
Calculates the number of confirmed rollup blocks in the specified batch.
## Parameters
- `batch_number`: The number of the batch for which the count of confirmed blocks is to be calculated.
## Returns
- The number of confirmed blocks in the batch with the given number.
"""
@spec count_confirmed_rollup_blocks_in_batch(non_neg_integer()) :: non_neg_integer
def count_confirmed_rollup_blocks_in_batch(batch_number)
when is_integer(batch_number) and batch_number >= 0 do
query =
from(
rb in BatchBlock,
where: rb.batch_number == ^batch_number and not is_nil(rb.confirmation_id)
)
Repo.aggregate(query, :count, timeout: :infinity)
end
@doc """
Retrieves all L2-to-L1 messages with the specified status that originated in rollup blocks with numbers not higher than `block_number`.
## Parameters
- `status`: The status of the messages to retrieve, such as `:initiated`, `:sent`, `:confirmed`, or `:relayed`.
- `block_number`: The number of a rollup block that limits the messages lookup.
## Returns
- Instances of `Explorer.Chain.Arbitrum.Message` corresponding to the criteria, or `[]` if no messages
with the given status are found in the rollup blocks up to the specified number.
"""
@spec l2_to_l1_messages(:confirmed | :initiated | :relayed | :sent, FullBlock.block_number()) :: [
Message
]
def l2_to_l1_messages(status, block_number)
when status in [:initiated, :sent, :confirmed, :relayed] and
is_integer(block_number) and
block_number >= 0 do
query =
from(msg in Message,
where:
msg.direction == :from_l2 and msg.originating_transaction_block_number <= ^block_number and
msg.status == ^status,
order_by: [desc: msg.message_id]
)
Repo.all(query, timeout: :infinity)
end
@doc """
Retrieves the numbers of the L1 blocks containing the confirmation transactions
bounding the first interval where missed confirmation transactions could be found.
The absence of a confirmation transaction is assumed based on the analysis of a
series of confirmed rollup blocks. For example, if blocks 0-3 are confirmed by transaction X,
blocks 7-9 by transaction Y, and blocks 12-15 by transaction Z, there are two gaps:
blocks 4-6 and 10-11. According to Arbitrum's nature, this indicates that the confirmation
transactions for blocks 6 and 11 have not yet been indexed.
In the example above, the function will return the tuple with the numbers of the L1 blocks
where transactions Y and Z were included.
## Returns
- A tuple of the L1 block numbers between which missing confirmation transactions are suspected,
or `nil` if no gaps in confirmed blocks are found or if there are no missed confirmation transactions.
"""
@spec l1_blocks_of_confirmations_bounding_first_unconfirmed_rollup_blocks_gap() ::
{FullBlock.block_number() | nil, FullBlock.block_number()} | nil
def l1_blocks_of_confirmations_bounding_first_unconfirmed_rollup_blocks_gap do
# The first subquery retrieves the numbers of confirmed rollup blocks.
rollup_blocks_query =
from(
rb in BatchBlock,
select: %{
block_number: rb.block_number,
confirmation_id: rb.confirmation_id
},
where: not is_nil(rb.confirmation_id)
)
# The second subquery builds on the first one, grouping block numbers by their
# confirmation transactions. As a result, it identifies the starting and ending
# rollup blocks for every transaction.
confirmed_ranges_query =
from(
subquery in subquery(rollup_blocks_query),
select: %{
confirmation_id: subquery.confirmation_id,
min_block_num: min(subquery.block_number),
max_block_num: max(subquery.block_number)
},
group_by: subquery.confirmation_id
)
# The third subquery utilizes the window function LAG to associate each confirmation
# transaction with the starting rollup block of the preceding transaction.
confirmed_combined_ranges_query =
from(
subquery in subquery(confirmed_ranges_query),
select: %{
confirmation_id: subquery.confirmation_id,
min_block_num: subquery.min_block_num,
max_block_num: subquery.max_block_num,
prev_max_number: fragment("LAG(?, 1) OVER (ORDER BY ?)", subquery.max_block_num, subquery.min_block_num),
prev_confirmation_id:
fragment("LAG(?, 1) OVER (ORDER BY ?)", subquery.confirmation_id, subquery.min_block_num)
}
)
# The final query identifies confirmation transactions for which the ending block does
# not precede the starting block of the subsequent confirmation transaction.
main_query =
from(
subquery in subquery(confirmed_combined_ranges_query),
inner_join: tx_cur in LifecycleTransaction,
on: subquery.confirmation_id == tx_cur.id,
left_join: tx_prev in LifecycleTransaction,
on: subquery.prev_confirmation_id == tx_prev.id,
select: {tx_prev.block_number, tx_cur.block_number},
where: subquery.min_block_num - 1 != subquery.prev_max_number or is_nil(subquery.prev_max_number),
order_by: [desc: subquery.min_block_num],
limit: 1
)
main_query
|> Repo.one()
end
@doc """
Retrieves the count of cross-chain messages either sent to or from the rollup.
## Parameters
- `direction`: A string that specifies the message direction; can be "from-rollup" or "to-rollup".
- `options`: A keyword list of options that may include whether to use a replica database.
## Returns
- The total count of cross-chain messages.
"""
@spec messages_count(binary(), api?: boolean()) :: non_neg_integer()
def messages_count(direction, options) when direction == "from-rollup" and is_list(options) do
do_messages_count(:from_l2, options)
end
def messages_count(direction, options) when direction == "to-rollup" and is_list(options) do
do_messages_count(:to_l2, options)
end
# Counts the number of cross-chain messages based on the specified direction.
@spec do_messages_count(:from_l2 | :to_l2, api?: boolean()) :: non_neg_integer()
defp do_messages_count(direction, options) do
Message
|> where([msg], msg.direction == ^direction)
|> select_repo(options).aggregate(:count, timeout: :infinity)
end
@doc """
Retrieves cross-chain messages based on the specified direction.
This function constructs and executes a query to retrieve messages either sent
to or from the rollup layer, applying pagination options. These options dictate
not only the number of items to retrieve but also the point from which the
next page resumes.
## Parameters
- `direction`: A string that can be "from-rollup" or "to-rollup", translated internally to `:from_l2` or `:to_l2`.
- `options`: A keyword list specifying pagination details and database preferences.
## Returns
- A list of `Explorer.Chain.Arbitrum.Message` entries.
"""
@spec messages(binary(),
paging_options: PagingOptions.t(),
api?: boolean()
) :: [Message]
def messages(direction, options) when direction == "from-rollup" do
do_messages(:from_l2, options)
end
def messages(direction, options) when direction == "to-rollup" do
do_messages(:to_l2, options)
end
# Executes the query to fetch cross-chain messages based on the specified direction.
#
# This function constructs and executes a query to retrieve messages either sent
# to or from the rollup layer, applying pagination options. These options dictate
# not only the number of items to retrieve but also the point from which the
# next page resumes.
#
# ## Parameters
# - `direction`: Can be either `:from_l2` or `:to_l2`, indicating the direction of the messages.
# - `options`: A keyword list of options specifying pagination details and whether to use a replica database.
#
# ## Returns
# - A list of `Explorer.Chain.Arbitrum.Message` entries matching the specified direction.
@spec do_messages(:from_l2 | :to_l2,
paging_options: PagingOptions.t(),
api?: boolean()
) :: [Message]
defp do_messages(direction, options) do
base_query =
from(msg in Message,
where: msg.direction == ^direction,
order_by: [desc: msg.message_id]
)
paging_options = Keyword.get(options, :paging_options, Chain.default_paging_options())
query =
base_query
|> page_messages(paging_options)
|> limit(^paging_options.page_size)
select_repo(options).all(query)
end
defp page_messages(query, %PagingOptions{key: nil}), do: query
defp page_messages(query, %PagingOptions{key: {id}}) do
from(msg in query, where: msg.message_id < ^id)
end
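# Hypothetical pagination example: with `key: {100}` the next page returns
# messages whose `message_id` is strictly below 100, newest first:
#
#   do_messages(:from_l2,
#     paging_options: %PagingOptions{key: {100}, page_size: 50},
#     api?: true
#   )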
@doc """
Retrieves a list of relayed L1 to L2 messages that have been completed.
## Parameters
- `options`: A keyword list of options specifying whether to use a replica database and how pagination should be handled.
## Returns
- A list of `Explorer.Chain.Arbitrum.Message` representing relayed messages from L1 to L2 that have been completed.
"""
@spec relayed_l1_to_l2_messages(
paging_options: PagingOptions.t(),
api?: boolean()
) :: [Message]
def relayed_l1_to_l2_messages(options) do
paging_options = Keyword.get(options, :paging_options, Chain.default_paging_options())
query =
from(msg in Message,
where: msg.direction == :to_l2 and not is_nil(msg.completion_transaction_hash),
order_by: [desc: msg.message_id],
limit: ^paging_options.page_size
)
select_repo(options).all(query)
end
@doc """
Retrieves the total count of rollup batches indexed up to the current moment.
This function uses an estimated count from system catalogs if available.
If the estimate is unavailable, it performs an exact count using an aggregate query.
## Parameters
- `options`: A keyword list specifying options, including whether to use a replica database.
## Returns
- The count of indexed batches.
"""
@spec batches_count(api?: boolean()) :: non_neg_integer()
def batches_count(options) do
Chain.get_table_rows_total_count(L1Batch, options)
end
@doc """
Retrieves a specific batch by its number or fetches the latest batch if `:latest` is specified.
## Parameters
- `number`: Can be either the specific batch number or `:latest` to retrieve
the most recent batch in the database.
- `options`: A keyword list specifying the necessity for joining associations
and whether to use a replica database.
## Returns
- `{:ok, Explorer.Chain.Arbitrum.L1Batch}` if the batch is found.
- `{:error, :not_found}` if no batch with the specified number exists.
"""
def batch(number, options)
@spec batch(:latest, api?: boolean()) :: {:error, :not_found} | {:ok, L1Batch}
def batch(:latest, options) do
L1Batch
|> order_by(desc: :number)
|> limit(1)
|> select_repo(options).one()
|> case do
nil -> {:error, :not_found}
batch -> {:ok, batch}
end
end
@spec batch(binary() | non_neg_integer(),
necessity_by_association: %{atom() => :optional | :required},
api?: boolean()
) :: {:error, :not_found} | {:ok, L1Batch}
def batch(number, options) do
necessity_by_association = Keyword.get(options, :necessity_by_association, %{})
L1Batch
|> where(number: ^number)
|> Chain.join_associations(necessity_by_association)
|> select_repo(options).one()
|> case do
nil -> {:error, :not_found}
batch -> {:ok, batch}
end
end
@doc """
Retrieves a list of batches from the database.
This function constructs and executes a query to retrieve batches based on provided
pagination options. These options dictate not only the number of items to retrieve
but also how many items to skip from the top. If the `committed?` option is set to true,
it returns the ten most recent committed batches; otherwise, it fetches batches as
dictated by other pagination parameters.
## Parameters
- `options`: A keyword list of options specifying pagination, necessity for joining associations,
and whether to use a replica database.
## Returns
- A list of `Explorer.Chain.Arbitrum.L1Batch` entries, filtered and ordered according to the provided options.
"""
@spec batches(
necessity_by_association: %{atom() => :optional | :required},
committed?: boolean(),
paging_options: PagingOptions.t(),
api?: boolean()
) :: [L1Batch]
def batches(options) do
necessity_by_association = Keyword.get(options, :necessity_by_association, %{})
base_query =
from(batch in L1Batch,
order_by: [desc: batch.number]
)
query =
if Keyword.get(options, :committed?, false) do
base_query
|> Chain.join_associations(necessity_by_association)
|> where([batch], not is_nil(batch.commitment_id) and batch.commitment_id > 0)
|> limit(10)
else
paging_options = Keyword.get(options, :paging_options, Chain.default_paging_options())
base_query
|> Chain.join_associations(necessity_by_association)
|> page_batches(paging_options)
|> limit(^paging_options.page_size)
end
select_repo(options).all(query)
end
defp page_batches(query, %PagingOptions{key: nil}), do: query
defp page_batches(query, %PagingOptions{key: {number}}) do
from(batch in query, where: batch.number < ^number)
end
@doc """
Retrieves a list of rollup transactions included in a specific batch.
## Parameters
- `batch_number`: The number of the batch whose transactions are to be retrieved.
- `options`: A keyword list specifying options, including whether to use a replica database.
## Returns
- A list of `Explorer.Chain.Arbitrum.BatchTransaction` entries belonging to the specified batch.
"""
@spec batch_transactions(non_neg_integer() | binary(), api?: boolean()) :: [BatchTransaction]
def batch_transactions(batch_number, options) do
query = from(tx in BatchTransaction, where: tx.batch_number == ^batch_number)
select_repo(options).all(query)
end
@doc """
Retrieves a list of rollup blocks included in a specific batch.
This function constructs and executes a database query to retrieve a list of rollup blocks,
considering pagination options specified in the `options` parameter. These options dictate
the number of items to retrieve and the point from which the next page resumes.
## Parameters
- `batch_number`: The number of the batch whose blocks are to be retrieved.
- `options`: A keyword list of options specifying pagination, association necessity, and
whether to use a replica database.
## Returns
- A list of `Explorer.Chain.Block` entries belonging to the specified batch.
"""
@spec batch_blocks(non_neg_integer() | binary(),
necessity_by_association: %{atom() => :optional | :required},
api?: boolean(),
paging_options: PagingOptions.t()
) :: [FullBlock]
def batch_blocks(batch_number, options) do
necessity_by_association = Keyword.get(options, :necessity_by_association, %{})
paging_options = Keyword.get(options, :paging_options, Chain.default_paging_options())
query =
from(
fb in FullBlock,
inner_join: rb in BatchBlock,
on: fb.number == rb.block_number,
select: fb,
where: fb.consensus == true and rb.batch_number == ^batch_number
)
query
|> FullBlock.block_type_filter("Block")
|> page_blocks(paging_options)
|> limit(^paging_options.page_size)
|> order_by(desc: :number)
|> Chain.join_associations(necessity_by_association)
|> select_repo(options).all()
end
defp page_blocks(query, %PagingOptions{key: nil}), do: query
defp page_blocks(query, %PagingOptions{key: {block_number}}) do
where(query, [block], block.number < ^block_number)
end
end
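A hedged tour of the reader (hypothetical call sites; `api?: true` assumes a configured replica, and the `{:ok, _}` match assumes at least one indexed batch):

    alias Explorer.Chain.Arbitrum.Reader

    Reader.messages_count("to-rollup", api?: true)
    {:ok, latest_batch} = Reader.batch(:latest, api?: true)
    Reader.batch_transactions(latest_batch.number, api?: true)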

@ -1,7 +1,13 @@
defmodule Explorer.Chain.Block.Schema do
@moduledoc """
Models blocks.
Changes in the schema should be reflected in the bulk import module:
- Explorer.Chain.Import.Runner.Blocks
"""
alias Explorer.Chain.{Address, Block, Hash, PendingBlockOperation, Transaction, Wei, Withdrawal}
alias Explorer.Chain.Arbitrum.BatchBlock, as: ArbitrumBatchBlock
alias Explorer.Chain.Block.{Reward, SecondDegreeRelation}
alias Explorer.Chain.ZkSync.BatchBlock, as: ZkSyncBatchBlock
@ -39,6 +45,31 @@ defmodule Explorer.Chain.Block.Schema do
2
)
:arbitrum ->
elem(
quote do
field(:send_count, :integer)
field(:send_root, Hash.Full)
field(:l1_block_number, :integer)
has_one(:arbitrum_batch_block, ArbitrumBatchBlock,
foreign_key: :block_number,
references: :number
)
has_one(:arbitrum_batch, through: [:arbitrum_batch_block, :batch])
has_one(:arbitrum_commitment_transaction,
through: [:arbitrum_batch, :commitment_transaction]
)
has_one(:arbitrum_confirmation_transaction,
through: [:arbitrum_batch_block, :confirmation_transaction]
)
end,
2
)
_ ->
[]
end)
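With these associations in place, batch data can be reached from a block through ordinary preloads; a sketch assuming an :arbitrum build and an indexed block:

    import Ecto.Query, only: [from: 2]

    # Walk block -> batch block -> batch -> commitment transaction.
    block = Explorer.Repo.one(from(b in Explorer.Chain.Block, limit: 1))
    Explorer.Repo.preload(block, [:arbitrum_batch, :arbitrum_commitment_transaction])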
@ -105,18 +136,20 @@ defmodule Explorer.Chain.Block do
alias Explorer.Utility.MissingRangesManipulator
@optional_attrs ~w(size refetch_needed total_difficulty difficulty base_fee_per_gas)a
@chain_type_optional_attrs (case Application.compile_env(:explorer, :chain_type) do
:rsk ->
~w(minimum_gas_price bitcoin_merged_mining_header bitcoin_merged_mining_coinbase_transaction bitcoin_merged_mining_merkle_proof hash_for_merged_mining)a
:ethereum ->
~w(blob_gas_used excess_blob_gas)a
:arbitrum ->
~w(send_count send_root l1_block_number)a
_ ->
~w()a
end)
@required_attrs ~w(consensus gas_limit gas_used hash miner_hash nonce number parent_hash timestamp)a
@ -173,7 +206,7 @@ defmodule Explorer.Chain.Block do
def changeset(%__MODULE__{} = block, attrs) do
block
|> cast(attrs, @required_attrs ++ @optional_attrs ++ @chain_type_optional_attrs)
|> validate_required(@required_attrs)
|> foreign_key_constraint(:parent_hash)
|> unique_constraint(:hash, name: :blocks_pkey)
@ -181,7 +214,7 @@ defmodule Explorer.Chain.Block do
def number_only_changeset(%__MODULE__{} = block, attrs) do
block
|> cast(attrs, @required_attrs ++ @optional_attrs ++ @chain_type_optional_attrs)
|> validate_required([:number])
|> foreign_key_constraint(:parent_hash)
|> unique_constraint(:hash, name: :blocks_pkey)

@ -4,6 +4,22 @@ defmodule Explorer.Chain.Cache.Helper do
"""
alias Explorer.Chain
@doc """
Estimates the row count of a given table using PostgreSQL system catalogs.
This function executes a query to estimate the number of rows in the specified
table based on the table's reltuples and relpages values from the pg_class catalog.
It provides a fast estimation rather than an exact count.
## Parameters
- `table_name`: The name of the table to estimate the row count for.
- `options`: An optional keyword list of options, such as selecting a specific repository.
## Returns
- An estimated count of rows in the specified table or `nil` if the estimation is not available.
"""
@spec estimated_count_from(binary(), keyword()) :: non_neg_integer() | nil
def estimated_count_from(table_name, options \\ []) do
%Postgrex.Result{rows: [[count]]} =
Chain.select_repo(options).query!(

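A usage sketch (the table name is the one registered in pg_class; the result may be `nil` for tables PostgreSQL has not analyzed yet):

    Explorer.Chain.Cache.Helper.estimated_count_from("arbitrum_l1_batches", api?: true)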
@ -0,0 +1,104 @@
defmodule Explorer.Chain.Import.Runner.Arbitrum.BatchBlocks do
@moduledoc """
Bulk imports of Explorer.Chain.Arbitrum.BatchBlock.
"""
require Ecto.Query
alias Ecto.{Changeset, Multi, Repo}
alias Explorer.Chain.Arbitrum.BatchBlock
alias Explorer.Chain.Import
alias Explorer.Prometheus.Instrumenter
import Ecto.Query, only: [from: 2]
@behaviour Import.Runner
# milliseconds
@timeout 60_000
@type imported :: [BatchBlock.t()]
@impl Import.Runner
def ecto_schema_module, do: BatchBlock
@impl Import.Runner
def option_key, do: :arbitrum_batch_blocks
@impl Import.Runner
@spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()}
def imported_table_row do
%{
value_type: "[#{ecto_schema_module()}.t()]",
value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
}
end
@impl Import.Runner
@spec run(Multi.t(), list(), map()) :: Multi.t()
def run(multi, changes_list, %{timestamps: timestamps} = options) do
insert_options =
options
|> Map.get(option_key(), %{})
|> Map.take(~w(on_conflict timeout)a)
|> Map.put_new(:timeout, @timeout)
|> Map.put(:timestamps, timestamps)
Multi.run(multi, :insert_arbitrum_batch_blocks, fn repo, _ ->
Instrumenter.block_import_stage_runner(
fn -> insert(repo, changes_list, insert_options) end,
:block_referencing,
:arbitrum_batch_blocks,
:arbitrum_batch_blocks
)
end)
end
@impl Import.Runner
def timeout, do: @timeout
@spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
{:ok, [BatchBlock.t()]}
| {:error, [Changeset.t()]}
def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do
on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0)
# Enforce Arbitrum.BatchBlock ShareLocks order (see docs: sharelock.md)
ordered_changes_list = Enum.sort_by(changes_list, & &1.block_number)
{:ok, inserted} =
Import.insert_changes_list(
repo,
ordered_changes_list,
for: BatchBlock,
returning: true,
timeout: timeout,
timestamps: timestamps,
conflict_target: :block_number,
on_conflict: on_conflict
)
{:ok, inserted}
end
defp default_on_conflict do
from(
tb in BatchBlock,
update: [
set: [
# don't update `block_number` as it is a primary key and used for the conflict target
batch_number: fragment("EXCLUDED.batch_number"),
confirmation_id: fragment("EXCLUDED.confirmation_id"),
inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", tb.inserted_at),
updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", tb.updated_at)
]
],
where:
fragment(
"(EXCLUDED.batch_number, EXCLUDED.confirmation_id) IS DISTINCT FROM (?, ?)",
tb.batch_number,
tb.confirmation_id
)
)
end
end
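A hedged sketch of calling the runner's insert directly, outside the usual `Ecto.Multi` pipeline built by `run/3` (made-up data; timestamps shape per `Import.timestamps()`). Re-running the same import is a no-op thanks to the `IS DISTINCT FROM` guard in `default_on_conflict/0`, and the same pattern applies to the sibling Arbitrum runners below:

    now = DateTime.utc_now()

    Explorer.Chain.Import.Runner.Arbitrum.BatchBlocks.insert(
      Explorer.Repo,
      [%{batch_number: 1, block_number: 100, confirmation_id: nil}],
      %{timeout: 60_000, timestamps: %{inserted_at: now, updated_at: now}}
    )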

@ -0,0 +1,79 @@
defmodule Explorer.Chain.Import.Runner.Arbitrum.BatchTransactions do
@moduledoc """
Bulk imports of Explorer.Chain.Arbitrum.BatchTransaction.
"""
require Ecto.Query
alias Ecto.{Changeset, Multi, Repo}
alias Explorer.Chain.Arbitrum.BatchTransaction
alias Explorer.Chain.Import
alias Explorer.Prometheus.Instrumenter
@behaviour Import.Runner
# milliseconds
@timeout 60_000
@type imported :: [BatchTransaction.t()]
@impl Import.Runner
def ecto_schema_module, do: BatchTransaction
@impl Import.Runner
def option_key, do: :arbitrum_batch_transactions
@impl Import.Runner
@spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()}
def imported_table_row do
%{
value_type: "[#{ecto_schema_module()}.t()]",
value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
}
end
@impl Import.Runner
@spec run(Multi.t(), list(), map()) :: Multi.t()
def run(multi, changes_list, %{timestamps: timestamps} = options) do
insert_options =
options
|> Map.get(option_key(), %{})
|> Map.take(~w(on_conflict timeout)a)
|> Map.put_new(:timeout, @timeout)
|> Map.put(:timestamps, timestamps)
Multi.run(multi, :insert_arbitrum_batch_transactions, fn repo, _ ->
Instrumenter.block_import_stage_runner(
fn -> insert(repo, changes_list, insert_options) end,
:block_referencing,
:arbitrum_batch_transactions,
:arbitrum_batch_transactions
)
end)
end
@impl Import.Runner
def timeout, do: @timeout
@spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
{:ok, [BatchTransaction.t()]}
| {:error, [Changeset.t()]}
def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = _options) when is_list(changes_list) do
# Enforce Arbitrum.BatchTransaction ShareLocks order (see docs: sharelock.md)
ordered_changes_list = Enum.sort_by(changes_list, & &1.tx_hash)
{:ok, inserted} =
Import.insert_changes_list(
repo,
ordered_changes_list,
for: BatchTransaction,
returning: true,
timeout: timeout,
timestamps: timestamps,
conflict_target: :tx_hash,
on_conflict: :nothing
)
{:ok, inserted}
end
end

@ -0,0 +1,112 @@
defmodule Explorer.Chain.Import.Runner.Arbitrum.L1Batches do
@moduledoc """
Bulk imports of Explorer.Chain.Arbitrum.L1Batch.
"""
require Ecto.Query
alias Ecto.{Changeset, Multi, Repo}
alias Explorer.Chain.Arbitrum.L1Batch
alias Explorer.Chain.Import
alias Explorer.Prometheus.Instrumenter
import Ecto.Query, only: [from: 2]
@behaviour Import.Runner
# milliseconds
@timeout 60_000
@type imported :: [L1Batch.t()]
@impl Import.Runner
def ecto_schema_module, do: L1Batch
@impl Import.Runner
def option_key, do: :arbitrum_l1_batches
@impl Import.Runner
@spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()}
def imported_table_row do
%{
value_type: "[#{ecto_schema_module()}.t()]",
value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
}
end
@impl Import.Runner
@spec run(Multi.t(), list(), map()) :: Multi.t()
def run(multi, changes_list, %{timestamps: timestamps} = options) do
insert_options =
options
|> Map.get(option_key(), %{})
|> Map.take(~w(on_conflict timeout)a)
|> Map.put_new(:timeout, @timeout)
|> Map.put(:timestamps, timestamps)
Multi.run(multi, :insert_arbitrum_l1_batches, fn repo, _ ->
Instrumenter.block_import_stage_runner(
fn -> insert(repo, changes_list, insert_options) end,
:block_referencing,
:arbitrum_l1_batches,
:arbitrum_l1_batches
)
end)
end
@impl Import.Runner
def timeout, do: @timeout
@spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
{:ok, [L1Batch.t()]}
| {:error, [Changeset.t()]}
def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do
on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0)
# Enforce Arbitrum.L1Batch ShareLocks order (see docs: sharelock.md)
ordered_changes_list = Enum.sort_by(changes_list, & &1.number)
{:ok, inserted} =
Import.insert_changes_list(
repo,
ordered_changes_list,
for: L1Batch,
returning: true,
timeout: timeout,
timestamps: timestamps,
conflict_target: :number,
on_conflict: on_conflict
)
{:ok, inserted}
end
defp default_on_conflict do
from(
tb in L1Batch,
update: [
set: [
# don't update `number` as it is a primary key and used for the conflict target
transactions_count: fragment("EXCLUDED.transactions_count"),
start_block: fragment("EXCLUDED.start_block"),
end_block: fragment("EXCLUDED.end_block"),
before_acc: fragment("EXCLUDED.before_acc"),
after_acc: fragment("EXCLUDED.after_acc"),
commitment_id: fragment("EXCLUDED.commitment_id"),
inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", tb.inserted_at),
updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", tb.updated_at)
]
],
where:
fragment(
"(EXCLUDED.transactions_count, EXCLUDED.start_block, EXCLUDED.end_block, EXCLUDED.before_acc, EXCLUDED.after_acc, EXCLUDED.commitment_id) IS DISTINCT FROM (?, ?, ?, ?, ?, ?)",
tb.transactions_count,
tb.start_block,
tb.end_block,
tb.before_acc,
tb.after_acc,
tb.commitment_id
)
)
end
end

@ -0,0 +1,102 @@
defmodule Explorer.Chain.Import.Runner.Arbitrum.L1Executions do
@moduledoc """
Bulk imports of Explorer.Chain.Arbitrum.L1Execution.
"""
require Ecto.Query
alias Ecto.{Changeset, Multi, Repo}
alias Explorer.Chain.Arbitrum.L1Execution
alias Explorer.Chain.Import
alias Explorer.Prometheus.Instrumenter
import Ecto.Query, only: [from: 2]
@behaviour Import.Runner
# milliseconds
@timeout 60_000
@type imported :: [L1Execution.t()]
@impl Import.Runner
def ecto_schema_module, do: L1Execution
@impl Import.Runner
def option_key, do: :arbitrum_l1_executions
@impl Import.Runner
@spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()}
def imported_table_row do
%{
value_type: "[#{ecto_schema_module()}.t()]",
value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
}
end
@impl Import.Runner
@spec run(Multi.t(), list(), map()) :: Multi.t()
def run(multi, changes_list, %{timestamps: timestamps} = options) do
insert_options =
options
|> Map.get(option_key(), %{})
|> Map.take(~w(on_conflict timeout)a)
|> Map.put_new(:timeout, @timeout)
|> Map.put(:timestamps, timestamps)
Multi.run(multi, :insert_arbitrum_l1_executions, fn repo, _ ->
Instrumenter.block_import_stage_runner(
fn -> insert(repo, changes_list, insert_options) end,
:block_referencing,
:arbitrum_l1_executions,
:arbitrum_l1_executions
)
end)
end
@impl Import.Runner
def timeout, do: @timeout
@spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
{:ok, [L1Execution.t()]}
| {:error, [Changeset.t()]}
def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do
on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0)
# Enforce Arbitrum.L1Execution ShareLocks order (see docs: sharelock.md)
ordered_changes_list = Enum.sort_by(changes_list, & &1.message_id)
{:ok, inserted} =
Import.insert_changes_list(
repo,
ordered_changes_list,
for: L1Execution,
returning: true,
timeout: timeout,
timestamps: timestamps,
conflict_target: :message_id,
on_conflict: on_conflict
)
{:ok, inserted}
end
defp default_on_conflict do
from(
tb in L1Execution,
update: [
set: [
# don't update `message_id` as it is a primary key and used for the conflict target
execution_id: fragment("EXCLUDED.execution_id"),
inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", tb.inserted_at),
updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", tb.updated_at)
]
],
where:
fragment(
"(EXCLUDED.execution_id) IS DISTINCT FROM (?)",
tb.execution_id
)
)
end
end

@ -0,0 +1,107 @@
defmodule Explorer.Chain.Import.Runner.Arbitrum.LifecycleTransactions do
@moduledoc """
Bulk imports of Explorer.Chain.Arbitrum.LifecycleTransaction.
"""
require Ecto.Query
alias Ecto.{Changeset, Multi, Repo}
alias Explorer.Chain.Arbitrum.LifecycleTransaction
alias Explorer.Chain.Import
alias Explorer.Prometheus.Instrumenter
import Ecto.Query, only: [from: 2]
@behaviour Import.Runner
# milliseconds
@timeout 60_000
@type imported :: [LifecycleTransaction.t()]
@impl Import.Runner
def ecto_schema_module, do: LifecycleTransaction
@impl Import.Runner
def option_key, do: :arbitrum_lifecycle_transactions
@impl Import.Runner
@spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()}
def imported_table_row do
%{
value_type: "[#{ecto_schema_module()}.t()]",
value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
}
end
@impl Import.Runner
@spec run(Multi.t(), list(), map()) :: Multi.t()
def run(multi, changes_list, %{timestamps: timestamps} = options) do
insert_options =
options
|> Map.get(option_key(), %{})
|> Map.take(~w(on_conflict timeout)a)
|> Map.put_new(:timeout, @timeout)
|> Map.put(:timestamps, timestamps)
Multi.run(multi, :insert_arbitrum_lifecycle_transactions, fn repo, _ ->
Instrumenter.block_import_stage_runner(
fn -> insert(repo, changes_list, insert_options) end,
:block_referencing,
:arbitrum_lifecycle_transactions,
:arbitrum_lifecycle_transactions
)
end)
end
@impl Import.Runner
def timeout, do: @timeout
@spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
{:ok, [LifecycleTransaction.t()]}
| {:error, [Changeset.t()]}
def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do
on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0)
# Enforce Arbitrum.LifecycleTransaction ShareLocks order (see docs: sharelock.md)
ordered_changes_list = Enum.sort_by(changes_list, & &1.id)
{:ok, inserted} =
Import.insert_changes_list(
repo,
ordered_changes_list,
for: LifecycleTransaction,
returning: true,
timeout: timeout,
timestamps: timestamps,
conflict_target: :hash,
on_conflict: on_conflict
)
{:ok, inserted}
end
defp default_on_conflict do
from(
tx in LifecycleTransaction,
update: [
set: [
# don't update `id` as it is a primary key
# don't update `hash` as it is a unique index and used for the conflict target
timestamp: fragment("EXCLUDED.timestamp"),
block_number: fragment("EXCLUDED.block_number"),
status: fragment("GREATEST(?, EXCLUDED.status)", tx.status),
inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", tx.inserted_at),
updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", tx.updated_at)
]
],
where:
fragment(
"(EXCLUDED.timestamp, EXCLUDED.block_number, EXCLUDED.status) IS DISTINCT FROM (?, ?, ?)",
tx.timestamp,
tx.block_number,
tx.status
)
)
end
end

@ -0,0 +1,117 @@
defmodule Explorer.Chain.Import.Runner.Arbitrum.Messages do
@moduledoc """
Bulk imports of Explorer.Chain.Arbitrum.Message.
"""
require Ecto.Query
import Ecto.Query, only: [from: 2]
alias Ecto.{Changeset, Multi, Repo}
alias Explorer.Chain.Arbitrum.Message, as: CrosslevelMessage
alias Explorer.Chain.Import
alias Explorer.Prometheus.Instrumenter
@behaviour Import.Runner
# milliseconds
@timeout 60_000
@type imported :: [CrosslevelMessage.t()]
@impl Import.Runner
def ecto_schema_module, do: CrosslevelMessage
@impl Import.Runner
def option_key, do: :arbitrum_messages
@impl Import.Runner
def imported_table_row do
%{
value_type: "[#{ecto_schema_module()}.t()]",
value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
}
end
@impl Import.Runner
def run(multi, changes_list, %{timestamps: timestamps} = options) do
insert_options =
options
|> Map.get(option_key(), %{})
|> Map.take(~w(on_conflict timeout)a)
|> Map.put_new(:timeout, @timeout)
|> Map.put(:timestamps, timestamps)
Multi.run(multi, :insert_arbitrum_messages, fn repo, _ ->
Instrumenter.block_import_stage_runner(
fn -> insert(repo, changes_list, insert_options) end,
:block_referencing,
:arbitrum_messages,
:arbitrum_messages
)
end)
end
@impl Import.Runner
def timeout, do: @timeout
@spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
{:ok, [CrosslevelMessage.t()]}
| {:error, [Changeset.t()]}
def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do
on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0)
# Enforce Message ShareLocks order (see docs: sharelock.md)
ordered_changes_list = Enum.sort_by(changes_list, &{&1.direction, &1.message_id})
{:ok, inserted} =
Import.insert_changes_list(
repo,
ordered_changes_list,
conflict_target: [:direction, :message_id],
on_conflict: on_conflict,
for: CrosslevelMessage,
returning: true,
timeout: timeout,
timestamps: timestamps
)
{:ok, inserted}
end
defp default_on_conflict do
from(
op in CrosslevelMessage,
update: [
set: [
# Don't update `direction` as it is part of the composite primary key and used for the conflict target
# Don't update `message_id` as it is part of the composite primary key and used for the conflict target
originator_address: fragment("COALESCE(EXCLUDED.originator_address, ?)", op.originator_address),
originating_transaction_hash:
fragment("COALESCE(EXCLUDED.originating_transaction_hash, ?)", op.originating_transaction_hash),
origination_timestamp: fragment("COALESCE(EXCLUDED.origination_timestamp, ?)", op.origination_timestamp),
originating_transaction_block_number:
fragment(
"COALESCE(EXCLUDED.originating_transaction_block_number, ?)",
op.originating_transaction_block_number
),
completion_transaction_hash:
fragment("COALESCE(EXCLUDED.completion_transaction_hash, ?)", op.completion_transaction_hash),
status: fragment("GREATEST(?, EXCLUDED.status)", op.status),
inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", op.inserted_at),
updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", op.updated_at)
]
],
where:
fragment(
"(EXCLUDED.originator_address, EXCLUDED.originating_transaction_hash, EXCLUDED.origination_timestamp, EXCLUDED.originating_transaction_block_number, EXCLUDED.completion_transaction_hash, EXCLUDED.status) IS DISTINCT FROM (?, ?, ?, ?, ?, ?)",
op.originator_address,
op.originating_transaction_hash,
op.origination_timestamp,
op.originating_transaction_block_number,
op.completion_transaction_hash,
op.status
)
)
end
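# A sketch of the two-phase update this upsert enables (hypothetical values).
# A message is first imported when discovered on L2 and later updated when its
# execution is found on L1:
#
#   phase 1: %{direction: :from_l2, message_id: 42,
#              originating_transaction_hash: "0xaa..", status: :sent,
#              completion_transaction_hash: nil}
#   phase 2: %{direction: :from_l2, message_id: 42,
#              originating_transaction_hash: nil, status: :relayed,
#              completion_transaction_hash: "0xbb.."}
#
# After phase 2 the row keeps "0xaa.." as the originating hash (COALESCE falls
# back to the stored value when EXCLUDED is NULL), gains "0xbb.." as the
# completion hash, and its status becomes :relayed, since GREATEST follows the
# enum declaration order (initiated < sent < confirmed < relayed).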
end

@ -107,9 +107,9 @@ defmodule Explorer.Chain.Import.Runner.Transactions do
)
end
case Application.compile_env(:explorer, :chain_type) do
:suave ->
defp default_on_conflict do
from(
transaction in Transaction,
update: [
@ -204,8 +204,10 @@ defmodule Explorer.Chain.Import.Runner.Transactions do
transaction.wrapped_hash
)
)
end
:optimism ->
defp default_on_conflict do
from(
transaction in Transaction,
update: [
@ -284,8 +286,82 @@ defmodule Explorer.Chain.Import.Runner.Transactions do
transaction.l1_block_number
)
)
end
:arbitrum ->
defp default_on_conflict do
from(
transaction in Transaction,
update: [
set: [
block_hash: fragment("EXCLUDED.block_hash"),
old_block_hash: transaction.block_hash,
block_number: fragment("EXCLUDED.block_number"),
block_consensus: fragment("EXCLUDED.block_consensus"),
block_timestamp: fragment("EXCLUDED.block_timestamp"),
created_contract_address_hash: fragment("EXCLUDED.created_contract_address_hash"),
created_contract_code_indexed_at: fragment("EXCLUDED.created_contract_code_indexed_at"),
cumulative_gas_used: fragment("EXCLUDED.cumulative_gas_used"),
error: fragment("EXCLUDED.error"),
from_address_hash: fragment("EXCLUDED.from_address_hash"),
gas: fragment("EXCLUDED.gas"),
gas_price: fragment("EXCLUDED.gas_price"),
gas_used: fragment("EXCLUDED.gas_used"),
index: fragment("EXCLUDED.index"),
input: fragment("EXCLUDED.input"),
nonce: fragment("EXCLUDED.nonce"),
r: fragment("EXCLUDED.r"),
s: fragment("EXCLUDED.s"),
status: fragment("EXCLUDED.status"),
to_address_hash: fragment("EXCLUDED.to_address_hash"),
v: fragment("EXCLUDED.v"),
value: fragment("EXCLUDED.value"),
earliest_processing_start: fragment("EXCLUDED.earliest_processing_start"),
revert_reason: fragment("EXCLUDED.revert_reason"),
max_priority_fee_per_gas: fragment("EXCLUDED.max_priority_fee_per_gas"),
max_fee_per_gas: fragment("EXCLUDED.max_fee_per_gas"),
type: fragment("EXCLUDED.type"),
gas_used_for_l1: fragment("EXCLUDED.gas_used_for_l1"),
# Don't update `hash` as it is part of the primary key and used for the conflict target
inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", transaction.inserted_at),
updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", transaction.updated_at)
]
],
where:
fragment(
"(EXCLUDED.block_hash, EXCLUDED.block_number, EXCLUDED.block_consensus, EXCLUDED.block_timestamp, EXCLUDED.created_contract_address_hash, EXCLUDED.created_contract_code_indexed_at, EXCLUDED.cumulative_gas_used, EXCLUDED.from_address_hash, EXCLUDED.gas, EXCLUDED.gas_price, EXCLUDED.gas_used, EXCLUDED.index, EXCLUDED.input, EXCLUDED.nonce, EXCLUDED.r, EXCLUDED.s, EXCLUDED.status, EXCLUDED.to_address_hash, EXCLUDED.v, EXCLUDED.value, EXCLUDED.earliest_processing_start, EXCLUDED.revert_reason, EXCLUDED.max_priority_fee_per_gas, EXCLUDED.max_fee_per_gas, EXCLUDED.type, EXCLUDED.gas_used_for_l1) IS DISTINCT FROM (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
transaction.block_hash,
transaction.block_number,
transaction.block_consensus,
transaction.block_timestamp,
transaction.created_contract_address_hash,
transaction.created_contract_code_indexed_at,
transaction.cumulative_gas_used,
transaction.from_address_hash,
transaction.gas,
transaction.gas_price,
transaction.gas_used,
transaction.index,
transaction.input,
transaction.nonce,
transaction.r,
transaction.s,
transaction.status,
transaction.to_address_hash,
transaction.v,
transaction.value,
transaction.earliest_processing_start,
transaction.revert_reason,
transaction.max_priority_fee_per_gas,
transaction.max_fee_per_gas,
transaction.type,
transaction.gas_used_for_l1
)
)
end
_ ->
defp default_on_conflict do
from(
transaction in Transaction,
update: [
@ -352,7 +428,7 @@ defmodule Explorer.Chain.Import.Runner.Transactions do
transaction.type
)
)
end
end
defp discard_blocks_for_recollated_transactions(repo, changes_list, %{

@ -59,6 +59,15 @@ defmodule Explorer.Chain.Import.Stage.BlockReferencing do
Runner.Beacon.BlobTransactions
]
@arbitrum_runners [
Runner.Arbitrum.Messages,
Runner.Arbitrum.LifecycleTransactions,
Runner.Arbitrum.L1Executions,
Runner.Arbitrum.L1Batches,
Runner.Arbitrum.BatchBlocks,
Runner.Arbitrum.BatchTransactions
]
@impl Stage
def runners do
case Application.get_env(:explorer, :chain_type) do
@ -80,6 +89,9 @@ defmodule Explorer.Chain.Import.Stage.BlockReferencing do
:zksync ->
@default_runners ++ @zksync_runners
:arbitrum ->
@default_runners ++ @arbitrum_runners
_ ->
@default_runners
end
@ -88,7 +100,9 @@ defmodule Explorer.Chain.Import.Stage.BlockReferencing do
@impl Stage
def all_runners do
@default_runners ++
@ethereum_runners ++
@optimism_runners ++
@polygon_edge_runners ++ @polygon_zkevm_runners ++ @shibarium_runners ++ @zksync_runners ++ @arbitrum_runners
end
@impl Stage

@ -1,5 +1,10 @@
defmodule Explorer.Chain.Transaction.Schema do
@moduledoc """
Models transactions.
Changes in the schema should be reflected in the bulk import module:
- Explorer.Chain.Import.Runner.Transactions
"""
alias Explorer.Chain.{
Address,
@ -14,6 +19,9 @@ defmodule Explorer.Chain.Transaction.Schema do
Wei
}
alias Explorer.Chain.Arbitrum.BatchBlock, as: ArbitrumBatchBlock
alias Explorer.Chain.Arbitrum.BatchTransaction, as: ArbitrumBatchTransaction
alias Explorer.Chain.Arbitrum.Message, as: ArbitrumMessage
alias Explorer.Chain.PolygonZkevm.BatchTransaction, as: ZkevmBatchTransaction
alias Explorer.Chain.Transaction.{Fork, Status}
alias Explorer.Chain.ZkSync.BatchTransaction, as: ZkSyncBatchTransaction
@ -114,6 +122,44 @@ defmodule Explorer.Chain.Transaction.Schema do
2
)
:arbitrum ->
elem(
quote do
field(:gas_used_for_l1, :decimal)
has_one(:arbitrum_batch_transaction, ArbitrumBatchTransaction,
foreign_key: :tx_hash,
references: :hash
)
has_one(:arbitrum_batch, through: [:arbitrum_batch_transaction, :batch])
has_one(:arbitrum_commitment_transaction,
through: [:arbitrum_batch, :commitment_transaction]
)
has_one(:arbitrum_batch_block, ArbitrumBatchBlock,
foreign_key: :block_number,
references: :block_number
)
has_one(:arbitrum_confirmation_transaction,
through: [:arbitrum_batch_block, :confirmation_transaction]
)
has_one(:arbitrum_message_to_l2, ArbitrumMessage,
foreign_key: :completion_transaction_hash,
references: :hash
)
has_one(:arbitrum_message_from_l2, ArbitrumMessage,
foreign_key: :originating_transaction_hash,
references: :hash
)
end,
2
)
_ ->
[]
end)
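# Note on the construction above: a `quote do ... end` block containing several
# expressions produces `{:__block__, meta, expressions}`, so `elem(..., 2)`
# extracts the bare list of `field/2` and `has_one/3` definitions, which is
# then spliced into the schema alongside the chain-type-independent fields.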
@ -234,16 +280,27 @@ defmodule Explorer.Chain.Transaction do
alias Explorer.SmartContract.SigProviderInterface
@optional_attrs ~w(max_priority_fee_per_gas max_fee_per_gas block_hash block_number
block_consensus block_timestamp created_contract_address_hash
cumulative_gas_used earliest_processing_start error gas_price
gas_used index created_contract_code_indexed_at status
to_address_hash revert_reason type has_error_in_internal_txs r s v)a
@chain_type_optional_attrs (case Application.compile_env(:explorer, :chain_type) do
:optimism ->
~w(l1_fee l1_fee_scalar l1_gas_price l1_gas_used l1_tx_origin l1_block_number)a
:suave ->
~w(execution_node_hash wrapped_type wrapped_nonce wrapped_to_address_hash wrapped_gas wrapped_gas_price wrapped_max_priority_fee_per_gas wrapped_max_fee_per_gas wrapped_value wrapped_input wrapped_v wrapped_r wrapped_s wrapped_hash)a
:arbitrum ->
~w(gas_used_for_l1)a
_ ->
~w()a
end)
@required_attrs ~w(from_address_hash gas hash input nonce value)a
@typedoc """
X coordinate module n in
@ -562,7 +619,7 @@ defmodule Explorer.Chain.Transaction do
attrs_to_cast =
@required_attrs ++
@optional_attrs ++
@chain_type_optional_attrs
transaction
|> cast(attrs, attrs_to_cast)
@ -577,14 +634,6 @@ defmodule Explorer.Chain.Transaction do
|> unique_constraint(:hash)
end
@spec block_timestamp(t()) :: DateTime.t()
def block_timestamp(%{block_number: nil, inserted_at: time}), do: time
def block_timestamp(%{block_timestamp: time}) when not is_nil(time), do: time

@ -87,6 +87,7 @@ defmodule Explorer.ChainSpec.GenesisData do
def fetch_genesis_data do
chain_spec_path = get_path(:chain_spec_path)
precompiled_config_path = get_path(:precompiled_config_path)
Logger.info(fn -> "Fetching precompiled config path: #{inspect(precompiled_config_path)}." end)
if is_nil(chain_spec_path) and is_nil(precompiled_config_path) do
Logger.warn(fn -> "Genesis data is not fetched. Neither chain spec path nor precompiled config path is set." end)

@ -217,6 +217,16 @@ defmodule Explorer.Repo do
end
end
defmodule Arbitrum do
use Ecto.Repo,
otp_app: :explorer,
adapter: Ecto.Adapters.Postgres
def init(_, opts) do
ConfigHelper.init_repo_module(__MODULE__, opts)
end
end
defmodule BridgedTokens do
use Ecto.Repo,
otp_app: :explorer,

@ -4,7 +4,7 @@ defmodule Explorer.Utility.MissingBlockRange do
"""
use Explorer.Schema
alias Explorer.Chain.BlockNumberHelper
alias Explorer.Chain.{Block, BlockNumberHelper}
alias Explorer.Repo
@default_returning_batch_size 10
@ -129,6 +129,38 @@ defmodule Explorer.Utility.MissingBlockRange do
|> Enum.map(&save_range/1)
end
@doc """
Finds the first range in the table that intersects the set of numbers from `lower_number` to `higher_number`.
## Parameters
- `lower_number`: The lower bound of the range to check.
- `higher_number`: The upper bound of the range to check.
## Returns
- Returns `nil` if no intersecting ranges are found, or an `Explorer.Utility.MissingBlockRange` instance of the first intersecting range otherwise.
"""
@spec intersects_with_range(Block.block_number(), Block.block_number()) :: nil | Explorer.Utility.MissingBlockRange.t()
def intersects_with_range(lower_number, higher_number)
when is_integer(lower_number) and lower_number >= 0 and
is_integer(higher_number) and lower_number <= higher_number do
query =
from(
r in __MODULE__,
# Note: from_number is higher than to_number, so in fact the range is to_number..from_number
# The first case: lower_number..to_number..higher_number
# The second case: lower_number..from_number..higher_number
# The third case: to_number..lower_number..higher_number..from_number
where:
(^lower_number <= r.to_number and ^higher_number >= r.to_number) or
(^lower_number <= r.from_number and ^higher_number >= r.from_number) or
(^lower_number >= r.to_number and ^higher_number <= r.from_number),
limit: 1
)
query
|> Repo.one()
end
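# A minimal usage sketch (hypothetical data): with a stored range
# %MissingBlockRange{from_number: 200, to_number: 100} (`from_number` is the
# upper bound), each call below matches one of the three cases in the query:
#
#   MissingBlockRange.intersects_with_range(90, 110)   # overlaps the lower end
#   MissingBlockRange.intersects_with_range(190, 210)  # overlaps the upper end
#   MissingBlockRange.intersects_with_range(120, 180)  # lies fully inside
#
#   MissingBlockRange.intersects_with_range(201, 300)  # => nil, no intersection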
defp insert_range(params) do
params
|> changeset()

@ -0,0 +1,124 @@
defmodule Explorer.Repo.Arbitrum.Migrations.CreateArbitrumTables do
use Ecto.Migration
def change do
execute(
"CREATE TYPE arbitrum_messages_op_type AS ENUM ('to_l2', 'from_l2')",
"DROP TYPE arbitrum_messages_op_type"
)
execute(
"CREATE TYPE arbitrum_messages_status AS ENUM ('initiated', 'sent', 'confirmed', 'relayed')",
"DROP TYPE arbitrum_messages_status"
)
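# Note: PostgreSQL orders enum values by their declaration order, so
# GREATEST('sent'::arbitrum_messages_status, 'relayed'::arbitrum_messages_status)
# yields 'relayed'. The import runners rely on this so that message and
# lifecycle-transaction statuses can only advance, never regress.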
execute(
"CREATE TYPE l1_tx_status AS ENUM ('unfinalized', 'finalized')",
"DROP TYPE l1_tx_status"
)
create table(:arbitrum_crosslevel_messages, primary_key: false) do
add(:direction, :arbitrum_messages_op_type, null: false, primary_key: true)
add(:message_id, :integer, null: false, primary_key: true)
add(:originator_address, :bytea, null: true)
add(:originating_transaction_hash, :bytea, null: true)
add(:origination_timestamp, :"timestamp without time zone", null: true)
add(:originating_transaction_block_number, :bigint, null: true)
add(:completion_transaction_hash, :bytea, null: true)
add(:status, :arbitrum_messages_status, null: false)
timestamps(null: false, type: :utc_datetime_usec)
end
create(index(:arbitrum_crosslevel_messages, [:direction, :originating_transaction_block_number, :status]))
create(index(:arbitrum_crosslevel_messages, [:direction, :completion_transaction_hash]))
create table(:arbitrum_lifecycle_l1_transactions, primary_key: false) do
add(:id, :integer, null: false, primary_key: true)
add(:hash, :bytea, null: false)
add(:block_number, :integer, null: false)
add(:timestamp, :"timestamp without time zone", null: false)
add(:status, :l1_tx_status, null: false)
timestamps(null: false, type: :utc_datetime_usec)
end
create(unique_index(:arbitrum_lifecycle_l1_transactions, :hash))
create(index(:arbitrum_lifecycle_l1_transactions, [:block_number, :status]))
create table(:arbitrum_l1_executions, primary_key: false) do
add(:message_id, :integer, null: false, primary_key: true)
add(
:execution_id,
references(:arbitrum_lifecycle_l1_transactions, on_delete: :restrict, on_update: :update_all, type: :integer),
null: false
)
timestamps(null: false, type: :utc_datetime_usec)
end
create table(:arbitrum_l1_batches, primary_key: false) do
add(:number, :integer, null: false, primary_key: true)
add(:transactions_count, :integer, null: false)
add(:start_block, :integer, null: false)
add(:end_block, :integer, null: false)
add(:before_acc, :bytea, null: false)
add(:after_acc, :bytea, null: false)
add(
:commitment_id,
references(:arbitrum_lifecycle_l1_transactions, on_delete: :restrict, on_update: :update_all, type: :integer),
null: false
)
timestamps(null: false, type: :utc_datetime_usec)
end
create table(:arbitrum_batch_l2_blocks, primary_key: false) do
add(
:batch_number,
references(:arbitrum_l1_batches,
column: :number,
on_delete: :delete_all,
on_update: :update_all,
type: :integer
),
null: false
)
add(
:confirmation_id,
references(:arbitrum_lifecycle_l1_transactions, on_delete: :restrict, on_update: :update_all, type: :integer),
null: true
)
# Although it is possible to recover the block number from the block hash,
# it is more efficient to store it directly.
# Re-orgs cannot introduce inconsistencies with the `blocks` table here,
# because blocks appear in `arbitrum_batch_l2_blocks` only once they are
# included in a batch.
add(:block_number, :integer, null: false, primary_key: true)
timestamps(null: false, type: :utc_datetime_usec)
end
create(index(:arbitrum_batch_l2_blocks, :batch_number))
create(index(:arbitrum_batch_l2_blocks, :confirmation_id))
create table(:arbitrum_batch_l2_transactions, primary_key: false) do
add(
:batch_number,
references(:arbitrum_l1_batches,
column: :number,
on_delete: :delete_all,
on_update: :update_all,
type: :integer
),
null: false
)
add(:tx_hash, :bytea, null: false, primary_key: true)
timestamps(null: false, type: :utc_datetime_usec)
end
create(index(:arbitrum_batch_l2_transactions, :batch_number))
end
end

@ -0,0 +1,15 @@
defmodule Explorer.Repo.Arbitrum.Migrations.ExtendTransactionAndBlockTables do
use Ecto.Migration
def change do
alter table(:blocks) do
add(:send_count, :integer)
add(:send_root, :bytea)
add(:l1_block_number, :integer)
end
alter table(:transactions) do
add(:gas_used_for_l1, :numeric, precision: 100)
end
end
end

@ -522,6 +522,21 @@ defmodule Explorer.Factory do
timestamp: DateTime.utc_now(),
refetch_needed: false
}
|> Map.merge(block_factory_chain_type_fields())
end
case Application.compile_env(:explorer, :chain_type) do
:arbitrum ->
defp block_factory_chain_type_fields() do
%{
send_count: Enum.random(1..100_000),
send_root: block_hash(),
l1_block_number: Enum.random(1..100_000)
}
end
_ ->
defp block_factory_chain_type_fields(), do: %{}
end
def contract_method_factory() do
@ -871,6 +886,19 @@ defmodule Explorer.Factory do
value: Enum.random(1..100_000),
block_timestamp: DateTime.utc_now()
}
|> Map.merge(transaction_factory_chain_type_fields())
end
case Application.compile_env(:explorer, :chain_type) do
:arbitrum ->
defp transaction_factory_chain_type_fields() do
%{
gas_used_for_l1: Enum.random(1..100_000)
}
end
_ ->
defp transaction_factory_chain_type_fields(), do: %{}
end
def transaction_to_verified_contract_factory do

@ -48,6 +48,7 @@ defmodule Indexer.Block.Fetcher do
alias Indexer.Transform.PolygonEdge.{DepositExecutes, Withdrawals}
alias Indexer.Transform.Arbitrum.Messaging, as: ArbitrumMessaging
alias Indexer.Transform.Shibarium.Bridge, as: ShibariumBridge
alias Indexer.Transform.Blocks, as: TransformBlocks
@ -171,6 +172,7 @@ defmodule Indexer.Block.Fetcher do
do: PolygonZkevmBridge.parse(blocks, logs),
else: []
),
arbitrum_xlevel_messages = ArbitrumMessaging.parse(transactions_with_receipts, logs),
%FetchedBeneficiaries{params_set: beneficiary_params_set, errors: beneficiaries_errors} =
fetch_beneficiaries(blocks, transactions_with_receipts, json_rpc_named_arguments),
addresses =
@ -226,7 +228,8 @@ defmodule Indexer.Block.Fetcher do
polygon_edge_withdrawals: polygon_edge_withdrawals,
polygon_edge_deposit_executes: polygon_edge_deposit_executes,
polygon_zkevm_bridge_operations: polygon_zkevm_bridge_operations,
shibarium_bridge_operations: shibarium_bridge_operations,
arbitrum_messages: arbitrum_xlevel_messages
},
{:ok, inserted} <-
__MODULE__.import(
@ -260,7 +263,8 @@ defmodule Indexer.Block.Fetcher do
polygon_edge_withdrawals: polygon_edge_withdrawals,
polygon_edge_deposit_executes: polygon_edge_deposit_executes,
polygon_zkevm_bridge_operations: polygon_zkevm_bridge_operations,
shibarium_bridge_operations: shibarium_bridge_operations,
arbitrum_messages: arbitrum_xlevel_messages
}) do
case Application.get_env(:explorer, :chain_type) do
:ethereum ->
@ -286,6 +290,10 @@ defmodule Indexer.Block.Fetcher do
basic_import_options
|> Map.put_new(:shibarium_bridge_operations, %{params: shibarium_bridge_operations})
:arbitrum ->
basic_import_options
|> Map.put_new(:arbitrum_messages, %{params: arbitrum_xlevel_messages})
_ ->
basic_import_options
end

@ -0,0 +1,295 @@
defmodule Indexer.Fetcher.Arbitrum.Messaging do
@moduledoc """
Provides functionality for filtering and handling messaging between Layer 1 (L1) and Layer 2 (L2) in the Arbitrum protocol.
This module is responsible for identifying and processing messages that are transmitted
between L1 and L2. It includes functions to filter incoming logs and transactions to
find those that represent messages moving between the layers, and to handle the data of
these messages appropriately.
"""
import EthereumJSONRPC, only: [quantity_to_integer: 1]
import Explorer.Helper, only: [decode_data: 2]
import Indexer.Fetcher.Arbitrum.Utils.Logging, only: [log_info: 1, log_debug: 1]
alias Indexer.Fetcher.Arbitrum.Utils.Db
require Logger
@l2_to_l1_event_unindexed_params [
:address,
{:uint, 256},
{:uint, 256},
{:uint, 256},
{:uint, 256},
:bytes
]
@type arbitrum_message :: %{
direction: :to_l2 | :from_l2,
message_id: non_neg_integer(),
originator_address: binary(),
originating_transaction_hash: binary(),
origination_timestamp: DateTime.t(),
originating_transaction_block_number: non_neg_integer(),
completion_transaction_hash: binary(),
status: :initiated | :sent | :confirmed | :relayed
}
@typep min_transaction :: %{
:hash => binary(),
:type => non_neg_integer(),
optional(:request_id) => non_neg_integer(),
optional(any()) => any()
}
@typep min_log :: %{
:data => binary(),
:index => non_neg_integer(),
:first_topic => binary(),
:second_topic => binary(),
:third_topic => binary(),
:fourth_topic => binary(),
:address_hash => binary(),
:transaction_hash => binary(),
:block_hash => binary(),
:block_number => non_neg_integer(),
optional(any()) => any()
}
@doc """
Filters a list of rollup transactions to identify L1-to-L2 messages and composes a map for each with the related message information.
This function filters through a list of rollup transactions, selecting those
with a non-nil `request_id`, indicating they are L1-to-L2 message completions.
These filtered transactions are then processed to construct a detailed message
structure for each.
## Parameters
- `transactions`: A list of rollup transaction entries.
- `report`: An optional boolean flag (default `true`) that, when `true`, logs
the number of processed L1-to-L2 messages if any are found.
## Returns
- A list of L1-to-L2 messages with detailed information and current status. Every
map in the list is compatible with the database import operation. All messages in
this context are considered `:relayed` as they represent completed actions from
L1 to L2.
"""
@spec filter_l1_to_l2_messages(maybe_improper_list(min_transaction, [])) :: [arbitrum_message]
@spec filter_l1_to_l2_messages(maybe_improper_list(min_transaction, []), boolean()) :: [arbitrum_message]
def filter_l1_to_l2_messages(transactions, report \\ true)
when is_list(transactions) and is_boolean(report) do
messages =
transactions
|> Enum.filter(fn tx ->
tx[:request_id] != nil
end)
|> handle_filtered_l1_to_l2_messages()
if report && messages != [] do
log_info("#{length(messages)} completions of L1-to-L2 messages will be imported")
end
messages
end
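# A sketch of the expected behavior (hypothetical values): only the
# transaction carrying a `request_id` becomes a message.
#
#   txs = [
#     %{hash: "0xaa..", type: 105, request_id: 1773},
#     %{hash: "0xbb..", type: 2}
#   ]
#
#   filter_l1_to_l2_messages(txs, false)
#   # => [%{direction: :to_l2, message_id: 1773,
#   #       completion_transaction_hash: "0xaa..", status: :relayed,
#   #       originator_address: nil, ...}]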
@doc """
Filters logs for L2-to-L1 messages and composes a map for each with the related message information.
This function filters a list of logs to identify those representing L2-to-L1 messages.
It checks each log against the ArbSys contract address and the `L2ToL1Tx` event
signature to determine if it corresponds to an L2-to-L1 message.
## Parameters
- `logs`: A list of log entries.
## Returns
- A list of L2-to-L1 messages with detailed information and current status. Each map
in the list is compatible with the database import operation.
"""
@spec filter_l2_to_l1_messages(maybe_improper_list(min_log, [])) :: [arbitrum_message]
def filter_l2_to_l1_messages(logs) when is_list(logs) do
arbsys_contract = Application.get_env(:indexer, __MODULE__)[:arbsys_contract]
filtered_logs =
logs
|> Enum.filter(fn event ->
event.address_hash == arbsys_contract and event.first_topic == Db.l2_to_l1_event()
end)
handle_filtered_l2_to_l1_messages(filtered_logs)
end
@doc """
Processes a list of filtered rollup transactions representing L1-to-L2 messages, constructing a detailed message structure for each.
## Parameters
- `filtered_txs`: A list of rollup transaction entries, each representing an L1-to-L2
message transaction.
## Returns
- A list of L1-to-L2 messages with detailed information and current status. Every map
in the list is compatible with the database import operation. All messages in this context
are considered `:relayed` as they represent completed actions from L1 to L2.
"""
@spec handle_filtered_l1_to_l2_messages(maybe_improper_list(min_transaction, [])) :: [arbitrum_message]
def handle_filtered_l1_to_l2_messages([]) do
[]
end
def handle_filtered_l1_to_l2_messages(filtered_txs) when is_list(filtered_txs) do
filtered_txs
|> Enum.map(fn tx ->
log_debug("L1 to L2 message #{tx.hash} found with the type #{tx.type}")
%{direction: :to_l2, message_id: tx.request_id, completion_transaction_hash: tx.hash, status: :relayed}
|> complete_to_params()
end)
end
@doc """
Processes a list of filtered logs representing L2-to-L1 messages, enriching and categorizing them based on their current state and optionally updating their execution status.
This function takes filtered log events, typically representing L2-to-L1 messages, and
processes each to construct a comprehensive message structure. It also determines the
status of each message by comparing its block number against the highest committed and
confirmed block numbers. If a `caller` module is provided, it further updates the
messages' execution status.
## Parameters
- `filtered_logs`: A list of log entries, each representing an L2-to-L1 message event.
- `caller`: An optional module that is used as a flag to determine whether the
discovered messages should be checked for execution.
## Returns
- A list of L2-to-L1 messages with detailed information and current status, ready for
database import.
"""
@spec handle_filtered_l2_to_l1_messages([min_log]) :: [arbitrum_message]
@spec handle_filtered_l2_to_l1_messages([min_log], module()) :: [arbitrum_message]
def handle_filtered_l2_to_l1_messages(filtered_logs, caller \\ nil)
def handle_filtered_l2_to_l1_messages([], _) do
[]
end
def handle_filtered_l2_to_l1_messages(filtered_logs, caller) when is_list(filtered_logs) do
# Fetch these values before the loop over the events to reduce the number of DB requests
highest_committed_block = Db.highest_committed_block(-1)
highest_confirmed_block = Db.highest_confirmed_block(-1)
messages_map =
filtered_logs
|> Enum.reduce(%{}, fn event, messages_acc ->
log_debug("L2 to L1 message #{event.transaction_hash} found")
{message_id, originator, blocknum, timestamp} = l2_to_l1_event_parse(event)
message =
%{
direction: :from_l2,
message_id: message_id,
originator_address: originator,
originating_transaction_hash: event.transaction_hash,
origination_timestamp: timestamp,
originating_transaction_block_number: blocknum,
status: status_l2_to_l1_message(blocknum, highest_committed_block, highest_confirmed_block)
}
|> complete_to_params()
Map.put(
messages_acc,
message_id,
message
)
end)
log_info("Origins of #{length(Map.values(messages_map))} L2-to-L1 messages will be imported")
# The check for executed messages is required only when L2-to-L1
# messages are discovered by the block catchup fetcher
updated_messages_map =
case caller do
nil ->
messages_map
_ ->
messages_map
|> find_and_update_executed_messages()
end
updated_messages_map
|> Map.values()
end
# Converts an incomplete message structure into a complete parameters map for database updates.
defp complete_to_params(incomplete) do
[
:direction,
:message_id,
:originator_address,
:originating_transaction_hash,
:origination_timestamp,
:originating_transaction_block_number,
:completion_transaction_hash,
:status
]
|> Enum.reduce(%{}, fn key, out ->
Map.put(out, key, Map.get(incomplete, key))
end)
end
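# Sketch: keys absent from the input are filled with `nil`, so every map in a
# changes list exposes the full set of fields expected by the import runner.
#
#   complete_to_params(%{direction: :to_l2, message_id: 7, status: :relayed})
#   # => %{direction: :to_l2, message_id: 7, status: :relayed,
#   #      originator_address: nil, originating_transaction_hash: nil,
#   #      origination_timestamp: nil, originating_transaction_block_number: nil,
#   #      completion_transaction_hash: nil}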
# Parses an L2-to-L1 event, extracting relevant information from the event's data.
defp l2_to_l1_event_parse(event) do
[
caller,
arb_block_num,
_eth_block_num,
timestamp,
_callvalue,
_data
] = decode_data(event.data, @l2_to_l1_event_unindexed_params)
position = quantity_to_integer(event.fourth_topic)
{position, caller, arb_block_num, Timex.from_unix(timestamp)}
end
# Determines the status of an L2-to-L1 message based on its block number and the highest
# committed and confirmed block numbers.
defp status_l2_to_l1_message(msg_block, highest_committed_block, highest_confirmed_block) do
cond do
highest_confirmed_block >= msg_block -> :confirmed
highest_committed_block >= msg_block -> :sent
true -> :initiated
end
end
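# For example, with highest_committed_block = 150 and
# highest_confirmed_block = 120 (hypothetical values):
#
#   status_l2_to_l1_message(100, 150, 120)  # => :confirmed (100 <= 120)
#   status_l2_to_l1_message(130, 150, 120)  # => :sent (120 < 130 <= 150)
#   status_l2_to_l1_message(160, 150, 120)  # => :initiated (160 > 150)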
# Finds and updates the status of L2-to-L1 messages that have been executed on L1.
# This function iterates over the given messages, identifies those with corresponding L1 executions,
# and updates their `completion_transaction_hash` and `status` accordingly.
#
# ## Parameters
# - `messages`: A map where each key is a message ID, and each value is the message's details.
#
# ## Returns
# - The updated map of messages with the `completion_transaction_hash` and `status` fields updated
# for messages that have been executed.
defp find_and_update_executed_messages(messages) do
messages
|> Map.keys()
|> Db.l1_executions()
|> Enum.reduce(messages, fn execution, messages_acc ->
message =
messages_acc
|> Map.get(execution.message_id)
|> Map.put(:completion_transaction_hash, execution.execution_transaction.hash.bytes)
|> Map.put(:status, :relayed)
Map.put(messages_acc, execution.message_id, message)
end)
end
end

@ -0,0 +1,365 @@
defmodule Indexer.Fetcher.Arbitrum.RollupMessagesCatchup do
@moduledoc """
Manages the catch-up process for historical rollup messages between Layer 1 (L1) and Layer 2 (L2) within the Arbitrum network.
This module aims to discover historical messages that were not captured by the block
fetcher or the catch-up block fetcher. This situation arises during the upgrade of an
existing instance of BlockScout (BS) that already has indexed blocks but lacks
a crosschain messages discovery mechanism. Therefore, it becomes necessary to traverse
the already indexed blocks to extract crosschain messages contained within them.
The fetcher's operation cycle consists of five phases, initiated by sending specific
messages:
- `:wait_for_new_block`: Waits for the block fetcher to index new blocks before
proceeding with message discovery.
- `:init_worker`: Sets up the initial parameters for the message discovery process,
identifying the ending blocks for the search.
- `:historical_msg_from_l2` and `:historical_msg_to_l2`: Manage the discovery and
processing of messages sent from L2 to L1 and from L1 to L2, respectively.
- `:plan_next_iteration`: Schedules the next iteration of the catch-up process.
Workflow diagram of the fetcher state changes:
wait_for_new_block
|
V
init_worker
|
V
|-> historical_msg_from_l2 -> historical_msg_to_l2 -> plan_next_iteration ->|
|---------------------------------------------------------------------------|
`historical_msg_from_l2` discovers L2-to-L1 messages by analyzing logs from already
indexed rollup transactions. Logs representing the `L2ToL1Tx` event are utilized
to construct messages. The current rollup state, including information about
committed batches and confirmed blocks, is used to assign the appropriate status
to the messages before importing them into the database.
`historical_msg_to_l2` discovers L1-to-L2 messages by requesting rollup
transactions through RPC. Transactions containing a `requestId` in their body are
utilized to construct messages. These messages are marked as `:relayed`, indicating
that they have been successfully received on L2 and are considered completed, and
are then imported into the database. This approach is adopted because it parallels
the action of re-indexing existing transactions to include Arbitrum-specific fields,
which are absent in the currently indexed transactions. However, permanently adding
these fields to the database model for the sake of historical message catch-up is
impractical. Therefore, to avoid the extensive process of re-indexing and to
minimize changes to the database schema, fetching the required data directly from
an external node via RPC is preferred for historical message discovery.
"""
use GenServer
use Indexer.Fetcher
import Indexer.Fetcher.Arbitrum.Utils.Helper, only: [increase_duration: 2]
import Indexer.Fetcher.Arbitrum.Utils.Logging, only: [log_warning: 1]
alias Indexer.Fetcher.Arbitrum.Utils.Db
alias Indexer.Fetcher.Arbitrum.Workers.HistoricalMessagesOnL2
require Logger
# seconds
@wait_for_new_block_delay 15
# seconds
@release_cpu_delay 1
def child_spec(start_link_arguments) do
spec = %{
id: __MODULE__,
start: {__MODULE__, :start_link, start_link_arguments},
restart: :transient,
type: :worker
}
Supervisor.child_spec(spec, [])
end
def start_link(args, gen_server_options \\ []) do
GenServer.start_link(__MODULE__, args, Keyword.put_new(gen_server_options, :name, __MODULE__))
end
@impl GenServer
def init(args) do
Logger.metadata(fetcher: :arbitrum_bridge_l2_catchup)
config_common = Application.get_all_env(:indexer)[Indexer.Fetcher.Arbitrum]
rollup_chunk_size = config_common[:rollup_chunk_size]
config_tracker = Application.get_all_env(:indexer)[__MODULE__]
recheck_interval = config_tracker[:recheck_interval]
messages_to_l2_blocks_depth = config_tracker[:messages_to_l2_blocks_depth]
messages_from_l2_blocks_depth = config_tracker[:messages_to_l1_blocks_depth]
Process.send(self(), :wait_for_new_block, [])
{:ok,
%{
config: %{
rollup_rpc: %{
json_rpc_named_arguments: args[:json_rpc_named_arguments],
chunk_size: rollup_chunk_size
},
json_l2_rpc_named_arguments: args[:json_rpc_named_arguments],
recheck_interval: recheck_interval,
messages_to_l2_blocks_depth: messages_to_l2_blocks_depth,
messages_from_l2_blocks_depth: messages_from_l2_blocks_depth
},
data: %{}
}}
end
@impl GenServer
def handle_info({ref, _result}, state) do
Process.demonitor(ref, [:flush])
{:noreply, state}
end
# Waits for the next new block to be picked up by the block fetcher before initiating
# the worker for message discovery.
#
# This function checks if a new block has been indexed by the block fetcher since
# the start of the historical messages fetcher. It queries the database to find
# the closest block timestamped after this period. If a new block is found, it
# initiates the worker process for message discovery by sending the `:init_worker`
# message. If no new block is available, it reschedules itself to check again after
# a specified delay.
#
# The number of the new block indexed by the block fetcher will be used by the worker
# initializer to establish the end of the range where new messages should be discovered.
#
# ## Parameters
# - `:wait_for_new_block`: The message that triggers the waiting process.
# - `state`: The current state of the fetcher.
#
# ## Returns
# - `{:noreply, new_state}` where the new indexed block number is stored, or retain
# the current state while awaiting new blocks.
@impl GenServer
def handle_info(:wait_for_new_block, %{data: _} = state) do
{time_of_start, interim_data} =
if is_nil(Map.get(state.data, :time_of_start)) do
now = DateTime.utc_now()
updated_data = Map.put(state.data, :time_of_start, now)
{now, updated_data}
else
{state.data.time_of_start, state.data}
end
new_data =
case Db.closest_block_after_timestamp(time_of_start) do
{:ok, block} ->
Process.send(self(), :init_worker, [])
interim_data
|> Map.put(:new_block, block)
|> Map.delete(:time_of_start)
{:error, _} ->
log_warning("No progress of the block fetcher found")
Process.send_after(self(), :wait_for_new_block, :timer.seconds(@wait_for_new_block_delay))
interim_data
end
{:noreply, %{state | data: new_data}}
end
# Sets the initial parameters for discovering historical messages. This function
# calculates the end blocks for both L1-to-L2 and L2-to-L1 message discovery
# processes based on the earliest messages already indexed. If no messages are
# available, the block number before the latest indexed block will be used.
# These end blocks are used to initiate the discovery process in subsequent iterations.
#
# After identifying the initial values, the function immediately transitions to
# the L2-to-L1 message discovery process by sending the `:historical_msg_from_l2`
# message.
#
# ## Parameters
# - `:init_worker`: The message that triggers the handler.
# - `state`: The current state of the fetcher.
#
# ## Returns
# - `{:noreply, new_state}` where the end blocks for both L1-to-L2 and L2-to-L1
# message discovery are established.
@impl GenServer
def handle_info(:init_worker, %{data: _} = state) do
historical_msg_from_l2_end_block = Db.rollup_block_to_discover_missed_messages_from_l2(state.data.new_block - 1)
historical_msg_to_l2_end_block = Db.rollup_block_to_discover_missed_messages_to_l2(state.data.new_block - 1)
Process.send(self(), :historical_msg_from_l2, [])
new_data =
Map.merge(state.data, %{
duration: 0,
progressed: false,
historical_msg_from_l2_end_block: historical_msg_from_l2_end_block,
historical_msg_to_l2_end_block: historical_msg_to_l2_end_block
})
{:noreply, %{state | data: new_data}}
end
# Processes the next iteration of historical L2-to-L1 message discovery.
#
# This function uses the results from the previous iteration to set the end block
# for the current message discovery iteration. It identifies the start block and
# requests rollup logs within the specified range to explore `L2ToL1Tx` events.
# Discovered events are used to compose messages to be stored in the database.
# Before being stored in the database, each message is assigned the appropriate
# status based on the current state of the rollup.
#
# After importing the messages, the function immediately switches to the process
# of L1-to-L2 message discovery for the next range of blocks by sending
# the `:historical_msg_to_l2` message.
#
# ## Parameters
# - `:historical_msg_from_l2`: The message triggering the handler.
# - `state`: The current state of the fetcher containing necessary data like
# the end block identified after the previous iteration of historical
# message discovery from L2.
#
# ## Returns
# - `{:noreply, new_state}` where the end block for the next L2-to-L1 message
# discovery iteration is updated based on the results of the current iteration.
@impl GenServer
def handle_info(
:historical_msg_from_l2,
%{
data: %{duration: _, historical_msg_from_l2_end_block: _, progressed: _}
} = state
) do
end_block = state.data.historical_msg_from_l2_end_block
{handle_duration, {:ok, start_block}} =
:timer.tc(&HistoricalMessagesOnL2.discover_historical_messages_from_l2/2, [end_block, state])
Process.send(self(), :historical_msg_to_l2, [])
progressed = state.data.progressed || (not is_nil(start_block) && start_block - 1 < end_block)
new_data =
Map.merge(state.data, %{
duration: increase_duration(state.data, handle_duration),
progressed: progressed,
historical_msg_from_l2_end_block: if(is_nil(start_block), do: nil, else: start_block - 1)
})
{:noreply, %{state | data: new_data}}
end
# Processes the next iteration of historical L1-to-L2 message discovery.
#
# This function uses the results from the previous iteration to set the end block for
# the current message discovery iteration. It identifies the start block and requests
# rollup blocks within the specified range through RPC to explore transactions
# containing a `requestId` in their body. This RPC request is necessary because the
# `requestId` field is not present in the transaction model of already indexed
# transactions in the database. The discovered transactions are then used to construct
# messages, which are subsequently stored in the database. These imported messages are
# marked as `:relayed`, signifying that they represent completed actions from L1 to L2.
#
# After importing the messages, the function immediately switches to the process
# of choosing a delay prior to the next iteration of historical messages discovery
# by sending the `:plan_next_iteration` message.
#
# ## Parameters
# - `:historical_msg_to_l2`: The message triggering the handler.
# - `state`: The current state of the fetcher containing necessary data, like the end
# block identified after the previous iteration of historical message discovery.
#
# ## Returns
# - `{:noreply, new_state}` where the end block for the next L1-to-L2 message discovery
# iteration is updated based on the results of the current iteration.
@impl GenServer
def handle_info(
:historical_msg_to_l2,
%{
data: %{duration: _, historical_msg_to_l2_end_block: _, progressed: _}
} = state
) do
end_block = state.data.historical_msg_to_l2_end_block
{handle_duration, {:ok, start_block}} =
:timer.tc(&HistoricalMessagesOnL2.discover_historical_messages_to_l2/2, [end_block, state])
Process.send(self(), :plan_next_iteration, [])
progressed = state.data.progressed || (not is_nil(start_block) && start_block - 1 < end_block)
new_data =
Map.merge(state.data, %{
duration: increase_duration(state.data, handle_duration),
progressed: progressed,
historical_msg_to_l2_end_block: if(is_nil(start_block), do: nil, else: start_block - 1)
})
{:noreply, %{state | data: new_data}}
end
# Decides whether to stop or continue the fetcher based on the current state of message discovery.
#
# If both `historical_msg_from_l2_end_block` and `historical_msg_to_l2_end_block` are 0 or less,
# indicating that there are no more historical messages to fetch, the task is stopped with a normal
# termination.
#
# ## Parameters
# - `:plan_next_iteration`: The message that triggers this function.
# - `state`: The current state of the fetcher.
#
# ## Returns
# - `{:stop, :normal, state}`: Ends the fetcher's operation cleanly.
@impl GenServer
def handle_info(
:plan_next_iteration,
%{
data: %{
historical_msg_from_l2_end_block: from_l2_end_block,
historical_msg_to_l2_end_block: to_l2_end_block
}
} = state
)
when from_l2_end_block <= 0 and to_l2_end_block <= 0 do
{:stop, :normal, state}
end
# Plans the next iteration for the historical messages discovery based on the state's `progressed` flag.
#
# If no progress was made (`progressed` is false), schedules the next check based
# on the `recheck_interval`, adjusted by the time already spent. If progress was
# made, it imposes a shorter delay to quickly check again, helping to reduce CPU
# usage during idle periods.
#
# The chosen delay is used to schedule the next iteration of historical messages discovery
# by sending `:historical_msg_from_l2`.
#
# ## Parameters
# - `:plan_next_iteration`: The message that triggers this function.
# - `state`: The current state of the fetcher containing both the fetcher configuration
# and data needed to determine the next steps.
#
# ## Returns
# - `{:noreply, state}` where `state` contains the reset `duration` of the iteration and
# the flag if the messages discovery process `progressed`.
@impl GenServer
def handle_info(
:plan_next_iteration,
%{config: %{recheck_interval: _}, data: %{duration: _, progressed: _}} = state
) do
next_timeout =
if state.data.progressed do
# Not all historical messages have been received yet, so introduce
# a short delay to release the CPU a bit
:timer.seconds(@release_cpu_delay)
else
max(state.config.recheck_interval - div(state.data.duration, 1000), 0)
end
Process.send_after(self(), :historical_msg_from_l2, next_timeout)
new_data =
state.data
|> Map.put(:duration, 0)
|> Map.put(:progressed, false)
{:noreply, %{state | data: new_data}}
end
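# Worked example (hypothetical values): `duration` is accumulated from
# `:timer.tc/2` in microseconds, so with recheck_interval = 60_000 ms and
# duration = 45_000_000 us the idle branch schedules the next discovery in
# max(60_000 - 45_000, 0) = 15_000 ms, whereas after a progressed iteration
# the delay is just :timer.seconds(1).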
end

@ -0,0 +1,459 @@
defmodule Indexer.Fetcher.Arbitrum.TrackingBatchesStatuses do
@moduledoc """
Manages the tracking and updating of the statuses of rollup batches, confirmations, and cross-chain message executions for an Arbitrum rollup.
This module orchestrates the workflow for discovering new and historical
batches of rollup transactions, confirmations of rollup blocks, and
executions of L2-to-L1 messages. It ensures the accurate tracking and
updating of the rollup process stages.
The fetcher's operation cycle begins with the `:init_worker` message, which
establishes the initial state with the necessary configuration.
The process then progresses through a sequence of steps, each triggered by
specific messages:
- `:check_new_batches`: Discovers new batches of rollup transactions and
updates their statuses.
- `:check_new_confirmations`: Identifies new confirmations of rollup blocks
to update their statuses.
- `:check_new_executions`: Finds new executions of L2-to-L1 messages to
update their statuses.
- `:check_historical_batches`: Processes historical batches of rollup
transactions.
- `:check_historical_confirmations`: Handles historical confirmations of
rollup blocks.
- `:check_historical_executions`: Manages historical executions of L2-to-L1
messages.
- `:check_lifecycle_txs_finalization`: Finalizes the status of lifecycle
transactions, confirming the blocks and messages involved.
Discovery of rollup transaction batches is executed by requesting logs on L1
that correspond to the `SequencerBatchDelivered` event emitted by the
Arbitrum `SequencerInbox` contract.
Discovery of rollup block confirmations is executed by requesting logs on L1
that correspond to the `SendRootUpdated` event emitted by the Arbitrum
`Outbox` contract.
Discovery of the L2-to-L1 message executions occurs by requesting logs on L1
that correspond to the `OutBoxTransactionExecuted` event emitted by the
Arbitrum `Outbox` contract.
When processing batches or confirmations, the L2-to-L1 messages included in
the corresponding rollup blocks are updated to reflect their status changes.
"""
use GenServer
use Indexer.Fetcher
alias Indexer.Fetcher.Arbitrum.Workers.{L1Finalization, NewBatches, NewConfirmations, NewL1Executions}
import Indexer.Fetcher.Arbitrum.Utils.Helper, only: [increase_duration: 2]
alias Indexer.Helper, as: IndexerHelper
alias Indexer.Fetcher.Arbitrum.Utils.{Db, Rpc}
require Logger
def child_spec(start_link_arguments) do
spec = %{
id: __MODULE__,
start: {__MODULE__, :start_link, start_link_arguments},
restart: :transient,
type: :worker
}
Supervisor.child_spec(spec, [])
end
def start_link(args, gen_server_options \\ []) do
GenServer.start_link(__MODULE__, args, Keyword.put_new(gen_server_options, :name, __MODULE__))
end
@impl GenServer
def init(args) do
Logger.metadata(fetcher: :arbitrum_batches_tracker)
config_common = Application.get_all_env(:indexer)[Indexer.Fetcher.Arbitrum]
l1_rpc = config_common[:l1_rpc]
l1_rpc_block_range = config_common[:l1_rpc_block_range]
l1_rollup_address = config_common[:l1_rollup_address]
l1_rollup_init_block = config_common[:l1_rollup_init_block]
l1_start_block = config_common[:l1_start_block]
l1_rpc_chunk_size = config_common[:l1_rpc_chunk_size]
rollup_chunk_size = config_common[:rollup_chunk_size]
config_tracker = Application.get_all_env(:indexer)[__MODULE__]
recheck_interval = config_tracker[:recheck_interval]
messages_to_blocks_shift = config_tracker[:messages_to_blocks_shift]
track_l1_tx_finalization = config_tracker[:track_l1_tx_finalization]
finalized_confirmations = config_tracker[:finalized_confirmations]
confirmation_batches_depth = config_tracker[:confirmation_batches_depth]
new_batches_limit = config_tracker[:new_batches_limit]
Process.send(self(), :init_worker, [])
{:ok,
%{
config: %{
l1_rpc: %{
json_rpc_named_arguments: IndexerHelper.json_rpc_named_arguments(l1_rpc),
logs_block_range: l1_rpc_block_range,
chunk_size: l1_rpc_chunk_size,
track_finalization: track_l1_tx_finalization,
finalized_confirmations: finalized_confirmations
},
rollup_rpc: %{
json_rpc_named_arguments: args[:json_rpc_named_arguments],
chunk_size: rollup_chunk_size
},
recheck_interval: recheck_interval,
l1_rollup_address: l1_rollup_address,
l1_start_block: l1_start_block,
l1_rollup_init_block: l1_rollup_init_block,
new_batches_limit: new_batches_limit,
messages_to_blocks_shift: messages_to_blocks_shift,
confirmation_batches_depth: confirmation_batches_depth
},
data: %{}
}}
end
@impl GenServer
def handle_info({ref, _result}, state) do
Process.demonitor(ref, [:flush])
{:noreply, state}
end
# Initializes the worker for discovering batches of rollup transactions, confirmations of rollup blocks, and executions of L2-to-L1 messages.
#
# This function sets up the initial state for the fetcher, identifying the
# starting blocks for new and historical discoveries of batches, confirmations,
# and executions. It also retrieves addresses for the Arbitrum Outbox and
# SequencerInbox contracts.
#
# After initializing these parameters, it immediately sends `:check_new_batches`
# to commence the fetcher loop.
#
# ## Parameters
# - `:init_worker`: The message triggering the initialization.
# - `state`: The current state of the process, containing initial configuration
# data.
#
# ## Returns
# - `{:noreply, new_state}` where `new_state` is updated with Arbitrum contract
# addresses and starting blocks for new and historical discoveries.
@impl GenServer
def handle_info(
:init_worker,
%{
config: %{
l1_rpc: %{json_rpc_named_arguments: json_l1_rpc_named_arguments},
l1_rollup_address: l1_rollup_address
}
} = state
) do
%{outbox: outbox_address, sequencer_inbox: sequencer_inbox_address} =
Rpc.get_contracts_for_rollup(
l1_rollup_address,
:inbox_outbox,
json_l1_rpc_named_arguments
)
l1_start_block = Rpc.get_l1_start_block(state.config.l1_start_block, json_l1_rpc_named_arguments)
# TODO: it is necessary to develop a way to discover missed batches to cover the case
# when batches #1, #2 and #4 are in the DB, but #3 is not.
# One of the approaches is to look deeper than the latest committed batch and
# check whether batches were already handled or not.
new_batches_start_block = Db.l1_block_to_discover_latest_committed_batch(l1_start_block)
historical_batches_end_block = Db.l1_block_to_discover_earliest_committed_batch(l1_start_block - 1)
new_confirmations_start_block = Db.l1_block_of_latest_confirmed_block(l1_start_block)
# TODO: it is necessary to develop a way to discover missed executions.
# One of the approaches is to look deeper than the latest execution and
# check whether executions were already handled or not.
new_executions_start_block = Db.l1_block_to_discover_latest_execution(l1_start_block)
historical_executions_end_block = Db.l1_block_to_discover_earliest_execution(l1_start_block - 1)
Process.send(self(), :check_new_batches, [])
new_state =
state
|> Map.put(
:config,
Map.merge(state.config, %{
l1_start_block: l1_start_block,
l1_outbox_address: outbox_address,
l1_sequencer_inbox_address: sequencer_inbox_address
})
)
|> Map.put(
:data,
Map.merge(state.data, %{
new_batches_start_block: new_batches_start_block,
historical_batches_end_block: historical_batches_end_block,
new_confirmations_start_block: new_confirmations_start_block,
historical_confirmations_end_block: nil,
historical_confirmations_start_block: nil,
new_executions_start_block: new_executions_start_block,
historical_executions_end_block: historical_executions_end_block
})
)
{:noreply, new_state}
end
# Initiates the process of discovering and handling new batches of rollup transactions.
#
# This function fetches logs within the calculated L1 block range to identify new
# batches of rollup transactions. The discovered batches and their corresponding
# rollup blocks and transactions are processed and linked. The L2-to-L1 messages
# included in these rollup blocks are also updated to reflect their commitment.
#
# After processing, it immediately transitions to checking new confirmations of
# rollup blocks by sending the `:check_new_confirmations` message.
#
# ## Parameters
# - `:check_new_batches`: The message that triggers the function.
# - `state`: The current state of the fetcher, containing configuration and data
# needed for the discovery of new batches.
#
# ## Returns
# - `{:noreply, new_state}` where `new_state` is updated with the new start block for
# the next iteration of new batch discovery.
@impl GenServer
def handle_info(:check_new_batches, state) do
{handle_duration, {:ok, end_block}} = :timer.tc(&NewBatches.discover_new_batches/1, [state])
Process.send(self(), :check_new_confirmations, [])
new_data =
Map.merge(state.data, %{
duration: increase_duration(state.data, handle_duration),
new_batches_start_block: end_block + 1
})
{:noreply, %{state | data: new_data}}
end
# Initiates the discovery and processing of new confirmations for rollup blocks.
#
# This function fetches logs within the calculated L1 block range to identify
# new confirmations for rollup blocks. The discovered confirmations are
# processed to update the status of rollup blocks and L2-to-L1 messages
# accordingly.
#
# After processing, it immediately transitions to discovering new executions
# of L2-to-L1 messages by sending the `:check_new_executions` message.
#
# ## Parameters
# - `:check_new_confirmations`: The message that triggers the function.
# - `state`: The current state of the fetcher, containing configuration and
# data needed for the discovery of new rollup block confirmations.
#
# ## Returns
# - `{:noreply, new_state}` where `new_state` is updated with the new start
# block for the next iteration of new confirmation discovery.
@impl GenServer
def handle_info(:check_new_confirmations, state) do
{handle_duration, {retcode, end_block}} = :timer.tc(&NewConfirmations.discover_new_rollup_confirmation/1, [state])
Process.send(self(), :check_new_executions, [])
updated_fields =
case retcode do
:ok -> %{}
_ -> %{historical_confirmations_end_block: nil, historical_confirmations_start_block: nil}
end
|> Map.merge(%{
# credo:disable-for-previous-line Credo.Check.Refactor.PipeChainStart
duration: increase_duration(state.data, handle_duration),
new_confirmations_start_block: end_block + 1
})
new_data = Map.merge(state.data, updated_fields)
{:noreply, %{state | data: new_data}}
end
# Initiates the process of discovering and handling new executions for L2-to-L1 messages.
#
# This function identifies new executions of L2-to-L1 messages by fetching logs
# for the calculated L1 block range. It updates the status of these messages and
# links them with the corresponding lifecycle transactions.
#
# After processing, it immediately transitions to checking historical batches of
# rollup transactions by sending the `:check_historical_batches` message.
#
# ## Parameters
# - `:check_new_executions`: The message that triggers the function.
# - `state`: The current state of the fetcher, containing configuration and data
# needed for the discovery of new message executions.
#
# ## Returns
# - `{:noreply, new_state}` where `new_state` is updated with the new start
# block for the next iteration of new message executions discovery.
@impl GenServer
def handle_info(:check_new_executions, state) do
{handle_duration, {:ok, end_block}} = :timer.tc(&NewL1Executions.discover_new_l1_messages_executions/1, [state])
Process.send(self(), :check_historical_batches, [])
new_data =
Map.merge(state.data, %{
duration: increase_duration(state.data, handle_duration),
new_executions_start_block: end_block + 1
})
{:noreply, %{state | data: new_data}}
end
# Initiates the process of discovering and handling historical batches of rollup transactions.
#
# This function fetches logs within the calculated L1 block range to identify the
# historical batches of rollup transactions. After discovery, the linkage between
# batches and the corresponding rollup blocks and transactions is built. The
# status of the L2-to-L1 messages included in the corresponding rollup blocks is
# also updated.
#
# After processing, it immediately transitions to checking historical
# confirmations of rollup blocks by sending the `:check_historical_confirmations`
# message.
#
# ## Parameters
# - `:check_historical_batches`: The message that triggers the function.
# - `state`: The current state of the fetcher, containing configuration and data
# needed for the discovery of historical batches.
#
# ## Returns
# - `{:noreply, new_state}` where `new_state` is updated with the new end block
# for the next iteration of historical batch discovery.
@impl GenServer
def handle_info(:check_historical_batches, state) do
{handle_duration, {:ok, start_block}} = :timer.tc(&NewBatches.discover_historical_batches/1, [state])
Process.send(self(), :check_historical_confirmations, [])
new_data =
Map.merge(state.data, %{
duration: increase_duration(state.data, handle_duration),
historical_batches_end_block: start_block - 1
})
{:noreply, %{state | data: new_data}}
end
# Initiates the process of discovering and handling historical confirmations of rollup blocks.
#
# This function fetches logs within the calculated range to identify the
# historical confirmations of rollup blocks. The discovered confirmations are
# processed to update the status of rollup blocks and L2-to-L1 messages
# accordingly.
#
# After processing, it immediately transitions to checking historical executions
# of L2-to-L1 messages by sending the `:check_historical_executions` message.
#
# ## Parameters
# - `:check_historical_confirmations`: The message that triggers the function.
# - `state`: The current state of the fetcher, containing configuration and data
# needed for the discovery of historical confirmations.
#
# ## Returns
# - `{:noreply, new_state}` where `new_state` is updated with the new start and
# end blocks for the next iteration of historical confirmations discovery.
@impl GenServer
def handle_info(:check_historical_confirmations, state) do
{handle_duration, {retcode, {start_block, end_block}}} =
:timer.tc(&NewConfirmations.discover_historical_rollup_confirmation/1, [state])
Process.send(self(), :check_historical_executions, [])
updated_fields =
case retcode do
:ok -> %{historical_confirmations_end_block: start_block - 1, historical_confirmations_start_block: end_block}
_ -> %{historical_confirmations_end_block: nil, historical_confirmations_start_block: nil}
end
|> Map.merge(%{
# credo:disable-for-previous-line Credo.Check.Refactor.PipeChainStart
duration: increase_duration(state.data, handle_duration)
})
new_data = Map.merge(state.data, updated_fields)
{:noreply, %{state | data: new_data}}
end
# Initiates the discovery and handling of historical L2-to-L1 message executions.
#
# This function discovers historical executions of L2-to-L1 messages by retrieving
# logs within a specified L1 block range. It updates their status accordingly and
# builds the link between the messages and the lifecycle transactions where they
# are executed.
#
# After processing, it immediately transitions to finalizing lifecycle transactions
# by sending the `:check_lifecycle_txs_finalization` message.
#
# ## Parameters
# - `:check_historical_executions`: The message that triggers the function.
# - `state`: The current state of the fetcher, containing configuration and data
# needed for the discovery of historical executions.
#
# ## Returns
# - `{:noreply, new_state}` where `new_state` is updated with the new end block for
# the next iteration of historical executions.
@impl GenServer
def handle_info(:check_historical_executions, state) do
{handle_duration, {:ok, start_block}} =
:timer.tc(&NewL1Executions.discover_historical_l1_messages_executions/1, [state])
Process.send(self(), :check_lifecycle_txs_finalization, [])
new_data =
Map.merge(state.data, %{
duration: increase_duration(state.data, handle_duration),
historical_executions_end_block: start_block - 1
})
{:noreply, %{state | data: new_data}}
end
# Handles the periodic finalization check of lifecycle transactions.
#
# This function updates the finalization status of lifecycle transactions based on
# the current state of the L1 blockchain. It discovers all transactions that are not
# yet finalized up to the `safe` L1 block and changes their status to `:finalized`.
#
# After processing, as the final handler in the loop, it schedules the
# `:check_new_batches` message to initiate the next iteration. The scheduling of this
# message is delayed to account for the time spent on the previous handlers' execution.
#
# ## Parameters
# - `:check_lifecycle_txs_finalization`: The message that triggers the function.
# - `state`: The current state of the fetcher, containing the configuration needed for
# the lifecycle transactions status update.
#
# ## Returns
# - `{:noreply, new_state}` where `new_state` is the updated state with the reset duration.
@impl GenServer
def handle_info(:check_lifecycle_txs_finalization, state) do
{handle_duration, _} =
if state.config.l1_rpc.track_finalization do
:timer.tc(&L1Finalization.monitor_lifecycle_txs/1, [state])
else
{0, nil}
end
next_timeout = max(state.config.recheck_interval - div(increase_duration(state.data, handle_duration), 1000), 0)
Process.send_after(self(), :check_new_batches, next_timeout)
new_data =
Map.merge(state.data, %{
duration: 0
})
{:noreply, %{state | data: new_data}}
end
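# A worked example of the rescheduling arithmetic above (values are
# illustrative): with a `recheck_interval` of 60_000 ms and an accumulated loop
# duration of 15_000_000 us, the next cycle starts 45 s later; when the loop
# overruns the interval, `max/2` clamps the delay to zero and the next cycle
# starts immediately:
#
#   max(60_000 - div(15_000_000, 1000), 0)  #=> 45_000
#   max(60_000 - div(75_000_000, 1000), 0)  #=> 0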
end

@@ -0,0 +1,223 @@
defmodule Indexer.Fetcher.Arbitrum.TrackingMessagesOnL1 do
@moduledoc """
Manages the tracking and processing of new and historical cross-chain messages initiated on L1 for an Arbitrum rollup.
This module is responsible for continuously monitoring and importing new messages
initiated from Layer 1 (L1) to Arbitrum's Layer 2 (L2), as well as discovering
and processing historical messages that were sent previously but have not yet
been processed.
The fetcher's operation is divided into 3 phases, each initiated by sending
specific messages:
- `:init_worker`: Initializes the worker with the required configuration for message
tracking.
- `:check_new_msgs_to_rollup`: Processes new L1-to-L2 messages appearing on L1 as
the blockchain progresses.
- `:check_historical_msgs_to_rollup`: Retrieves historical L1-to-L2 messages that
were missed if the message synchronization process did not start from the
Arbitrum rollup's inception.
While the `:init_worker` message is sent only once during the fetcher startup,
the subsequent sending of `:check_new_msgs_to_rollup` and
`:check_historical_msgs_to_rollup` forms the operation cycle of the fetcher.
Discovery of L1-to-L2 messages is executed by requesting logs on L1 that correspond
to the `MessageDelivered` event emitted by the Arbitrum bridge contract.
Cross-chain messages are composed of information from the logs' data as well as from
the corresponding transaction details. To get the transaction details, RPC calls
`eth_getTransactionByHash` are made in chunks.
"""
use GenServer
use Indexer.Fetcher
import Indexer.Fetcher.Arbitrum.Utils.Helper, only: [increase_duration: 2]
alias Indexer.Fetcher.Arbitrum.Workers.NewMessagesToL2
alias Indexer.Helper, as: IndexerHelper
alias Indexer.Fetcher.Arbitrum.Utils.{Db, Rpc}
require Logger
def child_spec(start_link_arguments) do
spec = %{
id: __MODULE__,
start: {__MODULE__, :start_link, start_link_arguments},
restart: :transient,
type: :worker
}
Supervisor.child_spec(spec, [])
end
def start_link(args, gen_server_options \\ []) do
GenServer.start_link(__MODULE__, args, Keyword.put_new(gen_server_options, :name, __MODULE__))
end
@impl GenServer
def init(args) do
Logger.metadata(fetcher: :arbitrum_bridge_l1)
config_common = Application.get_all_env(:indexer)[Indexer.Fetcher.Arbitrum]
l1_rpc = config_common[:l1_rpc]
l1_rpc_block_range = config_common[:l1_rpc_block_range]
l1_rollup_address = config_common[:l1_rollup_address]
l1_rollup_init_block = config_common[:l1_rollup_init_block]
l1_start_block = config_common[:l1_start_block]
l1_rpc_chunk_size = config_common[:l1_rpc_chunk_size]
config_tracker = Application.get_all_env(:indexer)[__MODULE__]
recheck_interval = config_tracker[:recheck_interval]
Process.send(self(), :init_worker, [])
{:ok,
%{
config: %{
json_l2_rpc_named_arguments: args[:json_rpc_named_arguments],
json_l1_rpc_named_arguments: IndexerHelper.json_rpc_named_arguments(l1_rpc),
recheck_interval: recheck_interval,
l1_rpc_chunk_size: l1_rpc_chunk_size,
l1_rpc_block_range: l1_rpc_block_range,
l1_rollup_address: l1_rollup_address,
l1_start_block: l1_start_block,
l1_rollup_init_block: l1_rollup_init_block
},
data: %{}
}}
end
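# A hedged sketch of the configuration shape expected by `init/1` (keys are
# inferred from the lookups above; the concrete values are illustrative only):
#
#   config :indexer, Indexer.Fetcher.Arbitrum,
#     l1_rpc: "https://ethereum-rpc.example.com",
#     l1_rpc_block_range: 1_000,
#     l1_rpc_chunk_size: 20,
#     l1_rollup_address: "0x...",
#     l1_rollup_init_block: 1,
#     l1_start_block: 0
#
#   config :indexer, Indexer.Fetcher.Arbitrum.TrackingMessagesOnL1,
#     recheck_interval: :timer.minutes(1)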
@impl GenServer
def handle_info({ref, _result}, state) do
Process.demonitor(ref, [:flush])
{:noreply, state}
end
# Initializes the worker for discovering new and historical L1-to-L2 messages.
#
# This function prepares the initial parameters for the message discovery process.
# It fetches the Arbitrum bridge address and determines the starting block for
# new message discovery. If the starting block is not configured (set to a default
# value), the safe L1 block number (or the latest one, if the safe block is not
# available) is used as the start. It also calculates the end block for
# historical message discovery.
#
# After setting these parameters, it immediately transitions to discovering new
# messages by sending the `:check_new_msgs_to_rollup` message.
#
# ## Parameters
# - `:init_worker`: The message triggering the initialization.
# - `state`: The current state of the process, containing configuration for data
# initialization and further message discovery.
#
# ## Returns
# - `{:noreply, new_state}` where `new_state` is updated with the bridge address,
# determined start block for new messages, and calculated end block for
# historical messages.
@impl GenServer
def handle_info(
:init_worker,
%{config: %{l1_rollup_address: _, json_l1_rpc_named_arguments: _, l1_start_block: _}, data: _} = state
) do
%{bridge: bridge_address} =
Rpc.get_contracts_for_rollup(state.config.l1_rollup_address, :bridge, state.config.json_l1_rpc_named_arguments)
l1_start_block = Rpc.get_l1_start_block(state.config.l1_start_block, state.config.json_l1_rpc_named_arguments)
new_msg_to_l2_start_block = Db.l1_block_to_discover_latest_message_to_l2(l1_start_block)
historical_msg_to_l2_end_block = Db.l1_block_to_discover_earliest_message_to_l2(l1_start_block - 1)
Process.send(self(), :check_new_msgs_to_rollup, [])
new_state =
state
|> Map.put(
:config,
Map.merge(state.config, %{
l1_start_block: l1_start_block,
l1_bridge_address: bridge_address
})
)
|> Map.put(
:data,
Map.merge(state.data, %{
new_msg_to_l2_start_block: new_msg_to_l2_start_block,
historical_msg_to_l2_end_block: historical_msg_to_l2_end_block
})
)
{:noreply, new_state}
end
# Initiates the process to discover and handle new L1-to-L2 messages initiated from L1.
#
# This function discovers new messages from L1 to L2 by retrieving logs for the
# calculated L1 block range. Discovered events are used to compose messages, which
# are then stored in the database.
#
# After processing, the function immediately transitions to discovering historical
# messages by sending the `:check_historical_msgs_to_rollup` message.
#
# ## Parameters
# - `:check_new_msgs_to_rollup`: The message that triggers the handler.
# - `state`: The current state of the fetcher, containing configuration and data
# needed for message discovery.
#
# ## Returns
# - `{:noreply, new_state}` where the starting block for the next new L1-to-L2
# message discovery iteration is updated based on the results of the current
# iteration.
@impl GenServer
def handle_info(:check_new_msgs_to_rollup, %{data: _} = state) do
{handle_duration, {:ok, end_block}} =
:timer.tc(&NewMessagesToL2.discover_new_messages_to_l2/1, [
state
])
Process.send(self(), :check_historical_msgs_to_rollup, [])
new_data =
Map.merge(state.data, %{
duration: increase_duration(state.data, handle_duration),
new_msg_to_l2_start_block: end_block + 1
})
{:noreply, %{state | data: new_data}}
end
# Initiates the process to discover and handle historical L1-to-L2 messages initiated from L1.
#
# This function discovers historical messages by retrieving logs for a calculated L1 block range.
# The discovered events are then used to compose messages to be stored in the database.
#
# After processing, as it is the final handler in the loop, it schedules the
# `:check_new_msgs_to_rollup` message to initiate the next iteration. The scheduling of this
# message is delayed, taking into account the time spent on the previous handler's execution.
#
# ## Parameters
# - `:check_historical_msgs_to_rollup`: The message that triggers the handler.
# - `state`: The current state of the fetcher, containing configuration and data needed for
# message discovery.
#
# ## Returns
# - `{:noreply, new_state}` where the end block for the next L1-to-L2 message discovery
# iteration is updated based on the results of the current iteration.
@impl GenServer
def handle_info(:check_historical_msgs_to_rollup, %{config: %{recheck_interval: _}, data: _} = state) do
{handle_duration, {:ok, start_block}} =
:timer.tc(&NewMessagesToL2.discover_historical_messages_to_l2/1, [
state
])
next_timeout = max(state.config.recheck_interval - div(increase_duration(state.data, handle_duration), 1000), 0)
Process.send_after(self(), :check_new_msgs_to_rollup, next_timeout)
new_data =
Map.merge(state.data, %{
duration: 0,
historical_msg_to_l2_end_block: start_block - 1
})
{:noreply, %{state | data: new_data}}
end
end

@@ -0,0 +1,787 @@
defmodule Indexer.Fetcher.Arbitrum.Utils.Db do
@moduledoc """
Common functions to simplify DB routines for Indexer.Fetcher.Arbitrum fetchers
"""
import Ecto.Query, only: [from: 2]
import Indexer.Fetcher.Arbitrum.Utils.Logging, only: [log_warning: 1]
alias Explorer.{Chain, Repo}
alias Explorer.Chain.Arbitrum.Reader
alias Explorer.Chain.Block, as: FullBlock
alias Explorer.Chain.{Data, Hash, Log}
alias Explorer.Utility.MissingBlockRange
require Logger
# 32-byte signature of the event L2ToL1Tx(address caller, address indexed destination, uint256 indexed hash, uint256 indexed position, uint256 arbBlockNum, uint256 ethBlockNum, uint256 timestamp, uint256 callvalue, bytes data)
@l2_to_l1_event "0x3e7aafa77dbf186b7fd488006beff893744caa3c4f6f299e8a709fa2087374fc"
@doc """
Indexes L1 transactions provided in the input map. For transactions that
are already in the database, existing indices are taken. For new transactions,
the next available indices are assigned.
## Parameters
- `new_l1_txs`: A map of L1 transaction descriptions. The keys of the map are
transaction hashes.
## Returns
- `l1_txs`: A map of L1 transaction descriptions. Each element is extended with
the key `:id`, representing the index of the L1 transaction in the
`arbitrum_lifecycle_l1_transactions` table.
"""
@spec get_indices_for_l1_transactions(map()) :: map()
# TODO: consider a way to remove duplicate with ZkSync.Utils.Db
# credo:disable-for-next-line Credo.Check.Design.DuplicatedCode
def get_indices_for_l1_transactions(new_l1_txs)
when is_map(new_l1_txs) do
# Get indices for l1 transactions previously handled
l1_txs =
new_l1_txs
|> Map.keys()
|> Reader.lifecycle_transactions()
|> Enum.reduce(new_l1_txs, fn {hash, id}, txs ->
{_, txs} =
Map.get_and_update!(txs, hash.bytes, fn l1_tx ->
{l1_tx, Map.put(l1_tx, :id, id)}
end)
txs
end)
# Get the next index for the first new transaction based
# on the indices existing in DB
l1_tx_next_id = Reader.next_lifecycle_transaction_id()
# Assign new indices for the transactions which are not in
# the l1 transactions table yet
{updated_l1_txs, _} =
l1_txs
|> Map.keys()
|> Enum.reduce(
{l1_txs, l1_tx_next_id},
fn hash, {txs, next_id} ->
tx = txs[hash]
id = Map.get(tx, :id)
if is_nil(id) do
{Map.put(txs, hash, Map.put(tx, :id, next_id)), next_id + 1}
else
{txs, next_id}
end
end
)
updated_l1_txs
end
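# An illustrative sketch (hypothetical hashes and ids): if the transaction
# keyed by `hash_a` is already stored under id 5 and the next free index is 6,
# the existing index is kept and the new transaction gets the next one:
#
#   get_indices_for_l1_transactions(%{
#     hash_a => %{hash: hash_a, block_number: 10},
#     hash_b => %{hash: hash_b, block_number: 11}
#   })
#   #=> %{
#   #     hash_a => %{hash: hash_a, block_number: 10, id: 5},
#   #     hash_b => %{hash: hash_b, block_number: 11, id: 6}
#   #   }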
@doc """
Calculates the next L1 block number to search for the latest committed batch.
## Parameters
- `value_if_nil`: The default value to return if no committed batch is found.
## Returns
- The next L1 block number after the latest committed batch or `value_if_nil` if no committed batches are found.
"""
@spec l1_block_to_discover_latest_committed_batch(FullBlock.block_number() | nil) :: FullBlock.block_number() | nil
def l1_block_to_discover_latest_committed_batch(value_if_nil)
when (is_integer(value_if_nil) and value_if_nil >= 0) or is_nil(value_if_nil) do
case Reader.l1_block_of_latest_committed_batch() do
nil ->
log_warning("No committed batches found in DB")
value_if_nil
value ->
value + 1
end
end
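# A usage sketch of the `value_if_nil` convention shared by the discovery
# helpers in this module (numbers are illustrative): with a batch committed at
# L1 block 18_500_000 already in the DB, discovery resumes from the following
# block; on an empty DB the caller-supplied fallback is returned unchanged:
#
#   l1_block_to_discover_latest_committed_batch(17_000_000)
#   #=> 18_500_001  (DB has data)
#   #=> 17_000_000  (DB is empty)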
@doc """
Calculates the L1 block number to start the search for committed batches that precede
the earliest batch already discovered.
## Parameters
- `value_if_nil`: The default value to return if no committed batch is found.
## Returns
- The L1 block number immediately preceding the earliest committed batch,
or `value_if_nil` if no committed batches are found.
"""
@spec l1_block_to_discover_earliest_committed_batch(nil | FullBlock.block_number()) :: nil | FullBlock.block_number()
def l1_block_to_discover_earliest_committed_batch(value_if_nil)
when (is_integer(value_if_nil) and value_if_nil >= 0) or is_nil(value_if_nil) do
case Reader.l1_block_of_earliest_committed_batch() do
nil ->
log_warning("No committed batches found in DB")
value_if_nil
value ->
value - 1
end
end
@doc """
Retrieves the block number of the highest rollup block that has been included in a batch.
## Parameters
- `value_if_nil`: The default value to return if no rollup batches are found.
## Returns
- The number of the highest rollup block included in a batch
or `value_if_nil` if no rollup batches are found.
"""
@spec highest_committed_block(nil | integer()) :: nil | FullBlock.block_number()
def highest_committed_block(value_if_nil)
when is_integer(value_if_nil) or is_nil(value_if_nil) do
case Reader.highest_committed_block() do
nil -> value_if_nil
value -> value
end
end
@doc """
Calculates the next L1 block number to search for the latest message sent to L2.
## Parameters
- `value_if_nil`: The default value to return if no L1-to-L2 messages have been discovered.
## Returns
- The L1 block number immediately following the latest discovered message to L2,
or `value_if_nil` if no messages to L2 have been found.
"""
@spec l1_block_to_discover_latest_message_to_l2(nil | FullBlock.block_number()) :: nil | FullBlock.block_number()
def l1_block_to_discover_latest_message_to_l2(value_if_nil)
when (is_integer(value_if_nil) and value_if_nil >= 0) or is_nil(value_if_nil) do
case Reader.l1_block_of_latest_discovered_message_to_l2() do
nil ->
log_warning("No messages to L2 found in DB")
value_if_nil
value ->
value + 1
end
end
@doc """
Calculates the next L1 block number to start the search for messages sent to L2
that precede the earliest message already discovered.
## Parameters
- `value_if_nil`: The default value to return if no L1-to-L2 messages have been discovered.
## Returns
- The L1 block number immediately preceding the earliest discovered message to L2,
or `value_if_nil` if no messages to L2 have been found.
"""
@spec l1_block_to_discover_earliest_message_to_l2(nil | FullBlock.block_number()) :: nil | FullBlock.block_number()
def l1_block_to_discover_earliest_message_to_l2(value_if_nil)
when (is_integer(value_if_nil) and value_if_nil >= 0) or is_nil(value_if_nil) do
case Reader.l1_block_of_earliest_discovered_message_to_l2() do
nil ->
log_warning("No messages to L2 found in DB")
value_if_nil
value ->
value - 1
end
end
@doc """
Determines the rollup block number to start searching for missed messages originating from L2.
## Parameters
- `value_if_nil`: The default value to return if no messages originating from L2 have been found.
## Returns
- The rollup block number just before the earliest discovered message from L2,
or `value_if_nil` if no messages from L2 are found.
"""
@spec rollup_block_to_discover_missed_messages_from_l2(nil | FullBlock.block_number()) ::
nil | FullBlock.block_number()
def rollup_block_to_discover_missed_messages_from_l2(value_if_nil \\ nil)
when (is_integer(value_if_nil) and value_if_nil >= 0) or is_nil(value_if_nil) do
case Reader.rollup_block_of_earliest_discovered_message_from_l2() do
nil ->
log_warning("No messages from L2 found in DB")
value_if_nil
value ->
value - 1
end
end
@doc """
Determines the rollup block number to start searching for missed messages directed to L2.
## Parameters
- `value_if_nil`: The default value to return if no messages directed to L2 have been found.
## Returns
- The rollup block number just before the earliest discovered message to L2,
or `value_if_nil` if no messages to L2 are found.
"""
@spec rollup_block_to_discover_missed_messages_to_l2(nil | FullBlock.block_number()) :: nil | FullBlock.block_number()
def rollup_block_to_discover_missed_messages_to_l2(value_if_nil \\ nil)
when (is_integer(value_if_nil) and value_if_nil >= 0) or is_nil(value_if_nil) do
case Reader.rollup_block_of_earliest_discovered_message_to_l2() do
nil ->
# In theory, there could be a situation when the earliest message points
# to a completion transaction which is not indexed yet. In this case, this
# warning will occur.
log_warning("No completed messages to L2 found in DB")
value_if_nil
value ->
value - 1
end
end
@doc """
Retrieves the L1 block number immediately following the block where the confirmation transaction
for the highest confirmed rollup block was included.
## Parameters
- `value_if_nil`: The default value to return if no confirmed rollup blocks are found.
## Returns
- The L1 block number immediately after the block containing the confirmation transaction of
the highest confirmed rollup block, or `value_if_nil` if no confirmed rollup blocks are present.
"""
@spec l1_block_of_latest_confirmed_block(nil | FullBlock.block_number()) :: nil | FullBlock.block_number()
def l1_block_of_latest_confirmed_block(value_if_nil)
when (is_integer(value_if_nil) and value_if_nil >= 0) or is_nil(value_if_nil) do
case Reader.l1_block_of_latest_confirmed_block() do
nil ->
log_warning("No confirmed blocks found in DB")
value_if_nil
value ->
value + 1
end
end
@doc """
Retrieves the block number of the highest rollup block for which a confirmation transaction
has been sent to L1.
## Parameters
- `value_if_nil`: The default value to return if no confirmed rollup blocks are found.
## Returns
- The block number of the highest confirmed rollup block,
or `value_if_nil` if no confirmed rollup blocks are found in the database.
"""
@spec highest_confirmed_block(nil | integer()) :: nil | FullBlock.block_number()
def highest_confirmed_block(value_if_nil)
when is_integer(value_if_nil) or is_nil(value_if_nil) do
case Reader.highest_confirmed_block() do
nil -> value_if_nil
value -> value
end
end
@doc """
Determines the next L1 block number to search for the latest execution of an L2-to-L1 message.
## Parameters
- `value_if_nil`: The default value to return if no execution transactions for L2-to-L1 messages
have been recorded.
## Returns
- The L1 block number following the block that contains the latest execution transaction
for an L2-to-L1 message, or `value_if_nil` if no such executions have been found.
"""
@spec l1_block_to_discover_latest_execution(nil | FullBlock.block_number()) :: nil | FullBlock.block_number()
def l1_block_to_discover_latest_execution(value_if_nil)
when (is_integer(value_if_nil) and value_if_nil >= 0) or is_nil(value_if_nil) do
case Reader.l1_block_of_latest_execution() do
nil ->
log_warning("No L1 executions found in DB")
value_if_nil
value ->
value + 1
end
end
@doc """
Determines the L1 block number just before the block that contains the earliest known
execution transaction for an L2-to-L1 message.
## Parameters
- `value_if_nil`: The default value to return if no execution transactions for
L2-to-L1 messages have been found.
## Returns
- The L1 block number preceding the earliest known execution transaction for
an L2-to-L1 message, or `value_if_nil` if no such executions are found in the database.
"""
@spec l1_block_to_discover_earliest_execution(nil | FullBlock.block_number()) :: nil | FullBlock.block_number()
def l1_block_to_discover_earliest_execution(value_if_nil)
when (is_integer(value_if_nil) and value_if_nil >= 0) or is_nil(value_if_nil) do
case Reader.l1_block_of_earliest_execution() do
nil ->
log_warning("No L1 executions found in DB")
value_if_nil
value ->
value - 1
end
end
@doc """
Retrieves full details of rollup blocks, including associated transactions, for each
block number specified in the input list.
## Parameters
- `list_of_block_numbers`: A list of block numbers for which full block details are to be retrieved.
## Returns
- A list of `Explorer.Chain.Block` instances containing detailed information for each
block number in the input list. Returns an empty list if no blocks are found for the given numbers.
"""
@spec rollup_blocks(maybe_improper_list(FullBlock.block_number(), [])) :: [FullBlock]
def rollup_blocks(list_of_block_numbers)
when is_list(list_of_block_numbers) do
query =
from(
block in FullBlock,
where: block.number in ^list_of_block_numbers
)
query
# :optional is used since a block may not have any transactions
|> Chain.join_associations(%{:transactions => :optional})
|> Repo.all(timeout: :infinity)
end
@doc """
Retrieves unfinalized L1 transactions that are involved in changing the statuses
of rollup blocks or transactions.
An L1 transaction is considered unfinalized if it has not yet reached a state
where it is permanently included in the blockchain, meaning it is still susceptible
to potential reorganization or change. Transactions are evaluated against
the `finalized_block` parameter to determine their finalization status.
## Parameters
- `finalized_block`: The block number up to which unfinalized transactions are to be retrieved.
## Returns
- A list of maps representing unfinalized L1 transactions and compatible with the
database import operation.
"""
@spec lifecycle_unfinalized_transactions(FullBlock.block_number()) :: [
%{
id: non_neg_integer(),
hash: Hash,
block_number: FullBlock.block_number(),
timestamp: DateTime,
status: :unfinalized
}
]
def lifecycle_unfinalized_transactions(finalized_block)
when is_integer(finalized_block) and finalized_block >= 0 do
finalized_block
|> Reader.lifecycle_unfinalized_transactions()
|> Enum.map(&lifecycle_transaction_to_map/1)
end
@doc """
Retrieves the block number associated with a specific hash of a rollup block.
## Parameters
- `hash`: The hash of the rollup block whose number is to be retrieved.
## Returns
- The block number associated with the given rollup block hash.
"""
@spec rollup_block_hash_to_num(binary()) :: FullBlock.block_number() | nil
def rollup_block_hash_to_num(hash) when is_binary(hash) do
Reader.rollup_block_hash_to_num(hash)
end
@doc """
Retrieves the L1 batch that includes a specified rollup block number.
## Parameters
- `num`: The block number of the rollup block for which the containing
L1 batch is to be retrieved.
## Returns
- The `Explorer.Chain.Arbitrum.L1Batch` associated with the given rollup block number
if it exists and its commit transaction is loaded.
"""
@spec get_batch_by_rollup_block_number(FullBlock.block_number()) :: Explorer.Chain.Arbitrum.L1Batch | nil
def get_batch_by_rollup_block_number(num)
when is_integer(num) and num >= 0 do
case Reader.get_batch_by_rollup_block_number(num) do
nil ->
nil
batch ->
case batch.commitment_transaction do
nil ->
raise "Incorrect state of the DB: commitment_transaction is not loaded for the batch with number #{num}"
%Ecto.Association.NotLoaded{} ->
raise "Incorrect state of the DB: commitment_transaction is not loaded for the batch with number #{num}"
_ ->
batch
end
end
end
@doc """
Retrieves rollup blocks within a specified block range that have not yet been confirmed.
## Parameters
- `first_block`: The starting block number of the range to search for unconfirmed rollup blocks.
- `last_block`: The ending block number of the range.
## Returns
- A list of maps, each representing an unconfirmed rollup block within the specified range.
If no unconfirmed blocks are found within the range, an empty list is returned.
"""
@spec unconfirmed_rollup_blocks(FullBlock.block_number(), FullBlock.block_number()) :: [
%{
batch_number: non_neg_integer(),
block_number: FullBlock.block_number(),
confirmation_id: non_neg_integer() | nil
}
]
def unconfirmed_rollup_blocks(first_block, last_block)
when is_integer(first_block) and first_block >= 0 and
is_integer(last_block) and first_block <= last_block do
# credo:disable-for-lines:2 Credo.Check.Refactor.PipeChainStart
Reader.unconfirmed_rollup_blocks(first_block, last_block)
|> Enum.map(&rollup_block_to_map/1)
end
@doc """
Counts the number of confirmed rollup blocks in a specified batch.
## Parameters
- `batch_number`: The batch number for which the count of confirmed rollup blocks
is to be determined.
## Returns
- A number of rollup blocks confirmed in the specified batch.
"""
@spec count_confirmed_rollup_blocks_in_batch(non_neg_integer()) :: non_neg_integer()
def count_confirmed_rollup_blocks_in_batch(batch_number)
when is_integer(batch_number) and batch_number >= 0 do
Reader.count_confirmed_rollup_blocks_in_batch(batch_number)
end
@doc """
Retrieves a list of L2-to-L1 messages that have been initiated up to
a specified rollup block number.
## Parameters
- `block_number`: The block number up to which initiated L2-to-L1 messages
should be retrieved.
## Returns
- A list of maps, each representing an initiated L2-to-L1 message compatible with the
database import operation. If no initiated messages are found up to the specified
block number, an empty list is returned.
"""
@spec initiated_l2_to_l1_messages(FullBlock.block_number()) :: [
%{
direction: :from_l2,
message_id: non_neg_integer(),
originator_address: binary(),
originating_transaction_hash: binary(),
originating_transaction_block_number: FullBlock.block_number(),
completion_transaction_hash: nil,
status: :initiated
}
]
def initiated_l2_to_l1_messages(block_number)
when is_integer(block_number) and block_number >= 0 do
# credo:disable-for-lines:2 Credo.Check.Refactor.PipeChainStart
Reader.l2_to_l1_messages(:initiated, block_number)
|> Enum.map(&message_to_map/1)
end
@doc """
Retrieves a list of L2-to-L1 'sent' messages that have been included up to
a specified rollup block number.
A message is considered 'sent' when there is a batch including the transaction
that initiated the message, and this batch has been successfully delivered to L1.
## Parameters
- `block_number`: The block number up to which sent L2-to-L1 messages are to be retrieved.
## Returns
- A list of maps, each representing a sent L2-to-L1 message compatible with the
database import operation. If no messages with the 'sent' status are found by
the specified block number, an empty list is returned.
"""
@spec sent_l2_to_l1_messages(FullBlock.block_number()) :: [
%{
direction: :from_l2,
message_id: non_neg_integer(),
originator_address: binary(),
originating_transaction_hash: binary(),
originating_transaction_block_number: FullBlock.block_number(),
completion_transaction_hash: nil,
status: :sent
}
]
def sent_l2_to_l1_messages(block_number)
when is_integer(block_number) and block_number >= 0 do
# credo:disable-for-lines:2 Credo.Check.Refactor.PipeChainStart
Reader.l2_to_l1_messages(:sent, block_number)
|> Enum.map(&message_to_map/1)
end
@doc """
Retrieves a list of L2-to-L1 'confirmed' messages that have been included up to
a specified rollup block number.
A message is considered 'confirmed' when its transaction was included in a rollup block,
and the confirmation of this block has been delivered to L1.
## Parameters
- `block_number`: The block number up to which confirmed L2-to-L1 messages are to be retrieved.
## Returns
- A list of maps, each representing a confirmed L2-to-L1 message compatible with the
database import operation. If no messages with the 'confirmed' status are found by
the specified block number, an empty list is returned.
"""
@spec confirmed_l2_to_l1_messages(FullBlock.block_number()) :: [
%{
direction: :from_l2,
message_id: non_neg_integer(),
originator_address: binary(),
originating_transaction_hash: binary(),
originating_transaction_block_number: FullBlock.block_number(),
completion_transaction_hash: nil,
status: :confirmed
}
]
def confirmed_l2_to_l1_messages(block_number)
when is_integer(block_number) and block_number >= 0 do
# credo:disable-for-lines:2 Credo.Check.Refactor.PipeChainStart
Reader.l2_to_l1_messages(:confirmed, block_number)
|> Enum.map(&message_to_map/1)
end
@doc """
Checks if the numbers from the provided list correspond to the numbers of indexed batches.
## Parameters
- `batches_numbers`: The list of batch numbers.
## Returns
- A list of batch numbers that are indexed and match the provided list, or `[]`
if none of the batch numbers in the provided list exist in the database. The output list
may be smaller than the input list.
"""
@spec batches_exist([non_neg_integer()]) :: [non_neg_integer()]
def batches_exist(batches_numbers) when is_list(batches_numbers) do
Reader.batches_exist(batches_numbers)
end
@doc """
Reads a list of transactions executing L2-to-L1 messages by their IDs.
## Parameters
- `message_ids`: A list of IDs to retrieve executing transactions for.
## Returns
- A list of `Explorer.Chain.Arbitrum.L1Execution` corresponding to the message IDs from
the input list. The output list may be smaller than the input list if some IDs do not
correspond to any existing transactions.
"""
@spec l1_executions([non_neg_integer()]) :: [Explorer.Chain.Arbitrum.L1Execution]
def l1_executions(message_ids) when is_list(message_ids) do
Reader.l1_executions(message_ids)
end
@doc """
Identifies the range of L1 blocks to investigate for missing confirmations of rollup blocks.
This function determines the L1 block numbers bounding the interval where gaps in rollup block
confirmations might exist. It uses the earliest and latest L1 block numbers associated with
unconfirmed rollup blocks to define this range.
## Parameters
- `right_pos_value_if_nil`: The default value to use for the upper bound of the range if no
confirmed blocks are found.
## Returns
- A tuple containing two elements: the lower and upper bounds of L1 block numbers to check
for missing rollup block confirmations. If the necessary confirmation data is unavailable,
the first element will be `nil`, and the second will be `right_pos_value_if_nil`.
"""
@spec l1_blocks_to_expect_rollup_blocks_confirmation(nil | FullBlock.block_number()) ::
{nil | FullBlock.block_number(), nil | FullBlock.block_number()}
def l1_blocks_to_expect_rollup_blocks_confirmation(right_pos_value_if_nil)
when (is_integer(right_pos_value_if_nil) and right_pos_value_if_nil >= 0) or is_nil(right_pos_value_if_nil) do
case Reader.l1_blocks_of_confirmations_bounding_first_unconfirmed_rollup_blocks_gap() do
nil ->
log_warning("No L1 confirmations found in DB")
{nil, right_pos_value_if_nil}
{nil, newer_confirmation_l1_block} ->
{nil, newer_confirmation_l1_block - 1}
{older_confirmation_l1_block, newer_confirmation_l1_block} ->
{older_confirmation_l1_block + 1, newer_confirmation_l1_block - 1}
end
end
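# An illustrative mapping of the three outcomes above (numbers are
# hypothetical):
#
#   # no confirmations found in the DB at all:
#   l1_blocks_to_expect_rollup_blocks_confirmation(18_000_000)
#   #=> {nil, 18_000_000}
#
#   # the gap is bounded only from above by a confirmation in L1 block 18_100_000:
#   #=> {nil, 18_099_999}
#
#   # the gap is bounded by confirmations in L1 blocks 18_000_000 and 18_100_000:
#   #=> {18_000_001, 18_099_999}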
@doc """
Retrieves all rollup logs in the range of blocks from `start_block` to `end_block`
corresponding to the `L2ToL1Tx` event emitted by the ArbSys contract.
## Parameters
- `start_block`: The starting block number of the range from which to
retrieve the transaction logs containing L2-to-L1 messages.
- `end_block`: The ending block number of the range.
## Returns
- A list of log maps for the `L2ToL1Tx` event where binary values for hashes
and data are decoded into hex strings, containing detailed information about
each event within the specified block range. Returns an empty list if no
relevant logs are found.
"""
@spec l2_to_l1_logs(FullBlock.block_number(), FullBlock.block_number()) :: [
%{
data: String,
index: non_neg_integer(),
first_topic: String,
second_topic: String,
third_topic: String,
fourth_topic: String,
address_hash: String,
transaction_hash: String,
block_hash: String,
block_number: FullBlock.block_number()
}
]
def l2_to_l1_logs(start_block, end_block)
when is_integer(start_block) and start_block >= 0 and
is_integer(end_block) and start_block <= end_block do
arbsys_contract = Application.get_env(:indexer, Indexer.Fetcher.Arbitrum.Messaging)[:arbsys_contract]
query =
from(log in Log,
where:
log.block_number >= ^start_block and
log.block_number <= ^end_block and
log.address_hash == ^arbsys_contract and
log.first_topic == ^@l2_to_l1_event
)
query
|> Repo.all(timeout: :infinity)
|> Enum.map(&logs_to_map/1)
end
@doc """
Returns the 32-byte signature of the event `L2ToL1Tx`
"""
@spec l2_to_l1_event() :: <<_::528>>
def l2_to_l1_event, do: @l2_to_l1_event
@doc """
Determines whether a given range of block numbers has been fully indexed without any missing blocks.
## Parameters
- `start_block`: The starting block number of the range to check for completeness in indexing.
- `end_block`: The ending block number of the range.
## Returns
- `true` if the entire range from `start_block` to `end_block` is indexed and contains no missing
blocks, indicating no intersection with missing block ranges; `false` otherwise.
"""
@spec indexed_blocks?(FullBlock.block_number(), FullBlock.block_number()) :: boolean()
def indexed_blocks?(start_block, end_block)
when is_integer(start_block) and start_block >= 0 and
is_integer(end_block) and start_block <= end_block do
is_nil(MissingBlockRange.intersects_with_range(start_block, end_block))
end
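# A usage sketch (the range is hypothetical): a range counts as fully indexed
# only when it does not intersect any missing-block range, which lets callers
# postpone discovery over gaps the block fetcher has not filled yet:
#
#   if Db.indexed_blocks?(1_000, 2_000) do
#     # safe to inspect rollup data for blocks 1000..2000
#   end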
@doc """
Retrieves the block number for the closest block immediately after a given timestamp.
## Parameters
- `timestamp`: The `DateTime` timestamp for which the closest subsequent block number is sought.
## Returns
- `{:ok, block_number}` where `block_number` is the number of the closest block that occurred
after the specified timestamp.
- `{:error, :not_found}` if no block is found after the specified timestamp.
"""
@spec closest_block_after_timestamp(DateTime.t()) :: {:error, :not_found} | {:ok, FullBlock.block_number()}
def closest_block_after_timestamp(timestamp) do
Chain.timestamp_to_block_number(timestamp, :after, false)
end
defp lifecycle_transaction_to_map(tx) do
[:id, :hash, :block_number, :timestamp, :status]
|> db_record_to_map(tx)
end
defp rollup_block_to_map(block) do
[:batch_number, :block_number, :confirmation_id]
|> db_record_to_map(block)
end
defp message_to_map(message) do
[
:direction,
:message_id,
:originator_address,
:originating_transaction_hash,
:originating_transaction_block_number,
:completion_transaction_hash,
:status
]
|> db_record_to_map(message)
end
defp logs_to_map(log) do
[
:data,
:index,
:first_topic,
:second_topic,
:third_topic,
:fourth_topic,
:address_hash,
:transaction_hash,
:block_hash,
:block_number
]
|> db_record_to_map(log, true)
end
defp db_record_to_map(required_keys, record, encode \\ false) do
required_keys
|> Enum.reduce(%{}, fn key, record_as_map ->
raw_value = Map.get(record, key)
# credo:disable-for-lines:5 Credo.Check.Refactor.Nesting
value =
case raw_value do
%Hash{} -> if(encode, do: Hash.to_string(raw_value), else: raw_value.bytes)
%Data{} -> if(encode, do: Data.to_string(raw_value), else: raw_value.bytes)
_ -> raw_value
end
Map.put(record_as_map, key, value)
end)
end
end

@@ -0,0 +1,86 @@
defmodule Indexer.Fetcher.Arbitrum.Utils.Helper do
@moduledoc """
Provides utility functions to support the handling of Arbitrum-specific data fetching and processing in the indexer.
"""
@doc """
Increases a base duration by an amount specified in a map, if present.
This function takes a map that may contain a duration key and a current duration value.
If the map contains a duration, it is added to the current duration; otherwise, the
current duration is returned unchanged.
## Parameters
- `data`: A map that may contain a `:duration` key with its value representing
the amount of time to add.
- `cur_duration`: The current duration value, to which the duration from the map
will be added if present.
## Returns
- The increased duration.
"""
@spec increase_duration(
%{optional(:duration) => non_neg_integer(), optional(any()) => any()},
non_neg_integer()
) :: non_neg_integer()
def increase_duration(data, cur_duration)
when is_map(data) and is_integer(cur_duration) and cur_duration >= 0 do
if Map.has_key?(data, :duration) do
data.duration + cur_duration
else
cur_duration
end
end
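# For example (values are illustrative):
#
#   increase_duration(%{duration: 10_000}, 5_000)  #=> 15_000
#   increase_duration(%{}, 5_000)                  #=> 5_000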
@doc """
Enriches lifecycle transaction entries with timestamps and status based on provided block information and finalization tracking.
This function takes a map of lifecycle transactions and extends each entry with
a timestamp (extracted from a corresponding map of block numbers to timestamps)
and a status. The status is determined based on whether finalization tracking is enabled.
## Parameters
- `lifecycle_txs`: A map where each key is a transaction identifier, and the value is
a map containing at least the block number (`:block_number`).
- `blocks_to_ts`: A map linking block numbers to their corresponding timestamps.
- `track_finalization?`: A boolean flag indicating whether to mark transactions
as unfinalized or finalized.
## Returns
- An updated map of the same structure as `lifecycle_txs` but with each transaction extended to include:
- `timestamp`: The timestamp of the block in which the transaction is included.
- `status`: Either `:unfinalized` if `track_finalization?` is `true`, or `:finalized` otherwise.
"""
@spec extend_lifecycle_txs_with_ts_and_status(
%{binary() => %{:block_number => non_neg_integer(), optional(any()) => any()}},
%{non_neg_integer() => DateTime.t()},
boolean()
) :: %{
binary() => %{
:block_number => non_neg_integer(),
:timestamp => DateTime.t(),
:status => :unfinalized | :finalized,
optional(any()) => any()
}
}
def extend_lifecycle_txs_with_ts_and_status(lifecycle_txs, blocks_to_ts, track_finalization?)
when is_map(lifecycle_txs) and is_map(blocks_to_ts) and is_boolean(track_finalization?) do
lifecycle_txs
|> Map.keys()
|> Enum.reduce(%{}, fn tx_key, updated_txs ->
Map.put(
updated_txs,
tx_key,
Map.merge(lifecycle_txs[tx_key], %{
timestamp: blocks_to_ts[lifecycle_txs[tx_key].block_number],
status:
if track_finalization? do
:unfinalized
else
:finalized
end
})
)
end)
end
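# An illustrative sketch (the hash and the timestamp are hypothetical), with
# finalization tracking enabled:
#
#   extend_lifecycle_txs_with_ts_and_status(
#     %{tx_hash => %{block_number: 5}},
#     %{5 => ~U[2024-01-01 00:00:00Z]},
#     true
#   )
#   #=> %{tx_hash => %{block_number: 5, timestamp: ~U[2024-01-01 00:00:00Z], status: :unfinalized}}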
end

@@ -0,0 +1,162 @@
defmodule Indexer.Fetcher.Arbitrum.Utils.Logging do
@moduledoc """
Common logging functions for Indexer.Fetcher.Arbitrum fetchers
"""
require Logger
@doc """
A helper function to log a message with debug severity. Uses `Logger.debug` facility.
## Parameters
- `msg`: a message to log
## Returns
`:ok`
"""
@spec log_debug(any()) :: :ok
def log_debug(msg) do
Logger.debug(msg)
end
@doc """
A helper function to log a message with warning severity. Uses `Logger.warning` facility.
## Parameters
- `msg`: a message to log
## Returns
`:ok`
"""
@spec log_warning(any()) :: :ok
def log_warning(msg) do
Logger.warning(msg)
end
@doc """
A helper function to log a message with info severity. Uses `Logger.info` facility.
## Parameters
- `msg`: a message to log
## Returns
`:ok`
"""
@spec log_info(any()) :: :ok
def log_info(msg) do
Logger.info(msg)
end
@doc """
A helper function to log a message with error severity. Uses `Logger.error` facility.
## Parameters
- `msg`: a message to log
## Returns
`:ok`
"""
@spec log_error(any()) :: :ok
def log_error(msg) do
Logger.error(msg)
end
@doc """
A helper function to log progress when handling data items in chunks.
## Parameters
- `prefix`: A prefix for the logging message.
- `data_items_names`: A tuple with singular and plural of data items names
- `chunk`: A list of data items numbers in the current chunk.
- `current_progress`: The total number of data items handled up to this moment.
- `total`: The total number of data items across all chunks.
## Returns
`:ok`
## Examples:
- `log_details_chunk_handling("A message", {"batch", "batches"}, [1, 2, 3], 0, 10)` produces
`A message for batches 1..3. Progress: 30.00%`
- `log_details_chunk_handling("A message", {"batch", "batches"}, [2], 1, 10)` produces
`A message for batch #2.` (single-item chunks are logged without progress)
- `log_details_chunk_handling("A message", {"block", "blocks"}, [35], 0, 1)` produces
`A message for block #35.`
- `log_details_chunk_handling("A message", {"block", "blocks"}, [45, 50, 51, 52, 60], 0, 5)` produces
`A message for blocks 45, 50..52, 60.`
"""
@spec log_details_chunk_handling(binary(), tuple(), list(), non_neg_integer(), non_neg_integer()) :: :ok
def log_details_chunk_handling(prefix, data_items_names, chunk, current_progress, total)
# credo:disable-for-previous-line Credo.Check.Refactor.CyclomaticComplexity
when is_binary(prefix) and is_tuple(data_items_names) and is_list(chunk) and
(is_integer(current_progress) and current_progress >= 0) and
(is_integer(total) and total > 0) do
chunk_length = length(chunk)
progress =
case chunk_length == total do
true ->
""
false ->
percentage =
(current_progress + chunk_length)
|> Decimal.div(total)
|> Decimal.mult(100)
|> Decimal.round(2)
|> Decimal.to_string()
" Progress: #{percentage}%"
end
if chunk_length == 1 do
log_debug("#{prefix} for #{elem(data_items_names, 0)} ##{Enum.at(chunk, 0)}.")
else
log_debug(
"#{prefix} for #{elem(data_items_names, 1)} #{Enum.join(shorten_numbers_list(chunk), ", ")}.#{progress}"
)
end
end
# Transforms a list of numbers into a list of strings where consecutive values
# are combined to be displayed as a range.
#
# ## Parameters
# - `numbers_list`: a list of numbers to be folded into ranges
#
# ## Returns
# `shorten_list` - the resulting list after folding
#
# ## Examples:
# [1, 2, 3] => ["1..3"]
# [1, 3] => ["1", "3"]
# [1, 2] => ["1..2"]
# [1, 3, 4, 5] => ["1", "3..5"]
defp shorten_numbers_list(numbers_list) do
{shorten_list, _, _} =
numbers_list
|> Enum.sort()
|> Enum.reduce({[], nil, nil}, fn number, {shorten_list, prev_range_start, prev_number} ->
shorten_numbers_list_impl(number, shorten_list, prev_range_start, prev_number)
end)
|> then(fn {shorten_list, prev_range_start, prev_number} ->
shorten_numbers_list_impl(prev_number, shorten_list, prev_range_start, prev_number)
end)
Enum.reverse(shorten_list)
end
defp shorten_numbers_list_impl(number, shorten_list, prev_range_start, prev_number) do
cond do
is_nil(prev_number) ->
{[], number, number}
prev_number + 1 != number and prev_range_start == prev_number ->
{["#{prev_range_start}" | shorten_list], number, number}
prev_number + 1 != number ->
{["#{prev_range_start}..#{prev_number}" | shorten_list], number, number}
true ->
{shorten_list, prev_range_start, number}
end
end
end

@@ -0,0 +1,391 @@
defmodule Indexer.Fetcher.Arbitrum.Utils.Rpc do
@moduledoc """
Common functions to simplify RPC routines for Indexer.Fetcher.Arbitrum fetchers
"""
import EthereumJSONRPC,
only: [json_rpc: 2, quantity_to_integer: 1, timestamp_to_datetime: 1]
alias EthereumJSONRPC.Transport
alias Indexer.Helper, as: IndexerHelper
@zero_hash "0000000000000000000000000000000000000000000000000000000000000000"
@rpc_resend_attempts 20
@selector_outbox "ce11e6ab"
@selector_sequencer_inbox "ee35f327"
@selector_bridge "e78cea92"
@rollup_contract_abi [
%{
"inputs" => [],
"name" => "outbox",
"outputs" => [
%{
"internalType" => "address",
"name" => "",
"type" => "address"
}
],
"stateMutability" => "view",
"type" => "function"
},
%{
"inputs" => [],
"name" => "sequencerInbox",
"outputs" => [
%{
"internalType" => "address",
"name" => "",
"type" => "address"
}
],
"stateMutability" => "view",
"type" => "function"
},
%{
"inputs" => [],
"name" => "bridge",
"outputs" => [
%{
"internalType" => "address",
"name" => "",
"type" => "address"
}
],
"stateMutability" => "view",
"type" => "function"
}
]
@doc """
Constructs a JSON RPC request to retrieve a transaction by its hash.
## Parameters
- `%{hash: tx_hash, id: id}`: A map containing the transaction hash (`tx_hash`) and
an identifier (`id`) for the request, which can be used later to establish
correspondence between requests and responses.
## Returns
- A `Transport.request()` struct representing the JSON RPC request for fetching
the transaction details associated with the given hash.
"""
@spec transaction_by_hash_request(%{hash: EthereumJSONRPC.hash(), id: non_neg_integer()}) :: Transport.request()
def transaction_by_hash_request(%{id: id, hash: tx_hash})
when is_binary(tx_hash) and is_integer(id) do
EthereumJSONRPC.request(%{id: id, method: "eth_getTransactionByHash", params: [tx_hash]})
end
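# A usage sketch (the hash is hypothetical): the helper returns a standard
# `EthereumJSONRPC` request map, so it can be batched with other requests:
#
#   transaction_by_hash_request(%{id: 7, hash: "0x1234...abcd"})
#   #=> %{id: 7, jsonrpc: "2.0", method: "eth_getTransactionByHash", params: ["0x1234...abcd"]}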
@doc """
Retrieves specific contract addresses associated with the Arbitrum rollup contract.
This function fetches the addresses of the bridge, sequencer inbox, and outbox
contracts related to the specified Arbitrum rollup address. It invokes one of
the contract methods `bridge()`, `sequencerInbox()`, or `outbox()` based on
the `contracts_set` parameter to obtain the required information.
## Parameters
- `rollup_address`: The address of the Arbitrum rollup contract from which
information is being retrieved.
- `contracts_set`: A symbol indicating the set of contracts to retrieve (`:bridge`
for the bridge contract, `:inbox_outbox` for the sequencer
inbox and outbox contracts).
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
## Returns
- A map with keys corresponding to the contract types (`:bridge`, `:sequencer_inbox`,
`:outbox`) and values representing the contract addresses.
"""
@spec get_contracts_for_rollup(
EthereumJSONRPC.address(),
:bridge | :inbox_outbox,
EthereumJSONRPC.json_rpc_named_arguments()
) :: %{(:bridge | :sequencer_inbox | :outbox) => binary()}
def get_contracts_for_rollup(rollup_address, contracts_set, json_rpc_named_arguments)
def get_contracts_for_rollup(rollup_address, :bridge, json_rpc_named_arguments) do
call_simple_getters_in_rollup_contract(rollup_address, [@selector_bridge], json_rpc_named_arguments)
end
def get_contracts_for_rollup(rollup_address, :inbox_outbox, json_rpc_named_arguments) do
call_simple_getters_in_rollup_contract(
rollup_address,
[@selector_sequencer_inbox, @selector_outbox],
json_rpc_named_arguments
)
end
# Calls getter functions on a rollup contract and collects their return values.
#
# This function is designed to interact with a rollup contract and invoke specified getter methods.
# It creates a list of requests for each method ID, executes these requests with retries as needed,
# and then maps the results to the corresponding method IDs.
#
# ## Parameters
# - `rollup_address`: The address of the rollup contract to interact with.
# - `method_ids`: A list of method identifiers representing the getter functions to be called.
# - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
#
# ## Returns
# - A map where each key is a method identifier converted to an atom, and each value is the
# response from calling the respective method on the contract.
defp call_simple_getters_in_rollup_contract(rollup_address, method_ids, json_rpc_named_arguments) do
method_ids
|> Enum.map(fn method_id ->
%{
contract_address: rollup_address,
method_id: method_id,
args: []
}
end)
|> IndexerHelper.read_contracts_with_retries(@rollup_contract_abi, json_rpc_named_arguments, @rpc_resend_attempts)
|> Kernel.elem(0)
|> Enum.zip(method_ids)
|> Enum.reduce(%{}, fn {{:ok, [response]}, method_id}, retval ->
Map.put(retval, atomized_key(method_id), response)
end)
end
@doc """
Executes a batch of RPC calls and returns a list of response bodies.
This function processes a list of RPC requests and returns only the response bodies,
discarding the request IDs. The function is designed for scenarios where only
the response data is required, and the association with request IDs is not needed.
## Parameters
- `requests_list`: A list of `Transport.request()` instances representing the RPC calls to be made.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
- `help_str`: A string that helps identify the request type in log messages, used for error logging.
## Returns
- A list containing the bodies of the RPC call responses. This list will include both
successful responses and errors encountered during the batch execution. The developer
must handle these outcomes as appropriate.
"""
@spec make_chunked_request([Transport.request()], EthereumJSONRPC.json_rpc_named_arguments(), binary()) :: list()
def make_chunked_request(requests_list, json_rpc_named_arguments, help_str)
def make_chunked_request([], _, _) do
[]
end
def make_chunked_request(requests_list, json_rpc_named_arguments, help_str)
when is_list(requests_list) and is_binary(help_str) do
requests_list
|> make_chunked_request_keep_id(json_rpc_named_arguments, help_str)
|> Enum.map(fn %{result: resp_body} -> resp_body end)
end
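# A usage sketch (hashes are hypothetical): pairing this helper with
# `transaction_by_hash_request/1` to fetch several transactions in one batch;
# any errors come back in the same list and must be handled by the caller:
#
#   [%{id: 0, hash: hash_a}, %{id: 1, hash: hash_b}]
#   |> Enum.map(&transaction_by_hash_request/1)
#   |> make_chunked_request(json_rpc_named_arguments, "eth_getTransactionByHash")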
@doc """
Executes a batch of RPC calls while preserving the original request IDs in the responses.
This function processes a list of RPC requests in batches, retaining the association
between the requests and their responses to ensure that each response can be traced
back to its corresponding request.
## Parameters
- `requests_list`: A list of `Transport.request()` instances representing the RPC calls to be made.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
- `help_str`: A string that helps identify the request type in log messages, used for error logging.
## Returns
- A list of maps, each containing the `id` and `result` from the RPC response, maintaining
the same order and ID as the original request. If the batch execution encounters errors
that cannot be resolved after the defined number of retries, the function will log
the errors using the provided `help_str` for context and will return a list of responses
where each element is either the result of a successful call or an error description.
It is the responsibility of the developer to distinguish between successful responses
and errors and handle them appropriately.
"""
@spec make_chunked_request_keep_id([Transport.request()], EthereumJSONRPC.json_rpc_named_arguments(), binary()) ::
[%{id: non_neg_integer(), result: any()}]
def make_chunked_request_keep_id(requests_list, json_rpc_named_arguments, help_str)
def make_chunked_request_keep_id([], _, _) do
[]
end
def make_chunked_request_keep_id(requests_list, json_rpc_named_arguments, help_str)
when is_list(requests_list) and is_binary(help_str) do
error_message_generator = &"Cannot call #{help_str}. Error: #{inspect(&1)}"
{:ok, responses} =
IndexerHelper.repeated_batch_rpc_call(
requests_list,
json_rpc_named_arguments,
error_message_generator,
@rpc_resend_attempts
)
responses
end
@doc """
Executes a list of block requests, retrieves their timestamps, and returns a map of block numbers to timestamps.
## Parameters
- `blocks_requests`: A list of `Transport.request()` instances representing the block
information requests.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
- `chunk_size`: The number of requests to be processed in each batch, defining the size of the chunks.
## Returns
- A map where each key is a block number and each value is the corresponding timestamp.
"""
@spec execute_blocks_requests_and_get_ts(
[Transport.request()],
EthereumJSONRPC.json_rpc_named_arguments(),
non_neg_integer()
) :: %{EthereumJSONRPC.block_number() => DateTime.t()}
def execute_blocks_requests_and_get_ts(blocks_requests, json_rpc_named_arguments, chunk_size)
when is_list(blocks_requests) and is_integer(chunk_size) do
blocks_requests
|> Enum.chunk_every(chunk_size)
|> Enum.reduce(%{}, fn chunk, result ->
chunk
|> make_chunked_request(json_rpc_named_arguments, "eth_getBlockByNumber")
|> Enum.reduce(result, fn resp, result_inner ->
Map.put(result_inner, quantity_to_integer(resp["number"]), timestamp_to_datetime(resp["timestamp"]))
end)
end)
end
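# For example (response fields are hypothetical), a fetched block body with
# `"number" => "0x64"` and `"timestamp" => "0x65A1C2E0"` contributes the entry:
#
#   %{100 => timestamp_to_datetime("0x65A1C2E0")}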
@doc """
Executes a list of transaction requests and retrieves the sender (from) addresses for each.
## Parameters
- `txs_requests`: A list of `Transport.request()` instances representing the transaction requests.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
- `chunk_size`: The number of requests to be processed in each batch, defining the size of the chunks.
## Returns
- A map where each key is a transaction hash and each value is the corresponding sender's address.
"""
@spec execute_transactions_requests_and_get_from(
[Transport.request()],
EthereumJSONRPC.json_rpc_named_arguments(),
non_neg_integer()
) :: %{EthereumJSONRPC.hash() => EthereumJSONRPC.address()}
def execute_transactions_requests_and_get_from(txs_requests, json_rpc_named_arguments, chunk_size)
when is_list(txs_requests) and is_integer(chunk_size) do
txs_requests
|> Enum.chunk_every(chunk_size)
|> Enum.reduce(%{}, fn chunk, result ->
chunk
|> make_chunked_request(json_rpc_named_arguments, "eth_getTransactionByHash")
|> Enum.reduce(result, fn resp, result_inner ->
Map.put(result_inner, resp["hash"], resp["from"])
end)
end)
end
@doc """
Retrieves the block number associated with a given block hash using the Ethereum JSON RPC `eth_getBlockByHash` method, with retry logic for handling request failures.
## Parameters
- `hash`: The hash of the block for which the block number is requested.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
## Returns
- The block number if the block is found and successfully retrieved, or `nil`
if the block cannot be fetched or the block number is not present in the response.
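## Example

A hypothetical call; the hash and the returned number are illustrative:

    get_block_number_by_hash("0x6f37...", json_rpc_named_arguments)
    # => 19_000_000 (or nil if the block cannot be fetched)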
"""
@spec get_block_number_by_hash(EthereumJSONRPC.hash(), EthereumJSONRPC.json_rpc_named_arguments()) ::
EthereumJSONRPC.block_number() | nil
def get_block_number_by_hash(hash, json_rpc_named_arguments) do
func = &do_get_block_number_by_hash/2
args = [hash, json_rpc_named_arguments]
error_message = &"Cannot fetch block #{hash} or its number. Error: #{inspect(&1)}"
case IndexerHelper.repeated_call(func, args, error_message, @rpc_resend_attempts) do
{:error, _} -> nil
{:ok, res} -> res
end
end
defp do_get_block_number_by_hash(hash, json_rpc_named_arguments) do
# credo:disable-for-lines:3 Credo.Check.Refactor.PipeChainStart
result =
EthereumJSONRPC.request(%{id: 0, method: "eth_getBlockByHash", params: [hash, false]})
|> json_rpc(json_rpc_named_arguments)
with {:ok, block} <- result,
false <- is_nil(block),
number <- Map.get(block, "number"),
false <- is_nil(number) do
{:ok, quantity_to_integer(number)}
else
{:error, message} ->
{:error, message}
true ->
{:error, "RPC returned nil."}
end
end
@doc """
Determines the starting block number for further operations with L1 based on configuration and network status.
This function selects the starting block number for operations involving L1.
If the configured block number is `0`, it attempts to retrieve the safe block number
from the network. Should the safe block number not be available (if the endpoint does
not support this feature), the latest block number is used instead. If a non-zero block
number is configured, that number is used directly.
## Parameters
- `configured_number`: The block number configured for starting operations.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
## Returns
- The block number from which to start further operations with L1, determined based
on the provided configuration and network capabilities.
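## Example

Illustrative behavior under the two configurations (block numbers are hypothetical):

    get_l1_start_block(0, json_rpc_named_arguments)
    # => 19_000_000 (the current safe, or latest, block reported by the endpoint)

    get_l1_start_block(18_500_000, json_rpc_named_arguments)
    # => 18_500_000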
"""
@spec get_l1_start_block(EthereumJSONRPC.block_number(), EthereumJSONRPC.json_rpc_named_arguments()) ::
EthereumJSONRPC.block_number()
def get_l1_start_block(configured_number, json_rpc_named_arguments) do
if configured_number == 0 do
{block_number, _} = IndexerHelper.get_safe_block(json_rpc_named_arguments)
block_number
else
configured_number
end
end
@doc """
Converts a transaction hash from its hexadecimal string representation to a binary format.
## Parameters
- `hash`: The transaction hash as a hex string, which can be `nil`. If `nil`, a default zero hash value is used.
## Returns
- The binary representation of the hash. If the input is `nil`, returns the binary form of the default zero hash.
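## Example

Illustrative only (a real transaction hash is 32 bytes long):

    string_hash_to_bytes_hash("0xc0ffee")
    # => <<0xC0, 0xFF, 0xEE>>

    string_hash_to_bytes_hash(nil)
    # => the binary form of the zero hash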
"""
@spec string_hash_to_bytes_hash(EthereumJSONRPC.hash() | nil) :: binary()
def string_hash_to_bytes_hash(hash) do
hash
|> json_tx_id_to_hash()
|> Base.decode16!(case: :mixed)
end
defp json_tx_id_to_hash(hash) do
case hash do
"0x" <> tx_hash -> tx_hash
nil -> @zero_hash
end
end
@doc """
Retrieves the hardcoded number of resend attempts for RPC calls.
## Returns
- The number of resend attempts.
"""
@spec get_resend_attempts() :: non_neg_integer()
def get_resend_attempts do
@rpc_resend_attempts
end
defp atomized_key(@selector_outbox), do: :outbox
defp atomized_key(@selector_sequencer_inbox), do: :sequencer_inbox
defp atomized_key(@selector_bridge), do: :bridge
end

@@ -0,0 +1,284 @@
defmodule Indexer.Fetcher.Arbitrum.Workers.HistoricalMessagesOnL2 do
@moduledoc """
Handles the discovery and processing of historical messages between Layer 1 (L1) and Layer 2 (L2) within an Arbitrum rollup.
L1-to-L2 messages are discovered by requesting rollup transactions through RPC.
This is necessary because some Arbitrum-specific fields are not included in the
already indexed transactions within the database.
L2-to-L1 messages are discovered by analyzing the logs of already indexed rollup
transactions.
"""
import Indexer.Fetcher.Arbitrum.Utils.Logging, only: [log_warning: 1, log_info: 1]
alias EthereumJSONRPC.Block.ByNumber, as: BlockByNumber
alias EthereumJSONRPC.Transaction, as: TransactionByRPC
alias Explorer.Chain
alias Indexer.Fetcher.Arbitrum.Messaging
alias Indexer.Fetcher.Arbitrum.Utils.{Db, Logging, Rpc}
require Logger
@doc """
Initiates the discovery process for historical messages sent from L2 to L1 up to a specified block number.
This function orchestrates the discovery of historical messages from L2 to L1
by analyzing the rollup logs representing the `L2ToL1Tx` event. It determines
the starting block for the discovery process and verifies that the relevant
rollup block range has been indexed before proceeding with the discovery and
data import. During the import process, each message is assigned the
appropriate status based on the current rollup state.
## Parameters
- `end_block`: The ending block number up to which the discovery should occur.
If `nil` or negative, the function returns with no action taken.
- `state`: Contains the operational configuration, including the depth of
blocks to consider for the starting point of message discovery.
## Returns
- `{:ok, nil}`: If `end_block` is `nil`, indicating no discovery action was required.
- `{:ok, 0}`: If `end_block` is negative, indicating that the genesis of the
  blockchain was reached.
- `{:ok, start_block}`: Upon successful discovery of historical messages, where
`start_block` indicates the necessity to consider another
block range in the next iteration of message discovery.
- `{:ok, end_block + 1}`: If the required block range is not fully indexed,
indicating that the next iteration of message discovery
should start with the same block range.
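## Example

A worked sketch, assuming `messages_from_l2_blocks_depth` is 100 and blocks
901..1000 are indexed (numbers are illustrative):

    discover_historical_messages_from_l2(1000, state)
    # covers blocks 901..1000 and returns {:ok, 901};
    # if some of these blocks were not indexed, {:ok, 1001} would be returned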
"""
@spec discover_historical_messages_from_l2(nil | integer(), %{
:config => %{
:messages_from_l2_blocks_depth => non_neg_integer(),
optional(any()) => any()
},
optional(any()) => any()
}) :: {:ok, nil | non_neg_integer()}
def discover_historical_messages_from_l2(end_block, state)
def discover_historical_messages_from_l2(end_block, _) when is_nil(end_block) do
{:ok, nil}
end
def discover_historical_messages_from_l2(end_block, _)
when is_integer(end_block) and end_block < 0 do
{:ok, 0}
end
def discover_historical_messages_from_l2(
end_block,
%{config: %{messages_from_l2_blocks_depth: messages_from_l2_blocks_depth}} = _state
)
when is_integer(end_block) and is_integer(messages_from_l2_blocks_depth) and
messages_from_l2_blocks_depth > 0 do
start_block = max(0, end_block - messages_from_l2_blocks_depth + 1)
if Db.indexed_blocks?(start_block, end_block) do
do_discover_historical_messages_from_l2(start_block, end_block)
else
log_warning(
"Unable to discover historical messages from L2: some blocks in #{start_block}..#{end_block} are not indexed"
)
{:ok, end_block + 1}
end
end
# Discovers and processes historical messages sent from L2 to L1 within a specified rollup block range.
#
# This function fetches relevant rollup logs from the database representing messages sent
# from L2 to L1 (the `L2ToL1Tx` event) between the specified `start_block` and `end_block`.
# If any logs are found, they are used to construct message structures, which are then
# imported into the database. As part of the message construction, the appropriate status
# of the message (initialized, sent, or confirmed) is determined based on the current rollup
# state.
#
# ## Parameters
# - `start_block`: The starting block number for the discovery range.
# - `end_block`: The ending block number for the discovery range.
#
# ## Returns
# - `{:ok, start_block}`: A tuple indicating successful processing, returning the initial
# starting block number.
defp do_discover_historical_messages_from_l2(start_block, end_block) do
log_info("Block range for discovery historical messages from L2: #{start_block}..#{end_block}")
logs = Db.l2_to_l1_logs(start_block, end_block)
unless logs == [] do
messages =
logs
|> Messaging.handle_filtered_l2_to_l1_messages(__MODULE__)
import_to_db(messages)
end
{:ok, start_block}
end
@doc """
Initiates the discovery of historical messages sent from L1 to L2 up to a specified block number.
This function orchestrates the process of discovering historical L1-to-L2 messages within
a given rollup block range, based on the existence of the `requestId` field in the rollup
transaction body. Transactions are requested through RPC because already indexed
transactions from the database cannot be utilized; the `requestId` field is not included
in the transaction model. The function ensures that the block range has been indexed
before proceeding with message discovery and import. The imported messages are marked as
`:relayed`, as they represent completed actions from L1 to L2.
## Parameters
- `end_block`: The ending block number for the discovery operation. If `nil` or negative,
the function returns immediately with no action.
- `state`: The current state of the operation, containing configuration parameters
including `messages_to_l2_blocks_depth`, `chunk_size`, and JSON RPC connection settings.
## Returns
- `{:ok, nil}`: If `end_block` is `nil`, indicating no action was necessary.
- `{:ok, 0}`: If `end_block` is negative, indicating that the genesis of the
  blockchain was reached.
- `{:ok, start_block}`: On successful completion of historical message discovery, where
`start_block` indicates the necessity to consider another block
range in the next iteration of message discovery.
- `{:ok, end_block + 1}`: If the required block range is not fully indexed, indicating
that the next iteration of message discovery should start with
the same block range.
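## Example

Assuming `messages_to_l2_blocks_depth` is 250 (numbers are illustrative), a call
with `end_block` 5000 covers blocks 4751..5000 and, on success, returns
`{:ok, 4751}`; if some of these blocks are not yet indexed, it returns
`{:ok, 5001}`.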
"""
@spec discover_historical_messages_to_l2(nil | integer(), %{
:config => %{
:messages_to_l2_blocks_depth => non_neg_integer(),
:rollup_rpc => %{
:chunk_size => non_neg_integer(),
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
optional(any()) => any()
},
optional(any()) => any()
},
optional(any()) => any()
}) :: {:ok, nil | non_neg_integer()}
def discover_historical_messages_to_l2(end_block, state)
def discover_historical_messages_to_l2(end_block, _) when is_nil(end_block) do
{:ok, nil}
end
def discover_historical_messages_to_l2(end_block, _)
when is_integer(end_block) and end_block < 0 do
{:ok, 0}
end
def discover_historical_messages_to_l2(end_block, %{config: %{messages_to_l2_blocks_depth: _} = config} = _state)
when is_integer(end_block) do
start_block = max(0, end_block - config.messages_to_l2_blocks_depth + 1)
# Although indexing blocks is not necessary to determine the completion of L1-to-L2 messages,
# for database consistency, it is preferable to delay marking these messages as completed.
if Db.indexed_blocks?(start_block, end_block) do
do_discover_historical_messages_to_l2(start_block, end_block, config)
else
log_warning(
"Unable to discover historical messages to L2: some blocks in #{start_block}..#{end_block} are not indexed"
)
{:ok, end_block + 1}
end
end
# The function iterates through the block range in chunks, making RPC calls to fetch rollup block
# data and extract transactions. Each transaction is filtered for L1-to-L2 messages based on
# the existence of the `requestId` field in the transaction body, and then imported into the database.
# The imported messages are marked as `:relayed` as they represent completed actions from L1 to L2.
#
# Already indexed transactions from the database cannot be used because the `requestId` field is
# not included in the transaction model.
#
# ## Parameters
# - `start_block`: The starting block number for the discovery range.
# - `end_block`: The ending block number for the discovery range.
# - `config`: The configuration map containing settings for RPC communication and chunk size.
#
# ## Returns
# - `{:ok, start_block}`: A tuple indicating successful processing, returning the initial
# starting block number.
defp do_discover_historical_messages_to_l2(
start_block,
end_block,
%{rollup_rpc: %{chunk_size: chunk_size, json_rpc_named_arguments: json_rpc_named_arguments}} = _config
) do
log_info("Block range for discovery historical messages to L2: #{start_block}..#{end_block}")
{messages, _} =
start_block..end_block
|> Enum.chunk_every(chunk_size)
|> Enum.reduce({[], 0}, fn chunk, {messages_acc, chunks_counter} ->
Logging.log_details_chunk_handling(
"Collecting rollup data",
{"block", "blocks"},
chunk,
chunks_counter,
end_block - start_block + 1
)
# Since the DB does not contain the Arbitrum-specific `requestId` field,
# all transactions are requested from the rollup RPC endpoint. The catch-up
# process is intended to run once, and only for Blockscout instances that
# already exist, so it does not make sense to introduce a new field in the DB.
requests = build_block_by_number_requests(chunk)
messages =
requests
|> Rpc.make_chunked_request(json_rpc_named_arguments, "eth_getBlockByNumber")
|> get_transactions()
|> Enum.map(fn tx ->
tx
|> TransactionByRPC.to_elixir()
|> TransactionByRPC.elixir_to_params()
end)
|> Messaging.filter_l1_to_l2_messages(false)
{messages ++ messages_acc, chunks_counter + length(chunk)}
end)
unless messages == [] do
log_info("#{length(messages)} completions of L1-to-L2 messages will be imported")
end
import_to_db(messages)
{:ok, start_block}
end
# Constructs a list of `eth_getBlockByNumber` requests for a given list of block numbers.
defp build_block_by_number_requests(block_numbers) do
block_numbers
|> Enum.reduce([], fn block_num, requests_list ->
[
BlockByNumber.request(%{
id: block_num,
number: block_num
})
| requests_list
]
end)
end
# Aggregates transactions from a list of blocks, combining them into a single list.
defp get_transactions(blocks_by_rpc) do
blocks_by_rpc
|> Enum.reduce([], fn block_by_rpc, txs ->
block_by_rpc["transactions"] ++ txs
end)
end
# Imports a list of messages into the database.
defp import_to_db(messages) do
{:ok, _} =
Chain.import(%{
arbitrum_messages: %{params: messages},
timeout: :infinity
})
end
end

@@ -0,0 +1,74 @@
defmodule Indexer.Fetcher.Arbitrum.Workers.L1Finalization do
@moduledoc """
Oversees the finalization of lifecycle transactions on Layer 1 (L1) for Arbitrum rollups.
This module is tasked with monitoring and updating the status of Arbitrum
lifecycle transactions that are related to the rollup process. It ensures that
transactions which have been confirmed up to the 'safe' block number on L1 are
marked as 'finalized' within the system's database.
"""
import Indexer.Fetcher.Arbitrum.Utils.Logging, only: [log_info: 1]
alias Indexer.Helper, as: IndexerHelper
alias Indexer.Fetcher.Arbitrum.Utils.{Db, Rpc}
alias Explorer.Chain
require Logger
@doc """
Monitors and updates the status of lifecycle transactions related to an Arbitrum rollup to 'finalized'.
This function retrieves the current 'safe' block number from L1 and identifies
lifecycle transactions that are not yet finalized up to this block. It then
updates the status of these transactions to 'finalized' and imports the updated
data into the database.
## Parameters
- A map containing:
- `config`: Configuration settings including JSON RPC arguments for L1 used
to fetch the 'safe' block number.
## Returns
- `:ok`
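## Example

A minimal sketch of the expected argument shape (the connection settings are
whatever the fetcher was configured with):

    monitor_lifecycle_txs(%{
      config: %{l1_rpc: %{json_rpc_named_arguments: json_rpc_named_arguments}}
    })
    # => :ok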
"""
@spec monitor_lifecycle_txs(%{
:config => %{
:l1_rpc => %{
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
optional(any()) => any()
},
optional(any()) => any()
},
optional(any()) => any()
}) :: :ok
def monitor_lifecycle_txs(%{config: %{l1_rpc: %{json_rpc_named_arguments: json_rpc_named_arguments}}} = _state) do
{:ok, safe_block} =
IndexerHelper.get_block_number_by_tag(
"safe",
json_rpc_named_arguments,
Rpc.get_resend_attempts()
)
lifecycle_txs = Db.lifecycle_unfinalized_transactions(safe_block)
if length(lifecycle_txs) > 0 do
log_info("Discovered #{length(lifecycle_txs)} lifecycle transaction to be finalized")
updated_lifecycle_txs =
lifecycle_txs
|> Enum.map(fn tx ->
Map.put(tx, :status, :finalized)
end)
{:ok, _} =
Chain.import(%{
arbitrum_lifecycle_transactions: %{params: updated_lifecycle_txs},
timeout: :infinity
})
end
:ok
end
end

@@ -0,0 +1,975 @@
defmodule Indexer.Fetcher.Arbitrum.Workers.NewBatches do
@moduledoc """
Manages the discovery and importation of new and historical batches of transactions for an Arbitrum rollup.
This module orchestrates the discovery of batches of transactions processed
through the Arbitrum Sequencer. It distinguishes between new batches currently
being created and historical batches processed in the past but not yet imported
into the database.
The process involves fetching logs for the `SequencerBatchDelivered` event
emitted by the Arbitrum `SequencerInbox` contract, processing these logs to
extract batch details, and then building the link between batches and the
corresponding rollup blocks and transactions. It also discovers those
cross-chain messages initiated in rollup blocks linked with the new batches
and updates the status of messages to consider them as committed (`:sent`).
For any blocks or transactions missing in the database, data is requested in
chunks from the rollup RPC endpoint by `eth_getBlockByNumber`. Additionally,
to complete batch details and lifecycle transactions, RPC calls to
`eth_getTransactionByHash` and `eth_getBlockByNumber` on L1 are made in chunks
for the necessary information not available in the logs.
"""
alias ABI.{FunctionSelector, TypeDecoder}
import EthereumJSONRPC, only: [quantity_to_integer: 1]
import Indexer.Fetcher.Arbitrum.Utils.Logging, only: [log_info: 1, log_debug: 1]
alias EthereumJSONRPC.Block.ByNumber, as: BlockByNumber
alias Indexer.Helper, as: IndexerHelper
alias Indexer.Fetcher.Arbitrum.Utils.{Db, Logging, Rpc}
alias Explorer.Chain
require Logger
# keccak256("SequencerBatchDelivered(uint256,bytes32,bytes32,bytes32,uint256,(uint64,uint64,uint64,uint64),uint8)")
@message_sequencer_batch_delivered "0x7394f4a19a13c7b92b5bb71033245305946ef78452f7b4986ac1390b5df4ebd7"
@doc """
Discovers and imports new batches of rollup transactions within the current L1 block range.
This function determines the L1 block range for discovering new batches of rollup
transactions. It retrieves logs representing SequencerBatchDelivered events
emitted by the SequencerInbox contract within this range. The logs are processed
to identify new batches and their corresponding details. Comprehensive data
structures for these batches, along with their lifecycle transactions, rollup
blocks, and rollup transactions, are constructed. In addition, the function
updates the status of L2-to-L1 messages that have been committed within these new
batches. All discovered and processed data are then imported into the database.
The process targets only the batches that have not been previously processed,
thereby enhancing efficiency.
## Parameters
- A map containing:
- `config`: Configuration settings including RPC configurations, SequencerInbox
address, a shift for the message to block number mapping, and
a limit for new batches discovery.
- `data`: Contains the starting block number for new batch discovery.
## Returns
- `{:ok, end_block}`: On successful discovery and processing, where `end_block`
indicates the necessity to consider the next block range
in the following iteration of new batch discovery.
- `{:ok, start_block - 1}`: If there are no new blocks to be processed,
indicating that the current start block should be
reconsidered in the next iteration.
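## Example

A worked sketch (numbers are illustrative): assuming `logs_block_range` is 1000,
the start block is 19_000_000, and the latest L1 block is 19_000_450, the
discovery covers 19_000_000..19_000_450 and returns `{:ok, 19_000_450}`.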
"""
@spec discover_new_batches(%{
:config => %{
:l1_rpc => %{
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
:logs_block_range => non_neg_integer(),
optional(any()) => any()
},
:l1_sequencer_inbox_address => binary(),
:messages_to_blocks_shift => non_neg_integer(),
:new_batches_limit => non_neg_integer(),
:rollup_rpc => %{
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
:chunk_size => non_neg_integer(),
optional(any()) => any()
},
optional(any()) => any()
},
:data => %{:new_batches_start_block => non_neg_integer(), optional(any()) => any()},
optional(any()) => any()
}) :: {:ok, non_neg_integer()}
def discover_new_batches(
%{
config: %{
l1_rpc: l1_rpc_config,
rollup_rpc: rollup_rpc_config,
l1_sequencer_inbox_address: sequencer_inbox_address,
messages_to_blocks_shift: messages_to_blocks_shift,
new_batches_limit: new_batches_limit
},
data: %{new_batches_start_block: start_block}
} = _state
) do
# Requesting the "latest" block instead of "safe" allows to catch new batches
# without latency.
{:ok, latest_block} =
IndexerHelper.get_block_number_by_tag(
"latest",
l1_rpc_config.json_rpc_named_arguments,
Rpc.get_resend_attempts()
)
end_block = min(start_block + l1_rpc_config.logs_block_range - 1, latest_block)
if start_block <= end_block do
log_info("Block range for new batches discovery: #{start_block}..#{end_block}")
discover(
sequencer_inbox_address,
start_block,
end_block,
new_batches_limit,
messages_to_blocks_shift,
l1_rpc_config,
rollup_rpc_config
)
{:ok, end_block}
else
{:ok, start_block - 1}
end
end
@doc """
Discovers and imports historical batches of rollup transactions within a specified block range.
This function determines the L1 block range for discovering historical batches
of rollup transactions. Within this range, it retrieves logs representing the
SequencerBatchDelivered events emitted by the SequencerInbox contract. These
logs are processed to identify the batches and their details. The function then
constructs comprehensive data structures for batches, lifecycle transactions,
rollup blocks, and rollup transactions. Additionally, it identifies L2-to-L1
messages that have been committed within these batches and updates their status.
All discovered and processed data are then imported into the database, with the
process targeting only previously undiscovered batches to enhance efficiency.
## Parameters
- A map containing:
- `config`: Configuration settings including the L1 rollup initialization block,
RPC configurations, SequencerInbox address, a shift for the message
to block number mapping, and a limit for new batches discovery.
- `data`: Contains the ending block number for the historical batch discovery.
## Returns
- `{:ok, start_block}`: On successful discovery and processing, where `start_block`
is the calculated starting block for the discovery range,
indicating the need to consider another block range in the
next iteration of historical batch discovery.
- `{:ok, l1_rollup_init_block}`: If the discovery process has reached the rollup
initialization block, indicating that all batches
up to the rollup origins have been discovered and
no further action is needed.
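## Example

A worked sketch (numbers are illustrative): assuming `logs_block_range` is 1000,
`l1_rollup_init_block` is 15_000_000, and `historical_batches_end_block` is
19_000_000, the discovery covers 18_999_001..19_000_000 and returns
`{:ok, 18_999_001}`.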
"""
@spec discover_historical_batches(%{
:config => %{
:l1_rollup_init_block => non_neg_integer(),
:l1_rpc => %{
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
:logs_block_range => non_neg_integer(),
optional(any()) => any()
},
:l1_sequencer_inbox_address => binary(),
:messages_to_blocks_shift => non_neg_integer(),
:new_batches_limit => non_neg_integer(),
:rollup_rpc => %{
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
:chunk_size => non_neg_integer(),
optional(any()) => any()
},
optional(any()) => any()
},
:data => %{:historical_batches_end_block => any(), optional(any()) => any()},
optional(any()) => any()
}) :: {:ok, non_neg_integer()}
def discover_historical_batches(
%{
config: %{
l1_rpc: l1_rpc_config,
rollup_rpc: rollup_rpc_config,
l1_sequencer_inbox_address: sequencer_inbox_address,
messages_to_blocks_shift: messages_to_blocks_shift,
l1_rollup_init_block: l1_rollup_init_block,
new_batches_limit: new_batches_limit
},
data: %{historical_batches_end_block: end_block}
} = _state
) do
if end_block >= l1_rollup_init_block do
start_block = max(l1_rollup_init_block, end_block - l1_rpc_config.logs_block_range + 1)
log_info("Block range for historical batches discovery: #{start_block}..#{end_block}")
discover_historical(
sequencer_inbox_address,
start_block,
end_block,
new_batches_limit,
messages_to_blocks_shift,
l1_rpc_config,
rollup_rpc_config
)
{:ok, start_block}
else
{:ok, l1_rollup_init_block}
end
end
# Initiates the discovery process for batches within a specified block range.
#
# Invokes the actual discovery process for new batches by calling `do_discover`
# with the provided parameters.
#
# ## Parameters
# - `sequencer_inbox_address`: The SequencerInbox contract address.
# - `start_block`: The starting block number for discovery.
# - `end_block`: The ending block number for discovery.
# - `new_batches_limit`: Limit of new batches to process in one iteration.
# - `messages_to_blocks_shift`: Shift value for message to block number mapping.
# - `l1_rpc_config`: Configuration for L1 RPC calls.
# - `rollup_rpc_config`: Configuration for rollup RPC calls.
#
# ## Returns
# - N/A
defp discover(
sequencer_inbox_address,
start_block,
end_block,
new_batches_limit,
messages_to_blocks_shift,
l1_rpc_config,
rollup_rpc_config
) do
do_discover(
sequencer_inbox_address,
start_block,
end_block,
new_batches_limit,
messages_to_blocks_shift,
l1_rpc_config,
rollup_rpc_config
)
end
# Initiates the historical discovery process for batches within a specified block range.
#
# Calls `do_discover` with parameters reversed for start and end blocks to
# process historical data.
#
# ## Parameters
# - `sequencer_inbox_address`: The SequencerInbox contract address.
# - `start_block`: The starting block number for discovery.
# - `end_block`: The ending block number for discovery.
# - `new_batches_limit`: Limit of new batches to process in one iteration.
# - `messages_to_blocks_shift`: Shift value for message to block number mapping.
# - `l1_rpc_config`: Configuration for L1 RPC calls.
# - `rollup_rpc_config`: Configuration for rollup RPC calls.
#
# ## Returns
# - N/A
defp discover_historical(
sequencer_inbox_address,
start_block,
end_block,
new_batches_limit,
messages_to_blocks_shift,
l1_rpc_config,
rollup_rpc_config
) do
do_discover(
sequencer_inbox_address,
end_block,
start_block,
new_batches_limit,
messages_to_blocks_shift,
l1_rpc_config,
rollup_rpc_config
)
end
# Performs the discovery of new or historical batches within a specified block range,
# processing and importing the relevant data into the database.
#
# This function retrieves SequencerBatchDelivered event logs from the specified block range
# and processes these logs to identify new batches and their corresponding details. It then
# constructs comprehensive data structures for batches, lifecycle transactions, rollup
# blocks, and rollup transactions. Additionally, it identifies any L2-to-L1 messages that
# have been committed within these batches and updates their status. All discovered and
# processed data are then imported into the database.
#
# ## Parameters
# - `sequencer_inbox_address`: The SequencerInbox contract address used to filter logs.
# - `start_block`: The starting block number for the discovery range.
# - `end_block`: The ending block number for the discovery range.
# - `new_batches_limit`: The maximum number of new batches to process in one iteration.
# - `messages_to_blocks_shift`: The value used to align message counts with rollup block numbers.
# - `l1_rpc_config`: RPC configuration parameters for L1.
# - `rollup_rpc_config`: RPC configuration parameters for rollup data.
#
# ## Returns
# - N/A
defp do_discover(
sequencer_inbox_address,
start_block,
end_block,
new_batches_limit,
messages_to_blocks_shift,
l1_rpc_config,
rollup_rpc_config
) do
raw_logs =
get_logs_new_batches(
min(start_block, end_block),
max(start_block, end_block),
sequencer_inbox_address,
l1_rpc_config.json_rpc_named_arguments
)
logs =
if end_block >= start_block do
raw_logs
else
Enum.reverse(raw_logs)
end
# Discovered logs are divided into chunks to ensure progress
# in batch discovery, even if an error interrupts the fetching process.
logs
|> Enum.chunk_every(new_batches_limit)
|> Enum.each(fn chunked_logs ->
{batches, lifecycle_txs, rollup_blocks, rollup_txs, committed_txs} =
handle_batches_from_logs(
chunked_logs,
messages_to_blocks_shift,
l1_rpc_config,
rollup_rpc_config
)
{:ok, _} =
Chain.import(%{
arbitrum_lifecycle_transactions: %{params: lifecycle_txs},
arbitrum_l1_batches: %{params: batches},
arbitrum_batch_blocks: %{params: rollup_blocks},
arbitrum_batch_transactions: %{params: rollup_txs},
arbitrum_messages: %{params: committed_txs},
timeout: :infinity
})
end)
end
# Fetches logs for SequencerBatchDelivered events from the SequencerInbox contract within a block range.
#
# Retrieves logs that correspond to SequencerBatchDelivered events, specifically
# from the SequencerInbox contract, between the specified block numbers.
#
# ## Parameters
# - `start_block`: The starting block number for log retrieval.
# - `end_block`: The ending block number for log retrieval.
# - `sequencer_inbox_address`: The address of the SequencerInbox contract.
# - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
#
# ## Returns
# - A list of logs for SequencerBatchDelivered events within the specified block range.
defp get_logs_new_batches(start_block, end_block, sequencer_inbox_address, json_rpc_named_arguments)
when start_block <= end_block do
{:ok, logs} =
IndexerHelper.get_logs(
start_block,
end_block,
sequencer_inbox_address,
[@message_sequencer_batch_delivered],
json_rpc_named_arguments
)
if length(logs) > 0 do
log_debug("Found #{length(logs)} SequencerBatchDelivered logs")
end
logs
end
# Processes logs to extract batch information and prepare it for database import.
#
# This function analyzes SequencerBatchDelivered event logs to identify new batches
# and retrieves their details, avoiding the reprocessing of batches already known
# in the database. It enriches the details of new batches with data from corresponding
# L1 transactions and blocks, including timestamps and block ranges. The function
# then prepares batches, associated rollup blocks and transactions, and lifecycle
# transactions for database import. Additionally, L2-to-L1 messages initiated in the
# rollup blocks associated with the discovered batches are retrieved from the database,
# marked as `:sent`, and prepared for database import.
#
# ## Parameters
# - `logs`: The list of SequencerBatchDelivered event logs.
# - `msg_to_block_shift`: The shift value for mapping batch messages to block numbers.
# - `l1_rpc_config`: The RPC configuration for L1 requests.
# - `rollup_rpc_config`: The RPC configuration for rollup data requests.
#
# ## Returns
# - A tuple containing lists of batches, lifecycle transactions, rollup blocks,
# rollup transactions, and committed messages (with the status `:sent`), all
# ready for database import.
defp handle_batches_from_logs(
logs,
msg_to_block_shift,
%{
json_rpc_named_arguments: json_rpc_named_arguments,
chunk_size: chunk_size
} = l1_rpc_config,
rollup_rpc_config
) do
existing_batches =
logs
|> parse_logs_to_get_batch_numbers()
|> Db.batches_exist()
{batches, txs_requests, blocks_requests} = parse_logs_for_new_batches(logs, existing_batches)
blocks_to_ts = Rpc.execute_blocks_requests_and_get_ts(blocks_requests, json_rpc_named_arguments, chunk_size)
{lifecycle_txs_wo_indices, batches_to_import} =
execute_tx_requests_parse_txs_calldata(txs_requests, msg_to_block_shift, blocks_to_ts, batches, l1_rpc_config)
{blocks_to_import, rollup_txs_to_import} = get_rollup_blocks_and_transactions(batches_to_import, rollup_rpc_config)
lifecycle_txs =
lifecycle_txs_wo_indices
|> Db.get_indices_for_l1_transactions()
tx_counts_per_batch = batches_to_rollup_txs_amounts(rollup_txs_to_import)
batches_list_to_import =
batches_to_import
|> Map.values()
|> Enum.reduce([], fn batch, updated_batches_list ->
[
batch
|> Map.put(:commitment_id, get_l1_tx_id_by_hash(lifecycle_txs, batch.tx_hash))
|> Map.put(
:transactions_count,
Map.get(tx_counts_per_batch, batch.number, 0)
)
|> Map.drop([:tx_hash])
| updated_batches_list
]
end)
committed_txs =
blocks_to_import
|> Map.keys()
|> Enum.max()
|> get_committed_l2_to_l1_messages()
{batches_list_to_import, Map.values(lifecycle_txs), Map.values(blocks_to_import), rollup_txs_to_import,
committed_txs}
end
# Extracts batch numbers from logs of SequencerBatchDelivered events.
defp parse_logs_to_get_batch_numbers(logs) do
logs
|> Enum.map(fn event ->
{batch_num, _, _} = sequencer_batch_delivered_event_parse(event)
batch_num
end)
end
# Parses logs representing SequencerBatchDelivered events to identify new batches.
#
# This function sifts through logs of SequencerBatchDelivered events, extracts the
# necessary data, and assembles a map of new batch descriptions. Additionally, it
# prepares RPC `eth_getTransactionByHash` and `eth_getBlockByNumber` requests to
# fetch details not present in the logs. To minimize subsequent RPC calls, only
# batches not previously known (i.e., absent in `existing_batches`) are processed.
#
# ## Parameters
# - `logs`: A list of event logs to be processed.
# - `existing_batches`: A list of batch numbers already processed.
#
# ## Returns
# - A tuple containing:
# - A map of new batch descriptions, which are not yet ready for database import.
# - A list of RPC `eth_getTransactionByHash` requests for fetching details of
# the L1 transactions associated with these batches.
# - A list of RPC requests to fetch details of the L1 blocks where these batches
# were included.
defp parse_logs_for_new_batches(logs, existing_batches) do
{batches, txs_requests, blocks_requests} =
logs
|> Enum.reduce({%{}, [], %{}}, fn event, {batches, txs_requests, blocks_requests} ->
{batch_num, before_acc, after_acc} = sequencer_batch_delivered_event_parse(event)
tx_hash_raw = event["transactionHash"]
tx_hash = Rpc.string_hash_to_bytes_hash(tx_hash_raw)
blk_num = quantity_to_integer(event["blockNumber"])
if batch_num in existing_batches do
{batches, txs_requests, blocks_requests}
else
updated_batches =
Map.put(
batches,
batch_num,
%{
number: batch_num,
before_acc: before_acc,
after_acc: after_acc,
tx_hash: tx_hash
}
)
updated_txs_requests = [
Rpc.transaction_by_hash_request(%{id: 0, hash: tx_hash_raw})
| txs_requests
]
updated_blocks_requests =
Map.put(
blocks_requests,
blk_num,
BlockByNumber.request(%{id: 0, number: blk_num}, false, true)
)
log_info("New batch #{batch_num} found in #{tx_hash_raw}")
{updated_batches, updated_txs_requests, updated_blocks_requests}
end
end)
{batches, txs_requests, Map.values(blocks_requests)}
end
# Parses SequencerBatchDelivered event to get batch sequence number and associated accumulators
defp sequencer_batch_delivered_event_parse(event) do
[_, batch_sequence_number, before_acc, after_acc] = event["topics"]
{quantity_to_integer(batch_sequence_number), before_acc, after_acc}
end
# Executes transaction requests and parses the calldata to extract batch data.
#
# This function processes a list of RPC `eth_getTransactionByHash` requests, extracts
# and decodes the calldata from the transactions to obtain batch details. It updates
# the provided batch map with block ranges for new batches and constructs a map of
# lifecycle transactions with their timestamps and finalization status.
#
# ## Parameters
# - `txs_requests`: The list of RPC requests to fetch transaction data.
# - `msg_to_block_shift`: The shift value to adjust the message count to the correct
# rollup block numbers.
# - `blocks_to_ts`: A map of block numbers to their timestamps, required to complete
# data for corresponding lifecycle transactions.
# - `batches`: The current batch data to be updated.
# - A configuration map containing JSON RPC arguments, a track finalization flag,
# and a chunk size for batch processing.
#
# ## Returns
# - A tuple containing:
# - A map of lifecycle (L1) transactions, which are not yet compatible with
# database import and require further processing.
# - An updated map of batch descriptions, also requiring further processing
# before database import.
defp execute_tx_requests_parse_txs_calldata(txs_requests, msg_to_block_shift, blocks_to_ts, batches, %{
json_rpc_named_arguments: json_rpc_named_arguments,
track_finalization: track_finalization?,
chunk_size: chunk_size
}) do
txs_requests
|> Enum.chunk_every(chunk_size)
|> Enum.reduce({%{}, batches}, fn chunk, {l1_txs, updated_batches} ->
chunk
# each eth_getTransactionByHash call takes time since it returns the entire
# batch in `input`, which is heavy because it contains dozens of rollup blocks
|> Rpc.make_chunked_request(json_rpc_named_arguments, "eth_getTransactionByHash")
|> Enum.reduce({l1_txs, updated_batches}, fn resp, {txs_map, batches_map} ->
block_num = quantity_to_integer(resp["blockNumber"])
tx_hash = Rpc.string_hash_to_bytes_hash(resp["hash"])
# Although they are called messages in the functions' ABI, in fact they are
# rollup blocks
{batch_num, prev_message_count, new_message_count} =
add_sequencer_l2_batch_from_origin_calldata_parse(resp["input"])
# In some cases the message numbers extracted from calldata are not linked
# directly to rollup block numbers; to account for this, the numbers are
# shifted by a value specific to the particular rollup
updated_batches_map =
Map.put(
batches_map,
batch_num,
Map.merge(batches_map[batch_num], %{
start_block: prev_message_count + msg_to_block_shift,
end_block: new_message_count + msg_to_block_shift - 1
})
)
updated_txs_map =
Map.put(txs_map, tx_hash, %{
hash: tx_hash,
block_number: block_num,
timestamp: blocks_to_ts[block_num],
status:
if track_finalization? do
:unfinalized
else
:finalized
end
})
{updated_txs_map, updated_batches_map}
end)
end)
end
# Parses calldata of `addSequencerL2BatchFromOrigin` or `addSequencerL2BatchFromBlobs`
# functions to extract batch information.
defp add_sequencer_l2_batch_from_origin_calldata_parse(calldata) do
case calldata do
"0x8f111f3c" <> encoded_params ->
# addSequencerL2BatchFromOrigin(uint256 sequenceNumber, bytes calldata data, uint256 afterDelayedMessagesRead, address gasRefunder, uint256 prevMessageCount, uint256 newMessageCount)
[sequence_number, _data, _after_delayed_messages_read, _gas_refunder, prev_message_count, new_message_count] =
TypeDecoder.decode(
Base.decode16!(encoded_params, case: :lower),
%FunctionSelector{
function: "addSequencerL2BatchFromOrigin",
types: [
{:uint, 256},
:bytes,
{:uint, 256},
:address,
{:uint, 256},
{:uint, 256}
]
}
)
{sequence_number, prev_message_count, new_message_count}
"0x3e5aa082" <> encoded_params ->
# addSequencerL2BatchFromBlobs(uint256 sequenceNumber, uint256 afterDelayedMessagesRead, address gasRefunder, uint256 prevMessageCount, uint256 newMessageCount)
[sequence_number, _after_delayed_messages_read, _gas_refunder, prev_message_count, new_message_count] =
TypeDecoder.decode(
Base.decode16!(encoded_params, case: :lower),
%FunctionSelector{
function: "addSequencerL2BatchFromBlobs",
types: [
{:uint, 256},
{:uint, 256},
:address,
{:uint, 256},
{:uint, 256}
]
}
)
{sequence_number, prev_message_count, new_message_count}
end
end
# Retrieves rollup blocks and transactions for a list of batches.
#
# This function extracts rollup block ranges from each batch's data to determine
# the required blocks. It then fetches existing rollup blocks and transactions from
# the database and recovers any missing data through RPC if necessary.
#
# ## Parameters
# - `batches`: A list of batches, each containing rollup block ranges associated
# with the batch.
# - `rollup_rpc_config`: Configuration for RPC calls to fetch rollup data.
#
# ## Returns
# - A tuple containing:
# - A map of rollup blocks, ready for database import.
# - A list of rollup transactions, ready for database import.
defp get_rollup_blocks_and_transactions(
batches,
rollup_rpc_config
) do
blocks_to_batches = unwrap_rollup_block_ranges(batches)
required_blocks_numbers = Map.keys(blocks_to_batches)
log_info("Identified #{length(required_blocks_numbers)} rollup blocks")
{blocks_to_import_map, txs_to_import_list} =
get_rollup_blocks_and_txs_from_db(required_blocks_numbers, blocks_to_batches)
# While it's not entirely aligned with data integrity principles to recover
# rollup blocks and transactions from RPC that are not yet indexed, it's
# a practical compromise to facilitate the progress of batch discovery. Given
# the potential high frequency of new batch appearances and the substantial
# volume of blocks and transactions, prioritizing discovery process advancement
# is deemed reasonable.
{blocks_to_import, txs_to_import} =
recover_data_if_necessary(
blocks_to_import_map,
txs_to_import_list,
required_blocks_numbers,
blocks_to_batches,
rollup_rpc_config
)
log_info(
"Found #{length(Map.keys(blocks_to_import))} rollup blocks and #{length(txs_to_import)} rollup transactions in DB"
)
{blocks_to_import, txs_to_import}
end
# Unwraps rollup block ranges from batch data to create a block-to-batch number map.
#
# ## Parameters
# - `batches`: A map where keys are batch identifiers and values are structs
# containing the start and end blocks of each batch.
#
# ## Returns
# - A map where each key is a rollup block number and its value is the
# corresponding batch number.
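#
# For example (illustrative), a single batch
# `%{number: 5, start_block: 10, end_block: 12}` unwraps to
# `%{10 => 5, 11 => 5, 12 => 5}`.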
defp unwrap_rollup_block_ranges(batches) do
batches
|> Map.values()
|> Enum.reduce(%{}, fn batch, b_2_b ->
batch.start_block..batch.end_block
|> Enum.reduce(b_2_b, fn block_num, b_2_b_inner ->
Map.put(b_2_b_inner, block_num, batch.number)
end)
end)
end
# Retrieves rollup blocks and transactions from the database based on given block numbers.
#
# This function fetches rollup blocks from the database using provided block numbers.
# For each block, it constructs a map of rollup block details and a list of
# transactions, including the batch number from `blocks_to_batches` mapping, block
# hash, and transaction hash.
#
# ## Parameters
# - `rollup_blocks_numbers`: A list of rollup block numbers to retrieve from the
# database.
# - `blocks_to_batches`: A mapping from block numbers to batch numbers.
#
# ## Returns
# - A tuple containing:
# - A map of rollup blocks associated with the batch numbers, ready for
# database import.
# - A list of transactions, each associated with its respective rollup block
# and batch number, ready for database import.
defp get_rollup_blocks_and_txs_from_db(rollup_blocks_numbers, blocks_to_batches) do
rollup_blocks_numbers
|> Db.rollup_blocks()
|> Enum.reduce({%{}, []}, fn block, {blocks_map, txs_list} ->
batch_num = blocks_to_batches[block.number]
updated_txs_list =
block.transactions
|> Enum.reduce(txs_list, fn tx, acc ->
[%{tx_hash: tx.hash.bytes, batch_number: batch_num} | acc]
end)
updated_blocks_map =
blocks_map
|> Map.put(block.number, %{
block_number: block.number,
batch_number: batch_num,
confirmation_id: nil
})
{updated_blocks_map, updated_txs_list}
end)
end
# Recovers missing rollup blocks and transactions from the RPC if not all required blocks are found in the current data.
#
# This function compares the required rollup block numbers with the ones already
# present in the current data. If some blocks are missing, it retrieves them from
# the RPC along with their transactions. The retrieved blocks and transactions
# are then merged with the current data to ensure a complete set for further
# processing.
#
# ## Parameters
# - `current_rollup_blocks`: The map of rollup blocks currently held.
# - `current_rollup_txs`: The list of transactions currently held.
# - `required_blocks_numbers`: A list of block numbers that are required for
# processing.
# - `blocks_to_batches`: A map associating rollup block numbers with batch numbers.
# - `rollup_rpc_config`: Configuration for the RPC calls.
#
# ## Returns
# - A tuple containing the updated map of rollup blocks and the updated list of
# transactions, both are ready for database import.
defp recover_data_if_necessary(
current_rollup_blocks,
current_rollup_txs,
required_blocks_numbers,
blocks_to_batches,
rollup_rpc_config
) do
required_blocks_amount = length(required_blocks_numbers)
found_blocks_numbers = Map.keys(current_rollup_blocks)
found_blocks_numbers_length = length(found_blocks_numbers)
if found_blocks_numbers_length != required_blocks_amount do
log_info("Only #{found_blocks_numbers_length} of #{required_blocks_amount} rollup blocks found in DB")
{recovered_blocks_map, recovered_txs_list, _} =
recover_rollup_blocks_and_txs_from_rpc(
required_blocks_numbers,
found_blocks_numbers,
blocks_to_batches,
rollup_rpc_config
)
{Map.merge(current_rollup_blocks, recovered_blocks_map), current_rollup_txs ++ recovered_txs_list}
else
{current_rollup_blocks, current_rollup_txs}
end
end
# Recovers missing rollup blocks and their transactions from RPC based on required block numbers.
#
# This function identifies missing rollup blocks by comparing the required block
# numbers with those already found. It then fetches the missing blocks in chunks
# using JSON RPC calls, aggregating the results into a map of rollup blocks and
# a list of transactions. The data is processed to ensure each block and its
# transactions are correctly associated with their batch number.
#
# ## Parameters
# - `required_blocks_numbers`: A list of block numbers that are required to be
# fetched.
# - `found_blocks_numbers`: A list of block numbers that have already been
# fetched.
# - `blocks_to_batches`: A map linking block numbers to their respective batch
# numbers.
# - `rollup_rpc_config`: A map containing configuration parameters including
# JSON RPC arguments for rollup RPC and the chunk size
# for batch processing.
#
# ## Returns
# - A tuple containing:
# - A map of rollup blocks associated with the batch numbers, ready for
# database import.
# - A list of transactions, each associated with its respective rollup block
# and batch number, ready for database import.
# - The updated counter of processed chunks (usually ignored).
defp recover_rollup_blocks_and_txs_from_rpc(
required_blocks_numbers,
found_blocks_numbers,
blocks_to_batches,
%{
json_rpc_named_arguments: rollup_json_rpc_named_arguments,
chunk_size: rollup_chunk_size
} = _rollup_rpc_config
) do
missed_blocks = required_blocks_numbers -- found_blocks_numbers
missed_blocks_length = length(missed_blocks)
missed_blocks
|> Enum.sort()
|> Enum.chunk_every(rollup_chunk_size)
|> Enum.reduce({%{}, [], 0}, fn chunk, {blocks_map, txs_list, chunks_counter} ->
Logging.log_details_chunk_handling(
"Collecting rollup data",
{"block", "blocks"},
chunk,
chunks_counter,
missed_blocks_length
)
requests =
chunk
|> Enum.reduce([], fn block_num, requests_list ->
[
BlockByNumber.request(
%{
id: blocks_to_batches[block_num],
number: block_num
},
false
)
| requests_list
]
end)
{blocks_map_updated, txs_list_updated} =
requests
|> Rpc.make_chunked_request_keep_id(rollup_json_rpc_named_arguments, "eth_getBlockByNumber")
|> prepare_rollup_block_map_and_transactions_list(blocks_map, txs_list)
{blocks_map_updated, txs_list_updated, chunks_counter + length(chunk)}
end)
end
# Processes JSON responses to construct a mapping of rollup block information and a list of transactions.
#
# This function takes JSON RPC responses for rollup blocks and processes each
# response to create a mapping of rollup block details and a comprehensive list
# of transactions associated with these blocks. It ensures that each block and its
# corresponding transactions are correctly associated with their batch number.
#
# ## Parameters
# - `json_responses`: A list of JSON RPC responses containing rollup block data.
# - `rollup_blocks`: The initial map of rollup block information.
# - `rollup_txs`: The initial list of rollup transactions.
#
# ## Returns
# - A tuple containing:
# - An updated map of rollup blocks associated with their batch numbers, ready
# for database import.
# - An updated list of transactions, each associated with its respective rollup
# block and batch number, ready for database import.
defp prepare_rollup_block_map_and_transactions_list(json_responses, rollup_blocks, rollup_txs) do
json_responses
|> Enum.reduce({rollup_blocks, rollup_txs}, fn resp, {blocks_map, txs_list} ->
batch_num = resp.id
blk_num = quantity_to_integer(resp.result["number"])
updated_blocks_map =
Map.put(
blocks_map,
blk_num,
%{block_number: blk_num, batch_number: batch_num, confirmation_id: nil}
)
updated_txs_list =
case resp.result["transactions"] do
nil ->
txs_list
new_txs ->
Enum.reduce(new_txs, txs_list, fn l2_tx_hash, txs_acc ->
[%{tx_hash: l2_tx_hash, batch_number: batch_num} | txs_acc]
end)
end
{updated_blocks_map, updated_txs_list}
end)
end
# Retrieves the unique identifier of an L1 transaction by its hash from the given
# map. Returns `nil` if there is no such transaction in the map.
defp get_l1_tx_id_by_hash(l1_txs, hash) do
l1_txs
|> Map.get(hash)
|> Kernel.||(%{id: nil})
|> Map.get(:id)
end
# Aggregates rollup transactions by batch number, counting the number of transactions in each batch.
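#
# For example (illustrative), `[%{batch_number: 1}, %{batch_number: 1}, %{batch_number: 2}]`
# yields `%{1 => 2, 2 => 1}`.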
defp batches_to_rollup_txs_amounts(rollup_txs) do
rollup_txs
|> Enum.reduce(%{}, fn tx, acc ->
Map.put(acc, tx.batch_number, Map.get(acc, tx.batch_number, 0) + 1)
end)
end
# Retrieves initiated L2-to-L1 messages up to the specified block number and marks them as 'sent'.
defp get_committed_l2_to_l1_messages(block_number) do
block_number
|> Db.initiated_l2_to_l1_messages()
|> Enum.map(fn tx ->
Map.put(tx, :status, :sent)
end)
end
end

@@ -0,0 +1,413 @@
defmodule Indexer.Fetcher.Arbitrum.Workers.NewL1Executions do
@moduledoc """
Coordinates the discovery and processing of new and historical L2-to-L1 message executions for an Arbitrum rollup.
This module is responsible for identifying and importing executions of messages
that were initiated from Arbitrum's Layer 2 (L2) and are to be relayed to
Layer 1 (L1). It handles both new executions that are currently occurring on L1
and historical executions that occurred in the past but have not yet been
processed.
Discovery of these message executions involves parsing logs for
`OutBoxTransactionExecuted` events emitted by the Arbitrum outbox contract. As
the logs do not provide comprehensive data for constructing the related
lifecycle transactions, the module executes batched RPC calls to
`eth_getBlockByNumber`, using the responses to obtain transaction timestamps,
thereby enriching the lifecycle transaction data.
"""
import EthereumJSONRPC, only: [quantity_to_integer: 1]
import Indexer.Fetcher.Arbitrum.Utils.Logging, only: [log_info: 1, log_debug: 1]
alias EthereumJSONRPC.Block.ByNumber, as: BlockByNumber
import Explorer.Helper, only: [decode_data: 2]
alias Indexer.Fetcher.Arbitrum.Utils.Helper, as: ArbitrumHelper
alias Indexer.Fetcher.Arbitrum.Utils.{Db, Rpc}
alias Indexer.Helper, as: IndexerHelper
alias Explorer.Chain
require Logger
# keccak256("OutBoxTransactionExecuted(address,address,uint256,uint256)")
@outbox_transaction_executed_event "0x20af7f3bbfe38132b8900ae295cd9c8d1914be7052d061a511f3f728dab18964"
@outbox_transaction_executed_unindexed_params [{:uint, 256}]
@doc """
Discovers and processes new executions of L2-to-L1 messages within the current L1 block range.
This function fetches logs for `OutBoxTransactionExecuted` events within the
specified L1 block range to identify new execution transactions for L2-to-L1
messages, updating their status and linking them with corresponding lifecycle
transactions in the database. Additionally, the function checks unexecuted
L2-to-L1 messages to match them with any newly recorded executions and updates
their status to `:relayed`.
## Parameters
- A map containing:
- `config`: Configuration settings including the Arbitrum outbox contract
address, JSON RPC arguments, and the block range for fetching
logs.
- `data`: Contains the starting block number for new execution discovery.
## Returns
- `{:ok, end_block}`: On successful discovery and processing, where `end_block`
indicates the necessity to consider next block range in the
following iteration of new executions discovery.
- `{:ok, start_block - 1}`: If no new blocks have been produced on L1 since the
  last iteration of new executions discovery, indicating that the current
  start block should be reconsidered in the next iteration.
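## Example

A worked sketch (numbers are illustrative): assuming `logs_block_range` is 1000,
the start block is 19_000_000, and the latest L1 block is 19_002_000, the
discovery covers 19_000_000..19_000_999 and returns `{:ok, 19_000_999}`.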
"""
@spec discover_new_l1_messages_executions(%{
:config => %{
:l1_outbox_address => binary(),
:l1_rpc => %{
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
:logs_block_range => non_neg_integer(),
optional(any()) => any()
},
optional(any()) => any()
},
:data => %{:new_executions_start_block => non_neg_integer(), optional(any()) => any()},
optional(any()) => any()
}) :: {:ok, non_neg_integer()}
def discover_new_l1_messages_executions(
%{
config: %{
l1_rpc: l1_rpc_config,
l1_outbox_address: outbox_address
},
data: %{new_executions_start_block: start_block}
} = _state
) do
# Requesting the "latest" block instead of "safe" allows to catch executions
# without latency.
{:ok, latest_block} =
IndexerHelper.get_block_number_by_tag(
"latest",
l1_rpc_config.json_rpc_named_arguments,
Rpc.get_resend_attempts()
)
end_block = min(start_block + l1_rpc_config.logs_block_range - 1, latest_block)
if start_block <= end_block do
log_info("Block range for new l2-to-l1 messages executions discovery: #{start_block}..#{end_block}")
discover(outbox_address, start_block, end_block, l1_rpc_config)
{:ok, end_block}
else
{:ok, start_block - 1}
end
end
@doc """
Discovers and processes historical executions of L2-to-L1 messages within a calculated L1 block range.
This function fetches logs for `OutBoxTransactionExecuted` events within the
calculated L1 block range. It then processes these logs to identify execution
transactions for L2-to-L1 messages, updating their status and linking them with
corresponding lifecycle transactions in the database. Additionally, the
function goes through unexecuted L2-to-L1 messages, matches them with the
executions recorded in the database up to this moment, and updates the messages'
status to `:relayed`.
## Parameters
- A map containing:
- `config`: Configuration settings including the Arbitrum outbox contract
address, the initialization block for the rollup, and JSON RPC
arguments.
- `data`: Contains the ending block number for the historical execution
discovery.
## Returns
- `{:ok, start_block}`: On successful discovery and processing, where
`start_block` indicates the necessity to consider another block range in the
next iteration of historical executions discovery.
- `{:ok, l1_rollup_init_block}`: If the historical discovery process has reached
the rollup initialization block, indicating that no further action is needed.
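## Example

A worked sketch (numbers are illustrative): with `logs_block_range` of 2000,
`l1_rollup_init_block` of 15_000_000, and `historical_executions_end_block` of
18_000_000, the discovery covers 17_998_001..18_000_000 and returns
`{:ok, 17_998_001}`.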
"""
@spec discover_historical_l1_messages_executions(%{
:config => %{
:l1_outbox_address => binary(),
:l1_rollup_init_block => non_neg_integer(),
:l1_rpc => %{
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
:logs_block_range => non_neg_integer(),
optional(any()) => any()
},
optional(any()) => any()
},
:data => %{:historical_executions_end_block => non_neg_integer(), optional(any()) => any()},
optional(any()) => any()
}) :: {:ok, non_neg_integer()}
def discover_historical_l1_messages_executions(
%{
config: %{
l1_rpc: l1_rpc_config,
l1_outbox_address: outbox_address,
l1_rollup_init_block: l1_rollup_init_block
},
data: %{historical_executions_end_block: end_block}
} = _state
) do
if end_block >= l1_rollup_init_block do
start_block = max(l1_rollup_init_block, end_block - l1_rpc_config.logs_block_range + 1)
log_info("Block range for historical l2-to-l1 messages executions discovery: #{start_block}..#{end_block}")
discover(outbox_address, start_block, end_block, l1_rpc_config)
{:ok, start_block}
else
{:ok, l1_rollup_init_block}
end
end
# Discovers and imports execution transactions for L2-to-L1 messages within a specified L1 block range.
#
# This function fetches logs for `OutBoxTransactionExecuted` events within the
# specified L1 block range to discover new execution transactions. It processes
# these logs to extract execution details and associated lifecycle transactions,
# which are then imported into the database. For lifecycle timestamps not
# available in the logs, RPC calls to `eth_getBlockByNumber` are made to fetch
# the necessary data. Furthermore, the function checks unexecuted L2-to-L1
# messages to match them with any recorded executions, updating their status to
# `:relayed` and establishing links with the corresponding lifecycle
# transactions. These updated messages are also imported into the database.
#
# ## Parameters
# - `outbox_address`: The address of the Arbitrum outbox contract to filter the
# logs.
# - `start_block`: The starting block number for log retrieval.
# - `end_block`: The ending block number for log retrieval.
# - `l1_rpc_config`: Configuration parameters including JSON RPC arguments and
# settings for processing the logs.
#
# ## Returns
# - N/A
defp discover(outbox_address, start_block, end_block, l1_rpc_config) do
logs =
get_logs_for_new_executions(
start_block,
end_block,
outbox_address,
l1_rpc_config.json_rpc_named_arguments
)
{lifecycle_txs, executions} = get_executions_from_logs(logs, l1_rpc_config)
unless executions == [] do
log_info("Executions for #{length(executions)} L2 messages will be imported")
{:ok, _} =
Chain.import(%{
arbitrum_lifecycle_transactions: %{params: lifecycle_txs},
arbitrum_l1_executions: %{params: executions},
timeout: :infinity
})
end
# Inspects all unexecuted messages to potentially mark them as completed,
# addressing the scenario where found executions may correspond to messages
# that have not yet been indexed. This ensures that as soon as a new unexecuted
# message is added to the database, it can be marked as relayed, considering
# the execution transactions that have already been indexed.
messages = get_relayed_messages(end_block)
unless messages == [] do
log_info("Marking #{length(messages)} l2-to-l1 messages as completed")
{:ok, _} =
Chain.import(%{
arbitrum_messages: %{params: messages},
timeout: :infinity
})
end
end
# Retrieves logs representing `OutBoxTransactionExecuted` events between the specified blocks.
defp get_logs_for_new_executions(start_block, end_block, outbox_address, json_rpc_named_arguments)
when start_block <= end_block do
{:ok, logs} =
IndexerHelper.get_logs(
start_block,
end_block,
outbox_address,
[@outbox_transaction_executed_event],
json_rpc_named_arguments
)
if length(logs) > 0 do
log_debug("Found #{length(logs)} OutBoxTransactionExecuted logs")
end
logs
end
# Extracts and processes execution details from logs for L2-to-L1 message transactions.
#
# This function parses logs representing `OutBoxTransactionExecuted` events to
# extract basic execution details. It then requests block timestamps and
# associates them with the extracted executions, forming lifecycle transactions
# enriched with timestamps and finalization statuses. Subsequently, unique
# identifiers for the lifecycle transactions are determined, and the connection
# between execution records and lifecycle transactions is established.
#
# ## Parameters
# - `logs`: A collection of log entries to be processed.
# - `l1_rpc_config`: Configuration parameters including JSON RPC arguments,
# chunk size for RPC calls, and a flag indicating whether to track the
# finalization of transactions.
#
# ## Returns
# - A tuple containing:
# - A list of lifecycle transactions with updated timestamps, finalization
# statuses, and unique identifiers.
# - A list of detailed execution information for L2-to-L1 messages.
# Both lists are prepared for database importation.
defp get_executions_from_logs(
logs,
%{
json_rpc_named_arguments: json_rpc_named_arguments,
chunk_size: chunk_size,
track_finalization: track_finalization?
} = _l1_rpc_config
) do
{basics_executions, basic_lifecycle_txs, blocks_requests} = parse_logs_for_new_executions(logs)
blocks_to_ts = Rpc.execute_blocks_requests_and_get_ts(blocks_requests, json_rpc_named_arguments, chunk_size)
lifecycle_txs =
basic_lifecycle_txs
|> ArbitrumHelper.extend_lifecycle_txs_with_ts_and_status(blocks_to_ts, track_finalization?)
|> Db.get_indices_for_l1_transactions()
executions =
basics_executions
|> Enum.reduce([], fn execution, updated_executions ->
updated =
execution
|> Map.put(:execution_id, lifecycle_txs[execution.execution_tx_hash].id)
|> Map.drop([:execution_tx_hash])
[updated | updated_executions]
end)
{Map.values(lifecycle_txs), executions}
end
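# Illustrative shape (assumed) of `lifecycle_txs` after the indexing above: a map
# keyed by the L1 transaction hash whose entries carry the assigned `:id`, e.g.
#
#   %{tx_hash => %{id: 7, hash: tx_hash, block_number: 19_000_000,
#                  timestamp: ~U[2024-05-01 00:00:00Z], status: :finalized}}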
# Parses logs to extract new execution transactions for L2-to-L1 messages.
#
# This function processes log entries to identify `OutBoxTransactionExecuted`
# events, extracting the message ID, transaction hash, and block number for
# each. It accumulates this data into execution details, lifecycle
# transaction descriptions, and RPC requests for block information. These
# are then used in subsequent steps to finalize the execution status of the
# messages.
#
# ## Parameters
# - `logs`: A collection of log entries to be processed.
#
# ## Returns
# - A tuple containing:
# - `executions`: A list of details for execution transactions related to
# L2-to-L1 messages.
# - `lifecycle_txs`: A map of lifecycle transaction details, keyed by L1
# transaction hash.
# - `blocks_requests`: A list of RPC requests for fetching block data where
# the executions occurred.
defp parse_logs_for_new_executions(logs) do
{executions, lifecycle_txs, blocks_requests} =
logs
|> Enum.reduce({[], %{}, %{}}, fn event, {executions, lifecycle_txs, blocks_requests} ->
msg_id = outbox_transaction_executed_event_parse(event)
l1_tx_hash_raw = event["transactionHash"]
l1_tx_hash = Rpc.string_hash_to_bytes_hash(l1_tx_hash_raw)
l1_blk_num = quantity_to_integer(event["blockNumber"])
updated_executions = [
%{
message_id: msg_id,
execution_tx_hash: l1_tx_hash
}
| executions
]
updated_lifecycle_txs =
Map.put(
lifecycle_txs,
l1_tx_hash,
%{hash: l1_tx_hash, block_number: l1_blk_num}
)
updated_blocks_requests =
Map.put(
blocks_requests,
l1_blk_num,
BlockByNumber.request(%{id: 0, number: l1_blk_num}, false, true)
)
log_debug("Execution for L2 message ##{msg_id} found in #{l1_tx_hash_raw}")
{updated_executions, updated_lifecycle_txs, updated_blocks_requests}
end)
{executions, lifecycle_txs, Map.values(blocks_requests)}
end
# Parses `OutBoxTransactionExecuted` event data to extract the transaction index parameter
defp outbox_transaction_executed_event_parse(event) do
[transaction_index] = decode_data(event["data"], @outbox_transaction_executed_unindexed_params)
transaction_index
end
# Retrieves unexecuted messages from L2 to L1, marking them as completed if their
# corresponding execution transactions are identified.
#
# This function fetches messages confirmed on L1 up to the specified rollup block
# number and matches these messages with their corresponding execution transactions.
# For matched pairs, it updates the message status to `:relayed` and links them with
# the execution transactions.
#
# ## Parameters
# - `block_number`: The block number up to which messages are considered for
# completion.
#
# ## Returns
# - A list of messages marked as completed, ready for database import.
defp get_relayed_messages(block_number) do
# Assuming that both the catchup block fetcher and the historical messages catchup
# fetcher check every discovered historical message for execution, there is no need
# to handle the :initiated and :sent statuses of historical messages here; only
# newly discovered messages whose status changed from `:sent` to `:confirmed` matter.
confirmed_messages = Db.confirmed_l2_to_l1_messages(block_number)
if Enum.empty?(confirmed_messages) do
[]
else
log_debug("Identified #{length(confirmed_messages)} l2-to-l1 messages already confirmed but not completed")
messages_map =
confirmed_messages
|> Enum.reduce(%{}, fn msg, acc ->
Map.put(acc, msg.message_id, msg)
end)
messages_map
|> Map.keys()
|> Db.l1_executions()
|> Enum.map(fn execution ->
messages_map
|> Map.get(execution.message_id)
|> Map.put(:completion_transaction_hash, execution.execution_transaction.hash.bytes)
|> Map.put(:status, :relayed)
end)
end
end
end
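A minimal driver sketch, assuming the fetcher's state machine consumes the two
return shapes documented above (the module below is hypothetical; `worker` stands
for the module defining `discover_historical_l1_messages_executions/1`):

defmodule HistoricalExecutionsDriverSketch do
  # Shrinks the historical window until the rollup initialization block is reached.
  def run(state, worker) do
    {:ok, start_block} = worker.discover_historical_l1_messages_executions(state)

    if start_block <= state.config.l1_rollup_init_block do
      :done
    else
      # The next window ends just below the one handled in this iteration.
      run(put_in(state, [:data, :historical_executions_end_block], start_block - 1), worker)
    end
  end
end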

@ -0,0 +1,346 @@
defmodule Indexer.Fetcher.Arbitrum.Workers.NewMessagesToL2 do
@moduledoc """
Manages the discovery and processing of new and historical L1-to-L2 messages initiated on L1 for an Arbitrum rollup.
This module is responsible for identifying and importing messages that are initiated
from Layer 1 (L1) to Arbitrum's Layer 2 (L2). It handles both new messages that are
currently being sent to L2 and historical messages that were sent in the past but
have not yet been processed by the system.
The initiated messages are identified by analyzing logs associated with
`MessageDelivered` events emitted by the Arbitrum bridge contract. These logs
contain almost all the information required to compose the messages, except for the
originator's address, which is obtained by making an RPC call to get the transaction
details.
"""
import EthereumJSONRPC, only: [quantity_to_integer: 1]
import Explorer.Helper, only: [decode_data: 2]
import Indexer.Fetcher.Arbitrum.Utils.Logging, only: [log_info: 1, log_debug: 1]
alias Indexer.Fetcher.Arbitrum.Utils.Rpc
alias Indexer.Helper, as: IndexerHelper
alias Explorer.Chain
require Logger
@types_of_l1_messages_forwarded_to_l2 [3, 7, 9, 12]
# keccak256("MessageDelivered(uint256,bytes32,address,uint8,address,bytes32,uint256,uint64)")
@message_delivered_event "0x5e3c1311ea442664e8b1611bfabef659120ea7a0a2cfc0667700bebc69cbffe1"
@message_delivered_event_unindexed_params [
:address,
{:uint, 8},
:address,
{:bytes, 32},
{:uint, 256},
{:uint, 64}
]
@doc """
Discovers new L1-to-L2 messages initiated on L1 within a configured block range and processes them for database import.
This function calculates the block range for discovering new messages from L1 to L2
based on the latest block number available on the network. It then fetches logs
related to L1-to-L2 events within this range, extracts message details from both
the log and the corresponding L1 transaction, and imports them into the database.
## Parameters
- A map containing:
- `config`: Configuration settings including JSON RPC arguments for L1, Arbitrum
bridge address, RPC block range, and chunk size for RPC calls.
- `data`: Contains the starting block number for new L1-to-L2 message discovery.
## Returns
- `{:ok, end_block}`: On successful discovery and processing, where `end_block`
indicates the necessity to consider next block range in the
following iteration of new message discovery.
- `{:ok, start_block - 1}`: when no new blocks on L1 produced from the last
iteration of the new message discovery.
"""
@spec discover_new_messages_to_l2(%{
:config => %{
:json_l1_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
:l1_bridge_address => binary(),
:l1_rpc_block_range => non_neg_integer(),
:l1_rpc_chunk_size => non_neg_integer(),
optional(any()) => any()
},
:data => %{
:new_msg_to_l2_start_block => non_neg_integer(),
optional(any()) => any()
},
optional(any()) => any()
}) :: {:ok, non_neg_integer()}
def discover_new_messages_to_l2(
%{
config: %{
json_l1_rpc_named_arguments: json_rpc_named_arguments,
l1_rpc_chunk_size: chunk_size,
l1_rpc_block_range: rpc_block_range,
l1_bridge_address: bridge_address
},
data: %{new_msg_to_l2_start_block: start_block}
} = _state
) do
# Requesting the "latest" block instead of "safe" allows to get messages originated to L2
# much earlier than they will be seen by the Arbitrum Sequencer.
{:ok, latest_block} =
IndexerHelper.get_block_number_by_tag(
"latest",
json_rpc_named_arguments,
Rpc.get_resend_attempts()
)
end_block = min(start_block + rpc_block_range - 1, latest_block)
if start_block <= end_block do
log_info("Block range for discovery new messages from L1: #{start_block}..#{end_block}")
discover(
bridge_address,
start_block,
end_block,
json_rpc_named_arguments,
chunk_size
)
{:ok, end_block}
else
{:ok, start_block - 1}
end
end
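# Window arithmetic example (illustrative numbers): with start_block = 1_000,
# l1_rpc_block_range = 500 and latest_block = 1_200, end_block = min(1_499, 1_200),
# so the scanned range is 1_000..1_200 and {:ok, 1_200} is returned; the caller is
# then expected to resume from block 1_201. If L1 produced no new blocks
# (latest_block < start_block), {:ok, start_block - 1} keeps the cursor in place.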
@doc """
Discovers historical L1-to-L2 messages initiated on L1 within the configured block range and processes them for database import.
This function calculates the block range for message discovery and targets historical
messages from L1 to L2 by querying the specified block range on L1. The discovery is
conducted by fetching logs related to L1-to-L2 events, extracting message details
from both the log and the corresponding L1 transaction, and importing them into
the database.
## Parameters
- A map containing:
- `config`: Configuration settings including JSON RPC arguments for L1, Arbitrum
bridge address, rollup initialization block, block range, and chunk
size for RPC calls.
- `data`: Contains the end block for historical L1-to-L2 message discovery.
## Returns
- `{:ok, start_block}`: On successful discovery and processing, where `start_block`
indicates the necessity to consider another block range in
the next iteration of message discovery.
- `{:ok, l1_rollup_init_block}`: If the discovery process has already reached the
rollup initialization block and no further discovery action was necessary.
"""
@spec discover_historical_messages_to_l2(%{
:config => %{
:json_l1_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
:l1_bridge_address => binary(),
:l1_rollup_init_block => non_neg_integer(),
:l1_rpc_block_range => non_neg_integer(),
:l1_rpc_chunk_size => non_neg_integer(),
optional(any()) => any()
},
:data => %{
:historical_msg_to_l2_end_block => non_neg_integer(),
optional(any()) => any()
},
optional(any()) => any()
}) :: {:ok, non_neg_integer()}
def discover_historical_messages_to_l2(
%{
config: %{
json_l1_rpc_named_arguments: json_rpc_named_arguments,
l1_rpc_chunk_size: chunk_size,
l1_rpc_block_range: rpc_block_range,
l1_bridge_address: bridge_address,
l1_rollup_init_block: l1_rollup_init_block
},
data: %{historical_msg_to_l2_end_block: end_block}
} = _state
) do
if end_block >= l1_rollup_init_block do
start_block = max(l1_rollup_init_block, end_block - rpc_block_range + 1)
log_info("Block range for discovery historical messages from L1: #{start_block}..#{end_block}")
discover(
bridge_address,
start_block,
end_block,
json_rpc_named_arguments,
chunk_size
)
{:ok, start_block}
else
{:ok, l1_rollup_init_block}
end
end
# Discovers and imports L1-to-L2 messages initiated on L1 within a specified block range.
#
# This function discovers messages initiated on L1 for transferring information from L1 to L2
# by retrieving relevant logs within the specified block range on L1, focusing on
# `MessageDelivered` events. It processes these logs to extract and construct message
# details. For information not present in the events, RPC calls are made to fetch additional
# transaction details. The discovered messages are then imported into the database.
#
# ## Parameters
# - `bridge_address`: The address of the Arbitrum bridge contract used to filter the logs.
# - `start_block`: The starting block number for log retrieval.
# - `end_block`: The ending block number for log retrieval.
# - `json_rpc_named_argument`: Configuration parameters for the JSON RPC connection.
# - `chunk_size`: The size of chunks for processing RPC calls in batches.
#
# ## Returns
# - N/A
defp discover(bridge_address, start_block, end_block, json_rpc_named_argument, chunk_size) do
logs =
get_logs_for_l1_to_l2_messages(
start_block,
end_block,
bridge_address,
json_rpc_named_argument
)
messages = get_messages_from_logs(logs, json_rpc_named_argument, chunk_size)
unless messages == [] do
log_info("Origins of #{length(messages)} L1-to-L2 messages will be imported")
end
{:ok, _} =
Chain.import(%{
arbitrum_messages: %{params: messages},
timeout: :infinity
})
end
# Retrieves logs representing the `MessageDelivered` events.
defp get_logs_for_l1_to_l2_messages(start_block, end_block, bridge_address, json_rpc_named_arguments)
when start_block <= end_block do
{:ok, logs} =
IndexerHelper.get_logs(
start_block,
end_block,
bridge_address,
[@message_delivered_event],
json_rpc_named_arguments
)
if length(logs) > 0 do
log_debug("Found #{length(logs)} MessageDelivered logs")
end
logs
end
# Extracts complete message details from the provided logs and prepares them for
# database insertion.
#
# This function filters and parses the logs to identify L1-to-L2 messages,
# generating corresponding RPC requests to fetch additional transaction data.
# It executes these RPC requests to obtain the `from` address of each transaction.
# It then completes each message description by merging the fetched `from`
# address and setting the status to `:initiated`, making them ready for database
# import.
#
# ## Parameters
# - `logs`: A list of log entries to be processed.
# - `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
# - `chunk_size`: The size of chunks for batch processing transactions.
#
# ## Returns
# - A list of maps describing discovered messages compatible with the database
# import operation.
defp get_messages_from_logs(logs, json_rpc_named_arguments, chunk_size) do
{messages, txs_requests} = parse_logs_for_l1_to_l2_messages(logs)
txs_to_from = Rpc.execute_transactions_requests_and_get_from(txs_requests, json_rpc_named_arguments, chunk_size)
Enum.map(messages, fn msg ->
Map.merge(msg, %{
originator_address: txs_to_from[msg.originating_transaction_hash],
status: :initiated
})
end)
end
# Parses logs to extract L1-to-L2 message details and prepares RPC requests for transaction data.
#
# This function processes log entries corresponding to `MessageDelivered` events, keeping only
# L1-to-L2 messages identified by one of the following message types: `3`, `7`, `9`, `12`.
# Utilizing information from both the transaction and the log, the function constructs maps
# that partially describe each message and prepares RPC `eth_getTransactionByHash` requests to fetch
# the remaining data needed to complete these message descriptions.
#
# ## Parameters
# - `logs`: A collection of log entries to be processed.
#
# ## Returns
# - A tuple comprising:
# - `messages`: A list of maps, each containing an incomplete representation of a message.
#    - `txs_requests`: A list of `eth_getTransactionByHash` RPC requests structured to fetch
#      the additional data needed to finalize the message descriptions.
defp parse_logs_for_l1_to_l2_messages(logs) do
{messages, txs_requests} =
logs
|> Enum.reduce({[], %{}}, fn event, {messages, txs_requests} ->
{msg_id, type, ts} = message_delivered_event_parse(event)
if type in @types_of_l1_messages_forwarded_to_l2 do
tx_hash = event["transactionHash"]
blk_num = quantity_to_integer(event["blockNumber"])
updated_messages = [
%{
direction: :to_l2,
message_id: msg_id,
originating_transaction_hash: tx_hash,
origination_timestamp: ts,
originating_transaction_block_number: blk_num
}
| messages
]
updated_txs_requests =
Map.put(
txs_requests,
tx_hash,
Rpc.transaction_by_hash_request(%{id: 0, hash: tx_hash})
)
log_debug("L1 to L2 message #{tx_hash} found with the type #{type}")
{updated_messages, updated_txs_requests}
else
{messages, txs_requests}
end
end)
{messages, Map.values(txs_requests)}
end
# Parses the `MessageDelivered` event to extract relevant message details.
defp message_delivered_event_parse(event) do
[
_inbox,
kind,
_sender,
_message_data_hash,
_base_fee_l1,
timestamp
] = decode_data(event["data"], @message_delivered_event_unindexed_params)
message_index = quantity_to_integer(Enum.at(event["topics"], 1))
{message_index, kind, Timex.from_unix(timestamp)}
end
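# Note: the message index arrives as the event's first indexed topic, while the kind
# and the origination timestamp travel in the unindexed data payload decoded above
# via @message_delivered_event_unindexed_params.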
end
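For orientation, an illustrative shape (placeholder values) of one message map
produced by the functions above and handed to `Chain.import/1`:

%{
  direction: :to_l2,
  message_id: 42,
  # fetched via eth_getTransactionByHash
  originator_address: "0x...",
  originating_transaction_hash: "0x...",
  origination_timestamp: ~U[2024-05-01 00:00:00Z],
  originating_transaction_block_number: 19_000_000,
  status: :initiated
}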

@ -159,6 +159,7 @@ defmodule Indexer.Fetcher.Optimism do
non_neg_integer()
) :: {:ok, list()} | {:error, term()}
def get_logs(from_block, to_block, address, topic0, json_rpc_named_arguments, retries) do
# TODO: use the function from the Indexer.Helper module
processed_from_block = if is_integer(from_block), do: integer_to_quantity(from_block), else: from_block
processed_to_block = if is_integer(to_block), do: integer_to_quantity(to_block), else: to_block

@ -516,6 +516,7 @@ defmodule Indexer.Fetcher.PolygonEdge do
non_neg_integer()
) :: {:ok, list()} | {:error, term()}
def get_logs(from_block, to_block, address, topic0, json_rpc_named_arguments, retries) do
# TODO: use the function from the Indexer.Helper module
processed_from_block = if is_integer(from_block), do: integer_to_quantity(from_block), else: from_block
processed_to_block = if is_integer(to_block), do: integer_to_quantity(to_block), else: to_block

@ -21,8 +21,7 @@ defmodule Indexer.Fetcher.PolygonZkevm.Bridge do
alias EthereumJSONRPC.Logs
alias Explorer.Chain
alias Explorer.Chain.PolygonZkevm.Reader
alias Explorer.SmartContract.Reader, as: SmartContractReader
alias Indexer.Helper
alias Indexer.Helper, as: IndexerHelper
alias Indexer.Transform.Addresses
# 32-byte signature of the event BridgeEvent(uint8 leafType, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, uint32 depositCount)
@ -68,8 +67,11 @@ defmodule Indexer.Fetcher.PolygonZkevm.Bridge do
@spec filter_bridge_events(list(), binary()) :: list()
def filter_bridge_events(events, bridge_contract) do
Enum.filter(events, fn event ->
Helper.address_hash_to_string(event.address_hash, true) == bridge_contract and
Enum.member?([@bridge_event, @claim_event_v1, @claim_event_v2], Helper.log_topic_to_string(event.first_topic))
IndexerHelper.address_hash_to_string(event.address_hash, true) == bridge_contract and
Enum.member?(
[@bridge_event, @claim_event_v1, @claim_event_v2],
IndexerHelper.log_topic_to_string(event.first_topic)
)
end)
end
@ -111,7 +113,7 @@ defmodule Indexer.Fetcher.PolygonZkevm.Bridge do
error_message = &"Cannot fetch logs for the block range #{from_block}..#{to_block}. Error: #{inspect(&1)}"
Helper.repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, retries)
IndexerHelper.repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, retries)
end
@doc """
@ -239,7 +241,7 @@ defmodule Indexer.Fetcher.PolygonZkevm.Bridge do
defp blocks_to_timestamps(events, json_rpc_named_arguments) do
events
|> Helper.get_blocks_by_events(json_rpc_named_arguments, 100_000_000)
|> IndexerHelper.get_blocks_by_events(json_rpc_named_arguments, 100_000_000)
|> Enum.reduce(%{}, fn block, acc ->
block_number = quantity_to_integer(Map.get(block, "number"))
timestamp = timestamp_to_datetime(Map.get(block, "timestamp"))
@ -384,14 +386,16 @@ defmodule Indexer.Fetcher.PolygonZkevm.Bridge do
tokens_not_inserted =
tokens_to_insert
|> Enum.reject(fn token ->
Enum.any?(tokens_inserted, fn inserted -> token.address == Helper.address_hash_to_string(inserted.address) end)
Enum.any?(tokens_inserted, fn inserted ->
token.address == IndexerHelper.address_hash_to_string(inserted.address)
end)
end)
|> Enum.map(& &1.address)
tokens_inserted_outside = Reader.token_addresses_to_ids_from_db(tokens_not_inserted)
tokens_inserted
|> Enum.reduce(%{}, fn t, acc -> Map.put(acc, Helper.address_hash_to_string(t.address), t.id) end)
|> Enum.reduce(%{}, fn t, acc -> Map.put(acc, IndexerHelper.address_hash_to_string(t.address), t.id) end)
|> Map.merge(tokens_existing)
|> Map.merge(tokens_inserted_outside)
end
@ -429,7 +433,7 @@ defmodule Indexer.Fetcher.PolygonZkevm.Bridge do
if status == :ok do
response = parse_response(response)
address = Helper.address_hash_to_string(request.contract_address, true)
address = IndexerHelper.address_hash_to_string(request.contract_address, true)
new_data = get_new_data(token_data_acc[address] || %{}, request, response)
@ -455,7 +459,8 @@ defmodule Indexer.Fetcher.PolygonZkevm.Bridge do
end)
|> List.flatten()
{responses, error_messages} = read_contracts_with_retries(requests, @erc20_abi, json_rpc_named_arguments, 3)
{responses, error_messages} =
IndexerHelper.read_contracts_with_retries(requests, @erc20_abi, json_rpc_named_arguments, 3)
if not Enum.empty?(error_messages) or Enum.count(requests) != Enum.count(responses) do
Logger.warning(
@ -466,33 +471,6 @@ defmodule Indexer.Fetcher.PolygonZkevm.Bridge do
{requests, responses}
end
defp read_contracts_with_retries(requests, abi, json_rpc_named_arguments, retries_left) when retries_left > 0 do
responses = SmartContractReader.query_contracts(requests, abi, json_rpc_named_arguments: json_rpc_named_arguments)
error_messages =
Enum.reduce(responses, [], fn {status, error_message}, acc ->
acc ++
if status == :error do
[error_message]
else
[]
end
end)
if Enum.empty?(error_messages) do
{responses, []}
else
retries_left = retries_left - 1
if retries_left == 0 do
{responses, Enum.uniq(error_messages)}
else
:timer.sleep(1000)
read_contracts_with_retries(requests, abi, json_rpc_named_arguments, retries_left)
end
end
end
defp get_new_data(data, request, response) do
if atomized_key(request.method_id) == :symbol do
Map.put(data, :symbol, response)

@ -135,6 +135,8 @@ defmodule Indexer.Fetcher.ZkSync.Utils.Db do
`zksync_lifecycle_l1_transactions` table.
"""
@spec get_indices_for_l1_transactions(map()) :: any()
# TODO: consider a way to remove duplicate with Arbitrum.Utils.Db
# credo:disable-for-next-line Credo.Check.Design.DuplicatedCode
def get_indices_for_l1_transactions(new_l1_txs)
when is_map(new_l1_txs) do
# Get indices for l1 transactions previously handled

@ -84,16 +84,16 @@ defmodule Indexer.Fetcher.ZkSync.Utils.Rpc do
end
end
defp json_txid_to_hash(hash) do
defp json_tx_id_to_hash(hash) do
case hash do
"0x" <> tx_hash -> tx_hash
nil -> @zero_hash
end
end
defp strhash_to_byteshash(hash) do
defp string_hash_to_bytes_hash(hash) do
hash
|> json_txid_to_hash()
|> json_tx_id_to_hash()
|> Base.decode16!(case: :mixed)
end
@ -139,8 +139,8 @@ defmodule Indexer.Fetcher.ZkSync.Utils.Rpc do
case transform_type do
:iso8601_to_datetime -> from_iso8601_to_datetime(value_in_json_response)
:ts_to_datetime -> from_ts_to_datetime(value_in_json_response)
:str_to_txhash -> json_txid_to_hash(value_in_json_response)
:str_to_byteshash -> strhash_to_byteshash(value_in_json_response)
:str_to_txhash -> json_tx_id_to_hash(value_in_json_response)
:str_to_byteshash -> string_hash_to_bytes_hash(value_in_json_response)
_ -> value_in_json_response
end
)

@ -10,12 +10,14 @@ defmodule Indexer.Helper do
fetch_block_number_by_tag: 2,
json_rpc: 2,
quantity_to_integer: 1,
integer_to_quantity: 1,
request: 1
]
alias EthereumJSONRPC.Block.ByNumber
alias EthereumJSONRPC.Blocks
alias EthereumJSONRPC.{Blocks, Transport}
alias Explorer.Chain.Hash
alias Explorer.SmartContract.Reader, as: ContractReader
@finite_retries_number 3
@infinite_retries_number 100_000_000
@ -88,7 +90,19 @@ defmodule Indexer.Helper do
end
end
defp get_safe_block(json_rpc_named_arguments) do
@doc """
Retrieves the safe block if the endpoint supports such an interface; otherwise, it requests the latest block.
## Parameters
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
## Returns
`{block_num, latest}`: A tuple where
- `block_num` is the safe or latest block number.
- `latest` is a boolean, where `true` indicates that `block_num` is the latest block number fetched using the tag `latest`.
"""
@spec get_safe_block(EthereumJSONRPC.json_rpc_named_arguments()) :: {non_neg_integer(), boolean()}
def get_safe_block(json_rpc_named_arguments) do
case get_block_number_by_tag("safe", json_rpc_named_arguments) do
{:ok, safe_block} ->
{safe_block, false}
@ -154,6 +168,70 @@ defmodule Indexer.Helper do
]
end
@doc """
Retrieves event logs from Ethereum-like blockchains within a specified block
range for a given address and set of topics using JSON-RPC.
## Parameters
- `from_block`: The starting block number (integer or hexadecimal string) for the log search.
- `to_block`: The ending block number (integer or hexadecimal string) for the log search.
- `address`: The address of the contract to filter logs from.
- `topics`: List of topics to filter the logs.
- `json_rpc_named_arguments`: Configuration for the JSON-RPC call.
- `id`: (optional) JSON-RPC request identifier, defaults to 0.
- `retries`: (optional) Number of retry attempts if the request fails, defaults to 3.
## Returns
- `{:ok, logs}` on successful retrieval of logs.
- `{:error, reason}` if the request fails after all retries.
"""
@spec get_logs(
non_neg_integer() | binary(),
non_neg_integer() | binary(),
binary(),
[binary()],
EthereumJSONRPC.json_rpc_named_arguments()
) :: {:error, atom() | binary() | map()} | {:ok, any()}
@spec get_logs(
non_neg_integer() | binary(),
non_neg_integer() | binary(),
binary(),
[binary()],
EthereumJSONRPC.json_rpc_named_arguments(),
integer()
) :: {:error, atom() | binary() | map()} | {:ok, any()}
@spec get_logs(
non_neg_integer() | binary(),
non_neg_integer() | binary(),
binary(),
[binary()],
EthereumJSONRPC.json_rpc_named_arguments(),
integer(),
non_neg_integer()
) :: {:error, atom() | binary() | map()} | {:ok, any()}
def get_logs(from_block, to_block, address, topics, json_rpc_named_arguments, id \\ 0, retries \\ 3) do
processed_from_block = if is_integer(from_block), do: integer_to_quantity(from_block), else: from_block
processed_to_block = if is_integer(to_block), do: integer_to_quantity(to_block), else: to_block
req =
request(%{
id: id,
method: "eth_getLogs",
params: [
%{
:fromBlock => processed_from_block,
:toBlock => processed_to_block,
:address => address,
:topics => topics
}
]
})
error_message = &"Cannot fetch logs for the block range #{from_block}..#{to_block}. Error: #{inspect(&1)}"
repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, retries)
end
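# Illustrative call (values are placeholders), mirroring how the Arbitrum workers
# above fetch `MessageDelivered` logs:
#
#   {:ok, logs} =
#     IndexerHelper.get_logs(19_000_000, 19_000_999, bridge_address,
#       [@message_delivered_event], json_rpc_named_arguments)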
@doc """
Prints a progress log when handling something split into block chunks.
"""
@ -204,11 +282,170 @@ defmodule Indexer.Helper do
end
@doc """
Calls the given function with the given arguments
until it returns {:ok, any()} or the given attempts number is reached.
Pauses execution between invokes for 3..1200 seconds (depending on the number of retries).
Retrieves decoded results of `eth_call` requests to contracts, with retry logic for handling errors.
The function attempts the specified number of retries, with a progressive delay between
each retry, for each `eth_call` request. If, after all retries, some requests remain
unsuccessful, it returns a list of unique error messages encountered.
## Parameters
- `requests`: A list of `EthereumJSONRPC.Contract.call()` instances describing the parameters
for `eth_call`, including the contract address and method selector.
- `abi`: A list of maps providing the ABI that describes the input parameters and output
format for the contract functions.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
- `retries_left`: The number of retries allowed for any `eth_call` that returns an error.
## Returns
- `{responses, errors}` where:
- `responses`: A list of tuples `{status, result}`, where `result` is the decoded response
from the corresponding `eth_call` if `status` is `:ok`, or the error message
if `status` is `:error`.
- `errors`: A list of error messages, if any element in `responses` contains `:error`.
"""
@spec repeated_call((... -> any()), list(), (... -> any()), non_neg_integer()) ::
@spec read_contracts_with_retries(
[EthereumJSONRPC.Contract.call()],
[map()],
EthereumJSONRPC.json_rpc_named_arguments(),
integer()
) :: {[{:ok | :error, any()}], list()}
def read_contracts_with_retries(requests, abi, json_rpc_named_arguments, retries_left)
when is_list(requests) and is_list(abi) and is_integer(retries_left) do
do_read_contracts_with_retries(requests, abi, json_rpc_named_arguments, retries_left, 0)
end
defp do_read_contracts_with_retries(requests, abi, json_rpc_named_arguments, retries_left, retries_done) do
responses = ContractReader.query_contracts(requests, abi, json_rpc_named_arguments: json_rpc_named_arguments)
error_messages =
Enum.reduce(responses, [], fn {status, error_message}, acc ->
acc ++
if status == :error do
[error_message]
else
[]
end
end)
if error_messages == [] do
{responses, []}
else
retries_left = retries_left - 1
if retries_left <= 0 do
{responses, Enum.uniq(error_messages)}
else
Logger.error("#{List.first(error_messages)}. Retrying...")
pause_before_retry(retries_done)
do_read_contracts_with_retries(requests, abi, json_rpc_named_arguments, retries_left, retries_done + 1)
end
end
end
@doc """
Executes a batch of RPC calls with retry logic for handling errors.
This function performs a batch of RPC calls, retrying a specified number of times
with a progressive delay between each attempt up to a maximum (20 minutes). If,
after all retries, some calls remain unsuccessful, it returns the batch responses,
which include the results of successful calls or error descriptions.
## Parameters
- `requests`: A list of `Transport.request()` instances describing the RPC calls.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
- `error_message_generator`: A function that generates a string containing the error
message returned by the RPC call.
- `retries_left`: The number of retries allowed for any RPC call that returns an error.
## Returns
- `{:ok, responses}`: When all calls are successful, `responses` is a list of standard
JSON responses, each including `id` and `result` fields.
- `{:error, responses}`: When some calls fail, `responses` is a list containing either
standard JSON responses for successful calls (including `id` and `result` fields)
or errors, which may arrive in a non-standard format.
"""
@spec repeated_batch_rpc_call([Transport.request()], EthereumJSONRPC.json_rpc_named_arguments(), fun(), integer()) ::
{:error, any()} | {:ok, any()}
def repeated_batch_rpc_call(requests, json_rpc_named_arguments, error_message_generator, retries_left)
when is_list(requests) and is_function(error_message_generator) and is_integer(retries_left) do
do_repeated_batch_rpc_call(requests, json_rpc_named_arguments, error_message_generator, retries_left, 0)
end
# credo:disable-for-next-line Credo.Check.Refactor.CyclomaticComplexity
defp do_repeated_batch_rpc_call(
requests,
json_rpc_named_arguments,
error_message_generator,
retries_left,
retries_done
) do
case json_rpc(requests, json_rpc_named_arguments) do
{:ok, responses_list} = batch_responses ->
standardized_error =
Enum.reduce_while(responses_list, %{}, fn one_response, acc ->
# credo:disable-for-next-line Credo.Check.Refactor.Nesting
case one_response do
%{error: error_msg_with_code} -> {:halt, error_msg_with_code}
_ -> {:cont, acc}
end
end)
case standardized_error do
%{code: _, message: error_msg} -> {:error, error_msg, batch_responses}
_ -> {:ok, batch_responses, []}
end
{:error, message} = err ->
{:error, message, err}
end
|> case do
# credo:disable-for-previous-line Credo.Check.Refactor.PipeChainStart
{:ok, responses, _} ->
responses
{:error, message, responses_or_error} ->
retries_left = retries_left - 1
if retries_left <= 0 do
Logger.error(error_message_generator.(message))
responses_or_error
else
Logger.error("#{error_message_generator.(message)} Retrying...")
pause_before_retry(retries_done)
do_repeated_batch_rpc_call(
requests,
json_rpc_named_arguments,
error_message_generator,
retries_left,
retries_done + 1
)
end
end
end
@doc """
Repeatedly executes a specified function with given arguments until it succeeds
or reaches the limit of retry attempts. It pauses between retries, with the
pause duration increasing progressively up to a maximum (20 minutes).
The main intent of the function is to robustly handle RPC calls that may fail.
## Parameters
- `func`: The function to be called.
- `args`: List of arguments to pass to the function.
- `error_message`: A function that takes an error message and returns a log message.
- `retries_left`: The number of attempts left.
- `retries_done`: (optional) The number of attempts already made, defaults to 0.
## Returns
- `{:ok, result}` on success.
- `{:error, reason}` if retries are exhausted without success.
"""
@spec repeated_call(function(), list(), function(), non_neg_integer()) ::
{:ok, any()} | {:error, binary() | atom() | map()}
@spec repeated_call(function(), list(), function(), non_neg_integer(), non_neg_integer()) ::
{:ok, any()} | {:error, binary() | atom() | map()}
def repeated_call(func, args, error_message, retries_left, retries_done \\ 0) do
case apply(func, args) do
@ -223,10 +460,7 @@ defmodule Indexer.Helper do
err
else
Logger.error("#{error_message.(message)} Retrying...")
# wait up to 20 minutes
:timer.sleep(min(3000 * Integer.pow(2, retries_done), 1_200_000))
pause_before_retry(retries_done)
repeated_call(func, args, error_message, retries_left, retries_done + 1)
end
end
@ -307,4 +541,9 @@ defmodule Indexer.Helper do
Hash.to_string(topic)
end
end
# Pauses the process, incrementally increasing the sleep time up to a maximum of 20 minutes.
defp pause_before_retry(retries_done) do
:timer.sleep(min(3000 * Integer.pow(2, retries_done), 1_200_000))
end
end
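A quick sketch of the delay progression implemented by `pause_before_retry/1` above,
assuming successive retries with retries_done = 0, 1, 2, ...:

for retries_done <- 0..10, do: min(3000 * Integer.pow(2, retries_done), 1_200_000)
# => [3000, 6000, 12000, 24000, 48000, 96000, 192000, 384000, 768000, 1200000, 1200000]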

@ -46,6 +46,9 @@ defmodule Indexer.Supervisor do
Withdrawal
}
alias Indexer.Fetcher.Arbitrum.RollupMessagesCatchup, as: ArbitrumRollupMessagesCatchup
alias Indexer.Fetcher.Arbitrum.TrackingBatchesStatuses, as: ArbitrumTrackingBatchesStatuses
alias Indexer.Fetcher.Arbitrum.TrackingMessagesOnL1, as: ArbitrumTrackingMessagesOnL1
alias Indexer.Fetcher.ZkSync.BatchesStatusTracker, as: ZkSyncBatchesStatusTracker
alias Indexer.Fetcher.ZkSync.TransactionBatch, as: ZkSyncTransactionBatch
@ -177,6 +180,15 @@ defmodule Indexer.Supervisor do
configure(Indexer.Fetcher.PolygonZkevm.TransactionBatch.Supervisor, [
[json_rpc_named_arguments: json_rpc_named_arguments, memory_monitor: memory_monitor]
]),
configure(ArbitrumTrackingMessagesOnL1.Supervisor, [
[json_rpc_named_arguments: json_rpc_named_arguments, memory_monitor: memory_monitor]
]),
configure(ArbitrumTrackingBatchesStatuses.Supervisor, [
[json_rpc_named_arguments: json_rpc_named_arguments, memory_monitor: memory_monitor]
]),
configure(ArbitrumRollupMessagesCatchup.Supervisor, [
[json_rpc_named_arguments: json_rpc_named_arguments, memory_monitor: memory_monitor]
]),
{Indexer.Fetcher.Beacon.Blob.Supervisor, [[memory_monitor: memory_monitor]]},
# Out-of-band fetchers

@ -0,0 +1,44 @@
defmodule Indexer.Transform.Arbitrum.Messaging do
@moduledoc """
Helper functions for transforming data for Arbitrum cross-chain messages.
"""
alias Indexer.Fetcher.Arbitrum.Messaging, as: ArbitrumMessages
require Logger
@doc """
Parses and combines lists of rollup transactions and logs to identify and process both L1-to-L2 and L2-to-L1 messages.
This function utilizes two filtering operations: one that identifies L1-to-L2
message completions from a list of transactions and another that identifies
L2-to-L1 message initiations from a list of logs. Each filter constructs
a detailed message structure for the respective direction. The function then
combines these messages into a single list suitable for database import.
## Parameters
- `transactions`: A list of rollup transaction entries to filter for L1-to-L2 messages.
- `logs`: A list of log entries to filter for L2-to-L1 messages.
## Returns
- A combined list of detailed message maps from both L1-to-L2 completions and
L2-to-L1 initiations, ready for database import.
"""
@spec parse(list(), list()) :: list()
def parse(transactions, logs) do
prev_metadata = Logger.metadata()
Logger.metadata(fetcher: :arbitrum_bridge_l2)
l1_to_l2_completion_ops =
transactions
|> ArbitrumMessages.filter_l1_to_l2_messages()
l2_to_l1_initiating_ops =
logs
|> ArbitrumMessages.filter_l2_to_l1_messages()
Logger.reset_metadata(prev_metadata)
l1_to_l2_completion_ops ++ l2_to_l1_initiating_ops
end
end
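A hedged usage sketch (variable names assumed) showing where `parse/2` sits in a
transform pipeline:

# `transactions` and `logs` are assumed to come from an already-fetched block range.
messages = Indexer.Transform.Arbitrum.Messaging.parse(transactions, logs)
# The result feeds the same import call used by the workers above:
#   Chain.import(%{arbitrum_messages: %{params: messages}, timeout: :infinity})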

@ -13,8 +13,7 @@ defmodule Indexer.Transform.TransactionActions do
alias Explorer.Chain.Cache.{TransactionActionTokensData, TransactionActionUniswapPools}
alias Explorer.Chain.{Address, Hash, Token, TransactionAction}
alias Explorer.Repo
alias Explorer.SmartContract.Reader
alias Indexer.Helper
alias Indexer.Helper, as: IndexerHelper
@mainnet 1
@goerli 5
@ -198,7 +197,7 @@ defmodule Indexer.Transform.TransactionActions do
@aave_v3_liquidation_call_event
],
sanitize_first_topic(log.first_topic)
) && Helper.address_hash_to_string(log.address_hash, true) == pool_address
) && IndexerHelper.address_hash_to_string(log.address_hash, true) == pool_address
end)
end
@ -290,12 +289,12 @@ defmodule Indexer.Transform.TransactionActions do
debt_address =
log.third_topic
|> Helper.log_topic_to_string()
|> IndexerHelper.log_topic_to_string()
|> truncate_address_hash()
collateral_address =
log.second_topic
|> Helper.log_topic_to_string()
|> IndexerHelper.log_topic_to_string()
|> truncate_address_hash()
case get_token_data([debt_address, collateral_address]) do
@ -330,7 +329,7 @@ defmodule Indexer.Transform.TransactionActions do
when type in ["borrow", "supply", "withdraw", "repay", "flash_loan"] do
address =
address_topic
|> Helper.log_topic_to_string()
|> IndexerHelper.log_topic_to_string()
|> truncate_address_hash()
case get_token_data([address]) do
@ -360,7 +359,7 @@ defmodule Indexer.Transform.TransactionActions do
defp aave_handle_event(type, log, address_topic, chain_id) when type in ["enable_collateral", "disable_collateral"] do
address =
address_topic
|> Helper.log_topic_to_string()
|> IndexerHelper.log_topic_to_string()
|> truncate_address_hash()
case get_token_data([address]) do
@ -415,7 +414,7 @@ defmodule Indexer.Transform.TransactionActions do
first_topic
) ||
(first_topic == @uniswap_v3_transfer_nft_event &&
Helper.address_hash_to_string(log.address_hash, true) == uniswap_v3_positions_nft)
IndexerHelper.address_hash_to_string(log.address_hash, true) == uniswap_v3_positions_nft)
end)
end
@ -424,7 +423,7 @@ defmodule Indexer.Transform.TransactionActions do
with false <- first_topic == @uniswap_v3_transfer_nft_event,
# check UniswapV3Pool contract is legitimate
pool_address <- Helper.address_hash_to_string(log.address_hash, true),
pool_address <- IndexerHelper.address_hash_to_string(log.address_hash, true),
false <- is_nil(legitimate[pool_address]),
false <- Enum.empty?(legitimate[pool_address]),
# this is legitimate uniswap pool, so handle this event
@ -466,19 +465,19 @@ defmodule Indexer.Transform.TransactionActions do
# This is Transfer event for NFT
from =
log.second_topic
|> Helper.log_topic_to_string()
|> IndexerHelper.log_topic_to_string()
|> truncate_address_hash()
# credo:disable-for-next-line
if from == burn_address_hash_string() do
to =
log.third_topic
|> Helper.log_topic_to_string()
|> IndexerHelper.log_topic_to_string()
|> truncate_address_hash()
[token_id] =
log.fourth_topic
|> Helper.log_topic_to_string()
|> IndexerHelper.log_topic_to_string()
|> decode_data([{:uint, 256}])
mint_nft_ids = Map.put_new(acc, to, %{ids: [], log_index: log.index})
@ -614,7 +613,7 @@ defmodule Indexer.Transform.TransactionActions do
sanitize_first_topic(log.first_topic) != @uniswap_v3_transfer_nft_event
end)
|> Enum.reduce(addresses_acc, fn log, acc ->
pool_address = Helper.address_hash_to_string(log.address_hash, true)
pool_address = IndexerHelper.address_hash_to_string(log.address_hash, true)
Map.put(acc, pool_address, true)
end)
end)
@ -680,10 +679,14 @@ defmodule Indexer.Transform.TransactionActions do
end)
|> Enum.map(fn {pool_address, pool} ->
token0 =
if Helper.address_correct?(pool.token0), do: String.downcase(pool.token0), else: burn_address_hash_string()
if IndexerHelper.address_correct?(pool.token0),
do: String.downcase(pool.token0),
else: burn_address_hash_string()
token1 =
if Helper.address_correct?(pool.token1), do: String.downcase(pool.token1), else: burn_address_hash_string()
if IndexerHelper.address_correct?(pool.token1),
do: String.downcase(pool.token1),
else: burn_address_hash_string()
fee = if pool.fee == "", do: 0, else: pool.fee
@ -696,10 +699,7 @@ defmodule Indexer.Transform.TransactionActions do
}
end)
max_retries = Application.get_env(:explorer, :token_functions_reader_max_retries)
{responses_get_pool, error_messages} =
read_contracts_with_retries(requests_get_pool, @uniswap_v3_factory_abi, max_retries)
{responses_get_pool, error_messages} = read_contracts(requests_get_pool, @uniswap_v3_factory_abi)
if not Enum.empty?(error_messages) or Enum.count(requests_get_pool) != Enum.count(responses_get_pool) do
Logger.error(
@ -727,9 +727,7 @@ defmodule Indexer.Transform.TransactionActions do
end)
|> List.flatten()
max_retries = Application.get_env(:explorer, :token_functions_reader_max_retries)
{responses, error_messages} = read_contracts_with_retries(requests, @uniswap_v3_pool_abi, max_retries)
{responses, error_messages} = read_contracts(requests, @uniswap_v3_pool_abi)
if not Enum.empty?(error_messages) do
incorrect_pools = uniswap_get_incorrect_pools(requests, responses)
@ -959,8 +957,7 @@ defmodule Indexer.Transform.TransactionActions do
end)
|> List.flatten()
max_retries = Application.get_env(:explorer, :token_functions_reader_max_retries)
{responses, error_messages} = read_contracts_with_retries(requests, @erc20_abi, max_retries)
{responses, error_messages} = read_contracts(requests, @erc20_abi)
if not Enum.empty?(error_messages) or Enum.count(requests) != Enum.count(responses) do
Logger.warning(
@ -976,34 +973,15 @@ defmodule Indexer.Transform.TransactionActions do
|> Enum.group_by(& &1.transaction_hash)
end
defp read_contracts_with_retries(requests, abi, retries_left) when retries_left > 0 do
responses = Reader.query_contracts(requests, abi)
error_messages =
Enum.reduce(responses, [], fn {status, error_message}, acc ->
acc ++
if status == :error do
[error_message]
else
[]
end
end)
if Enum.empty?(error_messages) do
{responses, []}
else
retries_left = retries_left - 1
defp read_contracts(requests, abi) do
max_retries = Application.get_env(:explorer, :token_functions_reader_max_retries)
json_rpc_named_arguments = Application.get_env(:explorer, :json_rpc_named_arguments)
if retries_left == 0 do
{responses, Enum.uniq(error_messages)}
else
read_contracts_with_retries(requests, abi, retries_left)
end
end
IndexerHelper.read_contracts_with_retries(requests, abi, json_rpc_named_arguments, max_retries)
end
defp sanitize_first_topic(first_topic) do
if is_nil(first_topic), do: "", else: String.downcase(Helper.log_topic_to_string(first_topic))
if is_nil(first_topic), do: "", else: String.downcase(IndexerHelper.log_topic_to_string(first_topic))
end
defp truncate_address_hash(nil), do: burn_address_hash_string()

@ -21,6 +21,7 @@ defmodule ConfigHelper do
:filecoin -> base_repos ++ [Explorer.Repo.Filecoin]
:stability -> base_repos ++ [Explorer.Repo.Stability]
:zksync -> base_repos ++ [Explorer.Repo.ZkSync]
:arbitrum -> base_repos ++ [Explorer.Repo.Arbitrum]
_ -> base_repos
end

@ -249,7 +249,7 @@ precompiled_config_base_dir =
precompiled_config_default_path =
case ConfigHelper.chain_type() do
"arbitrum" -> "#{precompiled_config_base_dir}config/assets/precompiles-arbitrum.json"
:arbitrum -> "#{precompiled_config_base_dir}config/assets/precompiles-arbitrum.json"
_ -> nil
end
@ -852,6 +852,47 @@ config :indexer, Indexer.Fetcher.ZkSync.BatchesStatusTracker,
config :indexer, Indexer.Fetcher.ZkSync.BatchesStatusTracker.Supervisor,
enabled: ConfigHelper.parse_bool_env_var("INDEXER_ZKSYNC_BATCHES_ENABLED")
config :indexer, Indexer.Fetcher.Arbitrum.Messaging,
arbsys_contract:
ConfigHelper.safe_get_env("INDEXER_ARBITRUM_ARBSYS_CONTRACT", "0x0000000000000000000000000000000000000064")
config :indexer, Indexer.Fetcher.Arbitrum,
l1_rpc: System.get_env("INDEXER_ARBITRUM_L1_RPC"),
l1_rpc_chunk_size: ConfigHelper.parse_integer_env_var("INDEXER_ARBITRUM_L1_RPC_CHUNK_SIZE", 20),
l1_rpc_block_range: ConfigHelper.parse_integer_env_var("INDEXER_ARBITRUM_L1_RPC_HISTORICAL_BLOCKS_RANGE", 1000),
l1_rollup_address: System.get_env("INDEXER_ARBITRUM_L1_ROLLUP_CONTRACT"),
l1_rollup_init_block: ConfigHelper.parse_integer_env_var("INDEXER_ARBITRUM_L1_ROLLUP_INIT_BLOCK", 1),
l1_start_block: ConfigHelper.parse_integer_env_var("INDEXER_ARBITRUM_L1_COMMON_START_BLOCK", 0),
rollup_chunk_size: ConfigHelper.parse_integer_env_var("INDEXER_ARBITRUM_ROLLUP_CHUNK_SIZE", 20)
config :indexer, Indexer.Fetcher.Arbitrum.TrackingMessagesOnL1,
recheck_interval: ConfigHelper.parse_time_env_var("INDEXER_ARBITRUM_TRACKING_MESSAGES_ON_L1_RECHECK_INTERVAL", "20s")
config :indexer, Indexer.Fetcher.Arbitrum.TrackingMessagesOnL1.Supervisor,
enabled: ConfigHelper.parse_bool_env_var("INDEXER_ARBITRUM_BRIDGE_MESSAGES_TRACKING_ENABLED")
config :indexer, Indexer.Fetcher.Arbitrum.TrackingBatchesStatuses,
recheck_interval: ConfigHelper.parse_time_env_var("INDEXER_ARBITRUM_BATCHES_TRACKING_RECHECK_INTERVAL", "20s"),
track_l1_tx_finalization:
ConfigHelper.parse_bool_env_var("INDEXER_ARBITRUM_BATCHES_TRACKING_L1_FINALIZATION_CHECK_ENABLED", "false"),
messages_to_blocks_shift:
ConfigHelper.parse_integer_env_var("INDEXER_ARBITRUM_BATCHES_TRACKING_MESSAGES_TO_BLOCKS_SHIFT", 0),
finalized_confirmations: ConfigHelper.parse_bool_env_var("INDEXER_ARBITRUM_CONFIRMATIONS_TRACKING_FINALIZED", "true"),
new_batches_limit: ConfigHelper.parse_integer_env_var("INDEXER_ARBITRUM_NEW_BATCHES_LIMIT", 10)
config :indexer, Indexer.Fetcher.Arbitrum.TrackingBatchesStatuses.Supervisor,
enabled: ConfigHelper.parse_bool_env_var("INDEXER_ARBITRUM_BATCHES_TRACKING_ENABLED")
config :indexer, Indexer.Fetcher.Arbitrum.RollupMessagesCatchup,
recheck_interval: ConfigHelper.parse_time_env_var("INDEXER_ARBITRUM_MISSED_MESSAGES_RECHECK_INTERVAL", "1h"),
messages_to_l2_blocks_depth:
ConfigHelper.parse_integer_env_var("INDEXER_ARBITRUM_MISSED_MESSAGES_TO_L2_BLOCK_DEPTH", 50),
messages_to_l1_blocks_depth:
ConfigHelper.parse_integer_env_var("INDEXER_ARBITRUM_MISSED_MESSAGES_TO_L1_BLOCK_DEPTH", 1000)
config :indexer, Indexer.Fetcher.Arbitrum.RollupMessagesCatchup.Supervisor,
enabled: ConfigHelper.parse_bool_env_var("INDEXER_ARBITRUM_BRIDGE_MESSAGES_TRACKING_ENABLED")
config :indexer, Indexer.Fetcher.RootstockData.Supervisor,
disabled?:
ConfigHelper.chain_type() != :rsk || ConfigHelper.parse_bool_env_var("INDEXER_DISABLE_ROOTSTOCK_DATA_FETCHER")

@ -156,6 +156,15 @@ config :explorer, Explorer.Repo.Filecoin,
url: System.get_env("DATABASE_URL"),
pool_size: 1
# Configure Arbitrum database
config :explorer, Explorer.Repo.Arbitrum,
database: database,
hostname: hostname,
url: System.get_env("DATABASE_URL"),
# this repo is actually not started, and its pool size remains unused;
# separate repos per CHAIN_TYPE exist only to keep DB schema updates relevant to the current chain type
pool_size: 1
# Configures Stability database
config :explorer, Explorer.Repo.Stability,
database: database,

@ -121,6 +121,14 @@ config :explorer, Explorer.Repo.Filecoin,
pool_size: 1,
ssl: ExplorerConfigHelper.ssl_enabled?()
# Configures Arbitrum database
config :explorer, Explorer.Repo.Arbitrum,
url: System.get_env("DATABASE_URL"),
# this repo is actually not started, and its pool size remains unused;
# separate repos per CHAIN_TYPE exist only to keep DB schema updates relevant to the current chain type
pool_size: 1,
ssl: ExplorerConfigHelper.ssl_enabled?()
# Configures Stability database
config :explorer, Explorer.Repo.Stability,
url: System.get_env("DATABASE_URL"),

@ -13,7 +13,6 @@
"AIRTABLE",
"ARGMAX",
"Aiubo",
"Arbitrum",
"Asfpp",
"Asfpp",
"Autodetection",
@ -114,6 +113,10 @@
"alloc",
"amzootyukbugmx",
"apikey",
"APIV",
"Arbitrum",
"arbsys",
"ARGMAX",
"arounds",
"asda",
"atoken",
@ -136,6 +139,7 @@
"bizbuz",
"blockheight",
"blockless",
"blocknum",
"blockno",
"blockreward",
"blockscout",
@ -155,6 +159,7 @@
"cacerts",
"callcode",
"calltracer",
"callvalue",
"capturelog",
"cattributes",
"cellspacing",
@ -192,6 +197,8 @@
"contractname",
"cooldown",
"cooltesthost",
"crosschain",
"crosslevel",
"crossorigin",
"CRYPTOCOMPARE",
"ctbs",
@ -459,8 +466,10 @@
"reqs",
"rerequest",
"reshows",
"retcode",
"retryable",
"returnaddress",
"retval",
"reuseaddr",
"rollup",
"rollups",
@ -554,6 +563,7 @@
"unclosable",
"unfetched",
"unfinalized",
"unindexed",
"unknownc",
"unknowne",
"unmarshal",
@ -593,6 +603,7 @@
"xbaddress",
"xdai",
"xffff",
"xlevel",
"xlink",
"xmark",
"xmlhttprequest",
