zksync chain type support (#9631)

* zkSync customizations

* Insert placeholders instead of deriving current token balances

* ZkSync Batches status tracking (#9080)

* initial version of batch tracking

* missed file added

* attempt to add DB migration

* Finalized L1 txs tracking

* keep batches in DB

* Batches status tracker introduction

* rpc endpoints to get batches data

* extended views for blocks and transactions

* Refactoring of fetchers

* Fetch historical blocks

* handle_info calls simplified

* Ability to recover missed blocks

* zksync info in a separate sub-map

* added doc comments, part 1

* finalized doc comments

* actual batches count instead of the last imported batch

* fix formatting

* credo fixes

* Address dialyzer warnings

* Fix spelling

* remaining issues with spelling and dialyzer

* Attempt to address BlockScout Web Tests issue

* review comments addressed, part 1

* review comments addressed, part 2

* collection of all_options for the import module reworked to get rid of dialyzer findings

* removed unnecessary functionality

* proper import

* Credo fixes

* Add CHAIN_TYPE=zksync to image generation workflow

* Proper handling of empty transactions list in eth_getBlockByNumber

* Merge master

* Address merge issues

* Fix format

* Refactoring of chain type specific code for block and transaction views

* Consistent name for functions

* add exceptions for Credo.Check.Design.AliasUsage

* Fix rebasing conflicts

* Fix rebase conflicts

* fix issue with stability fees in tx view

* make Stability-related tests dependent on chain type at compile time

* move zksync related migration

* Changelog updated

* removal of duplicated migration

* List r, s, v as optional attributes for transaction

---------

Co-authored-by: Viktor Baranov <baranov.viktor.27@gmail.com>
Co-authored-by: Qwerty5Uiop <alex000010@bk.ru>
Alexander Kolotov 8 months ago committed by GitHub
parent 400b45b145
commit 51d82f1dbf
Files changed (number of changed lines in parentheses):
  1. .github/workflows/publish-docker-image-for-zksync.yml (1)
  2. CHANGELOG.md (1)
  3. apps/block_scout_web/lib/block_scout_web/api_router.ex (17)
  4. apps/block_scout_web/lib/block_scout_web/controllers/api/v2/block_controller.ex (23)
  5. apps/block_scout_web/lib/block_scout_web/controllers/api/v2/transaction_controller.ex (25)
  6. apps/block_scout_web/lib/block_scout_web/controllers/api/v2/zksync_controller.ex (120)
  7. apps/block_scout_web/lib/block_scout_web/views/api/v2/block_view.ex (30)
  8. apps/block_scout_web/lib/block_scout_web/views/api/v2/ethereum_view.ex (41)
  9. apps/block_scout_web/lib/block_scout_web/views/api/v2/optimism_view.ex (31)
  10. apps/block_scout_web/lib/block_scout_web/views/api/v2/polygon_edge_view.ex (47)
  11. apps/block_scout_web/lib/block_scout_web/views/api/v2/polygon_zkevm_view.ex (28)
  12. apps/block_scout_web/lib/block_scout_web/views/api/v2/rootstock_view.ex (19)
  13. apps/block_scout_web/lib/block_scout_web/views/api/v2/stability_view.ex (126)
  14. apps/block_scout_web/lib/block_scout_web/views/api/v2/suave_view.ex (130)
  15. apps/block_scout_web/lib/block_scout_web/views/api/v2/transaction_view.ex (317)
  16. apps/block_scout_web/lib/block_scout_web/views/api/v2/zksync_view.ex (235)
  17. apps/block_scout_web/mix.exs (3)
  18. apps/block_scout_web/test/block_scout_web/controllers/api/v2/transaction_controller_test.exs (14)
  19. apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/log.ex (5)
  20. apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/receipt.ex (6)
  21. apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/transaction.ex (63)
  22. apps/explorer/config/dev.exs (3)
  23. apps/explorer/config/prod.exs (4)
  24. apps/explorer/config/test.exs (1)
  25. apps/explorer/lib/explorer/application.ex (1)
  26. apps/explorer/lib/explorer/chain/block.ex (13)
  27. apps/explorer/lib/explorer/chain/import.ex (2)
  28. apps/explorer/lib/explorer/chain/import/runner/blocks.ex (86)
  29. apps/explorer/lib/explorer/chain/import/runner/zksync/batch_blocks.ex (79)
  30. apps/explorer/lib/explorer/chain/import/runner/zksync/batch_transactions.ex (79)
  31. apps/explorer/lib/explorer/chain/import/runner/zksync/lifecycle_transactions.ex (103)
  32. apps/explorer/lib/explorer/chain/import/runner/zksync/transaction_batches.ex (122)
  33. apps/explorer/lib/explorer/chain/import/stage/address_referencing.ex (30)
  34. apps/explorer/lib/explorer/chain/import/stage/addresses.ex (26)
  35. apps/explorer/lib/explorer/chain/import/stage/block_referencing.ex (13)
  36. apps/explorer/lib/explorer/chain/transaction.ex (169)
  37. apps/explorer/lib/explorer/chain/zksync/batch_block.ex (37)
  38. apps/explorer/lib/explorer/chain/zksync/batch_transaction.ex (37)
  39. apps/explorer/lib/explorer/chain/zksync/lifecycle_transaction.ex (38)
  40. apps/explorer/lib/explorer/chain/zksync/reader.ex (339)
  41. apps/explorer/lib/explorer/chain/zksync/transaction_batch.ex (83)
  42. apps/explorer/lib/explorer/repo.ex (24)
  43. apps/explorer/priv/zk_sync/migrations/20211202082101_make_tranaction_r_s_v_optional.exs (17)
  44. apps/explorer/priv/zk_sync/migrations/20231213171043_create_zksync_tables.exs (82)
  45. apps/explorer/test/explorer/chain/import/runner/blocks_test.exs (96)
  46. apps/explorer/test/explorer/chain/import_test.exs (184)
  47. apps/indexer/lib/indexer/fetcher/polygon_zkevm/transaction_batch.ex (10)
  48. apps/indexer/lib/indexer/fetcher/zksync/batches_status_tracker.ex (242)
  49. apps/indexer/lib/indexer/fetcher/zksync/discovery/batches_data.ex (413)
  50. apps/indexer/lib/indexer/fetcher/zksync/discovery/workers.ex (163)
  51. apps/indexer/lib/indexer/fetcher/zksync/status_tracking/committed.ex (78)
  52. apps/indexer/lib/indexer/fetcher/zksync/status_tracking/common.ex (173)
  53. apps/indexer/lib/indexer/fetcher/zksync/status_tracking/executed.ex (78)
  54. apps/indexer/lib/indexer/fetcher/zksync/status_tracking/proven.ex (137)
  55. apps/indexer/lib/indexer/fetcher/zksync/transaction_batch.ex (149)
  56. apps/indexer/lib/indexer/fetcher/zksync/utils/db.ex (204)
  57. apps/indexer/lib/indexer/fetcher/zksync/utils/logging.ex (143)
  58. apps/indexer/lib/indexer/fetcher/zksync/utils/rpc.ex (403)
  59. apps/indexer/lib/indexer/supervisor.ex (9)
  60. config/config_helper.exs (1)
  61. config/runtime.exs (15)
  62. config/runtime/dev.exs (11)
  63. config/runtime/prod.exs (10)
  64. cspell.json (193)
  65. docker-compose/envs/common-blockscout.env (6)

@ -37,3 +37,4 @@ jobs:
CACHE_ADDRESS_WITH_BALANCES_UPDATE_INTERVAL=
BLOCKSCOUT_VERSION=v${{ env.RELEASE_VERSION }}-beta.+commit.${{ env.SHORT_SHA }}
RELEASE_VERSION=${{ env.RELEASE_VERSION }}
CHAIN_TYPE=zksync

@ -4,6 +4,7 @@
### Features
- [#9631](https://github.com/blockscout/blockscout/pull/9631) - Initial support of zksync chain type
- [#9490](https://github.com/blockscout/blockscout/pull/9490) - Add blob transaction counter and filter in block view
- [#9486](https://github.com/blockscout/blockscout/pull/9486) - Massive blocks fetcher
- [#9473](https://github.com/blockscout/blockscout/pull/9473) - Add user_op interpretation

@ -207,6 +207,10 @@ defmodule BlockScoutWeb.ApiRouter do
get("/zkevm-batch/:batch_number", V2.TransactionController, :polygon_zkevm_batch)
end
if Application.compile_env(:explorer, :chain_type) == "zksync" do
get("/zksync-batch/:batch_number", V2.TransactionController, :zksync_batch)
end
if Application.compile_env(:explorer, :chain_type) == "suave" do
get("/execution-node/:execution_node_hash_param", V2.TransactionController, :execution_node)
end
@ -281,6 +285,11 @@ defmodule BlockScoutWeb.ApiRouter do
get("/zkevm/batches/confirmed", V2.PolygonZkevmController, :batches_confirmed)
get("/zkevm/batches/latest-number", V2.PolygonZkevmController, :batch_latest_number)
end
if Application.compile_env(:explorer, :chain_type) == "zksync" do
get("/zksync/batches/confirmed", V2.ZkSyncController, :batches_confirmed)
get("/zksync/batches/latest-number", V2.ZkSyncController, :batch_latest_number)
end
end
scope "/stats" do
@ -379,6 +388,14 @@ defmodule BlockScoutWeb.ApiRouter do
end
end
end
scope "/zksync" do
if Application.compile_env(:explorer, :chain_type) == "zksync" do
get("/batches", V2.ZkSyncController, :batches)
get("/batches/count", V2.ZkSyncController, :batches_count)
get("/batches/:batch_number", V2.ZkSyncController, :batch)
end
end
end
scope "/v1", as: :api_v1 do

@ -27,6 +27,15 @@ defmodule BlockScoutWeb.API.V2.BlockController do
[transactions: :beacon_blob_transaction] => :optional
}
"zksync" ->
@chain_type_transaction_necessity_by_association %{}
@chain_type_block_necessity_by_association %{
:zksync_batch => :optional,
:zksync_commit_transaction => :optional,
:zksync_prove_transaction => :optional,
:zksync_execute_transaction => :optional
}
_ ->
@chain_type_transaction_necessity_by_association %{}
@chain_type_block_necessity_by_association %{}
@ -62,6 +71,20 @@ defmodule BlockScoutWeb.API.V2.BlockController do
api?: true
]
@block_params [
necessity_by_association:
%{
[miner: :names] => :optional,
:uncles => :optional,
:nephews => :optional,
:rewards => :optional,
:transactions => :optional,
:withdrawals => :optional
}
|> Map.merge(@chain_type_block_necessity_by_association),
api?: true
]
action_fallback(BlockScoutWeb.API.V2.FallbackController)
def block(conn, %{"block_hash_or_number" => block_hash_or_number}) do

@ -32,6 +32,7 @@ defmodule BlockScoutWeb.API.V2.TransactionController do
alias Explorer.Chain.Beacon.Reader, as: BeaconReader
alias Explorer.Chain.{Hash, Transaction}
alias Explorer.Chain.PolygonZkevm.Reader
alias Explorer.Chain.ZkSync.Reader
alias Indexer.Fetcher.FirstTraceOnDemand
action_fallback(BlockScoutWeb.API.V2.FallbackController)
@ -101,6 +102,13 @@ defmodule BlockScoutWeb.API.V2.TransactionController do
|> Map.put(:zkevm_sequence_transaction, :optional)
|> Map.put(:zkevm_verify_transaction, :optional)
"zksync" ->
necessity_by_association_with_actions
|> Map.put(:zksync_batch, :optional)
|> Map.put(:zksync_commit_transaction, :optional)
|> Map.put(:zksync_prove_transaction, :optional)
|> Map.put(:zksync_execute_transaction, :optional)
"suave" ->
necessity_by_association_with_actions
|> Map.put(:logs, :optional)
@ -168,6 +176,23 @@ defmodule BlockScoutWeb.API.V2.TransactionController do
|> render(:transactions, %{transactions: transactions |> maybe_preload_ens(), items: true})
end
@doc """
Function to handle GET requests to `/api/v2/transactions/zksync-batch/:batch_number` endpoint.
It renders the list of L2 transactions bound to the specified batch.
"""
@spec zksync_batch(Plug.Conn.t(), map()) :: Plug.Conn.t()
def zksync_batch(conn, %{"batch_number" => batch_number} = _params) do
transactions =
batch_number
|> Reader.batch_transactions(api?: true)
|> Enum.map(fn tx -> tx.hash end)
|> Chain.hashes_to_transactions(api?: true, necessity_by_association: @transaction_necessity_by_association)
conn
|> put_status(200)
|> render(:transactions, %{transactions: transactions, items: true})
end
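# The lookup above is two-step by design: Reader.batch_transactions/2 returns
# only batch-membership rows, so the hashes are re-queried through
# Chain.hashes_to_transactions/2 to apply the standard
# @transaction_necessity_by_association preloads.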
def execution_node(conn, %{"execution_node_hash_param" => execution_node_hash_string} = params) do
with {:format, {:ok, execution_node_hash}} <- {:format, Chain.string_to_address_hash(execution_node_hash_string)} do
full_options =

@ -0,0 +1,120 @@
defmodule BlockScoutWeb.API.V2.ZkSyncController do
use BlockScoutWeb, :controller
import BlockScoutWeb.Chain,
only: [
next_page_params: 4,
paging_options: 1,
split_list_by_page: 1
]
alias Explorer.Chain.ZkSync.{Reader, TransactionBatch}
action_fallback(BlockScoutWeb.API.V2.FallbackController)
@batch_necessity_by_association %{
:commit_transaction => :optional,
:prove_transaction => :optional,
:execute_transaction => :optional,
:l2_transactions => :optional
}
@batches_necessity_by_association %{
:commit_transaction => :optional,
:prove_transaction => :optional,
:execute_transaction => :optional
}
@doc """
Function to handle GET requests to `/api/v2/zksync/batches/:batch_number` endpoint.
"""
@spec batch(Plug.Conn.t(), map()) :: Plug.Conn.t()
def batch(conn, %{"batch_number" => batch_number} = _params) do
case Reader.batch(
batch_number,
necessity_by_association: @batch_necessity_by_association,
api?: true
) do
{:ok, batch} ->
conn
|> put_status(200)
|> render(:zksync_batch, %{batch: batch})
{:error, :not_found} = res ->
res
end
end
@doc """
Function to handle GET requests to `/api/v2/zksync/batches` endpoint.
"""
@spec batches(Plug.Conn.t(), map()) :: Plug.Conn.t()
def batches(conn, params) do
{batches, next_page} =
params
|> paging_options()
|> Keyword.put(:necessity_by_association, @batches_necessity_by_association)
|> Keyword.put(:api?, true)
|> Reader.batches()
|> split_list_by_page()
next_page_params =
next_page_params(
next_page,
batches,
params,
fn %TransactionBatch{number: number} -> %{"number" => number} end
)
conn
|> put_status(200)
|> render(:zksync_batches, %{
batches: batches,
next_page_params: next_page_params
})
end
@doc """
Function to handle GET requests to `/api/v2/zksync/batches/count` endpoint.
"""
@spec batches_count(Plug.Conn.t(), map()) :: Plug.Conn.t()
def batches_count(conn, _params) do
conn
|> put_status(200)
|> render(:zksync_batches_count, %{count: Reader.batches_count(api?: true)})
end
@doc """
Function to handle GET requests to `/api/v2/main-page/zksync/batches/confirmed` endpoint.
"""
@spec batches_confirmed(Plug.Conn.t(), map()) :: Plug.Conn.t()
def batches_confirmed(conn, _params) do
batches =
[]
|> Keyword.put(:necessity_by_association, @batches_necessity_by_association)
|> Keyword.put(:api?, true)
|> Keyword.put(:confirmed?, true)
|> Reader.batches()
conn
|> put_status(200)
|> render(:zksync_batches, %{batches: batches})
end
@doc """
Function to handle GET requests to `/api/v2/main-page/zksync/batches/latest-number` endpoint.
"""
@spec batch_latest_number(Plug.Conn.t(), map()) :: Plug.Conn.t()
def batch_latest_number(conn, _params) do
conn
|> put_status(200)
|> render(:zksync_batch_latest_number, %{number: batch_latest_number()})
end
defp batch_latest_number do
case Reader.batch(:latest, api?: true) do
{:ok, batch} -> batch.number
{:error, :not_found} -> 0
end
end
end
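
The count and latest-number endpoints render bare scalars rather than wrapped objects (see the matching ZkSyncView clauses further down). A hedged ExUnit sketch of how that could be exercised, assuming the standard BlockScoutWeb.ConnCase scaffolding used elsewhere in the suite:

# Sketch only; behavior inferred from the controller above, test helpers
# (ConnCase, get/2, json_response/2) assumed from the existing suite.
defmodule BlockScoutWeb.API.V2.ZkSyncControllerSketchTest do
  use BlockScoutWeb.ConnCase

  test "batches count renders a bare integer", %{conn: conn} do
    request = get(conn, "/api/v2/zksync/batches/count")
    assert is_integer(json_response(request, 200))
  end
end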

@ -109,37 +109,29 @@ defmodule BlockScoutWeb.API.V2.BlockView do
"rsk" ->
defp chain_type_fields(result, block, single_block?) do
if single_block? do
result
|> Map.put("minimum_gas_price", block.minimum_gas_price)
|> Map.put("bitcoin_merged_mining_header", block.bitcoin_merged_mining_header)
|> Map.put("bitcoin_merged_mining_coinbase_transaction", block.bitcoin_merged_mining_coinbase_transaction)
|> Map.put("bitcoin_merged_mining_merkle_proof", block.bitcoin_merged_mining_merkle_proof)
|> Map.put("hash_for_merged_mining", block.hash_for_merged_mining)
# credo:disable-for-next-line Credo.Check.Design.AliasUsage
BlockScoutWeb.API.V2.RootstockView.extend_block_json_response(result, block)
else
result
end
end
"ethereum" ->
"zksync" ->
defp chain_type_fields(result, block, single_block?) do
if single_block? do
blob_gas_price = Block.transaction_blob_gas_price(block.transactions)
burnt_blob_transaction_fees = Decimal.mult(block.blob_gas_used || 0, blob_gas_price || 0)
result
|> Map.put("blob_tx_count", count_blob_transactions(block))
|> Map.put("blob_gas_used", block.blob_gas_used)
|> Map.put("excess_blob_gas", block.excess_blob_gas)
|> Map.put("blob_gas_price", blob_gas_price)
|> Map.put("burnt_blob_fees", burnt_blob_transaction_fees)
# credo:disable-for-next-line Credo.Check.Design.AliasUsage
BlockScoutWeb.API.V2.ZkSyncView.extend_block_json_response(result, block)
else
result
|> Map.put("blob_tx_count", count_blob_transactions(block))
|> Map.put("blob_gas_used", block.blob_gas_used)
|> Map.put("excess_blob_gas", block.excess_blob_gas)
end
end
"ethereum" ->
defp chain_type_fields(result, block, single_block?) do
# credo:disable-for-next-line Credo.Check.Design.AliasUsage
BlockScoutWeb.API.V2.EthereumView.extend_block_json_response(result, block, single_block?)
end
_ ->
defp chain_type_fields(result, _block, _single_block?) do
result

@ -0,0 +1,41 @@
defmodule BlockScoutWeb.API.V2.EthereumView do
alias Explorer.Chain.{Block, Transaction}
def extend_transaction_json_response(out_json, %Transaction{} = transaction) do
case Map.get(transaction, :beacon_blob_transaction) do
nil ->
out_json
%Ecto.Association.NotLoaded{} ->
out_json
item ->
out_json
|> Map.put("max_fee_per_blob_gas", item.max_fee_per_blob_gas)
|> Map.put("blob_versioned_hashes", item.blob_versioned_hashes)
|> Map.put("blob_gas_used", item.blob_gas_used)
|> Map.put("blob_gas_price", item.blob_gas_price)
|> Map.put("burnt_blob_fee", Decimal.mult(item.blob_gas_used, item.blob_gas_price))
end
end
def extend_block_json_response(out_json, %Block{} = block, single_block?) do
blob_gas_used = Map.get(block, :blob_gas_used)
excess_blob_gas = Map.get(block, :excess_blob_gas)
if single_block? do
blob_gas_price = Block.transaction_blob_gas_price(block.transactions)
burnt_blob_transaction_fees = Decimal.mult(blob_gas_used || 0, blob_gas_price || 0)
out_json
|> Map.put("blob_gas_used", blob_gas_used)
|> Map.put("excess_blob_gas", excess_blob_gas)
|> Map.put("blob_gas_price", blob_gas_price)
|> Map.put("burnt_blob_fees", burnt_blob_transaction_fees)
else
out_json
|> Map.put("blob_gas_used", blob_gas_used)
|> Map.put("excess_blob_gas", excess_blob_gas)
end
end
end
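
The burnt blob fee on the single-block path is a plain product, with nil gas values coalesced to zero; illustrative arithmetic (numbers invented):

# Illustrative only: 131_072 blob gas at a blob gas price of 3 wei.
# Decimal.mult/2 coerces plain integers, mirroring the call above.
blob_gas_used = 131_072
blob_gas_price = 3
burnt_blob_fees = Decimal.mult(blob_gas_used || 0, blob_gas_price || 0)
# => a Decimal equal to 393_216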

@ -146,4 +146,35 @@ defmodule BlockScoutWeb.API.V2.OptimismView do
def render("optimism_items_count.json", %{count: count}) do
count
end
def extend_transaction_json_response(out_json, %Transaction{} = transaction) do
out_json
|> add_optional_transaction_field(transaction, :l1_fee)
|> add_optional_transaction_field(transaction, :l1_fee_scalar)
|> add_optional_transaction_field(transaction, :l1_gas_price)
|> add_optional_transaction_field(transaction, :l1_gas_used)
|> add_optimism_fields(transaction.hash)
end
defp add_optional_transaction_field(out_json, transaction, field) do
case Map.get(transaction, field) do
nil -> out_json
value -> Map.put(out_json, Atom.to_string(field), value)
end
end
defp add_optimism_fields(out_json, transaction_hash) do
withdrawals =
transaction_hash
|> Withdrawal.transaction_statuses()
|> Enum.map(fn {nonce, status, l1_transaction_hash} ->
%{
"nonce" => nonce,
"status" => status,
"l1_transaction_hash" => l1_transaction_hash
}
end)
Map.put(out_json, "op_withdrawals", withdrawals)
end
end

@ -1,6 +1,10 @@
defmodule BlockScoutWeb.API.V2.PolygonEdgeView do
use BlockScoutWeb, :view
alias BlockScoutWeb.API.V2.Helper
alias Explorer.Chain
alias Explorer.Chain.PolygonEdge.Reader
@spec render(String.t(), map()) :: map()
def render("polygon_edge_deposits.json", %{
deposits: deposits,
@ -47,4 +51,47 @@ defmodule BlockScoutWeb.API.V2.PolygonEdgeView do
def render("polygon_edge_items_count.json", %{count: count}) do
count
end
def extend_transaction_json_response(out_json, tx_hash, connection) do
out_json
|> Map.put("polygon_edge_deposit", polygon_edge_deposit(tx_hash, connection))
|> Map.put("polygon_edge_withdrawal", polygon_edge_withdrawal(tx_hash, connection))
end
defp polygon_edge_deposit(transaction_hash, conn) do
transaction_hash
|> Reader.deposit_by_transaction_hash()
|> polygon_edge_deposit_or_withdrawal(conn)
end
defp polygon_edge_withdrawal(transaction_hash, conn) do
transaction_hash
|> Reader.withdrawal_by_transaction_hash()
|> polygon_edge_deposit_or_withdrawal(conn)
end
defp polygon_edge_deposit_or_withdrawal(item, conn) do
if not is_nil(item) do
{from_address, from_address_hash} = hash_to_address_and_hash(item.from)
{to_address, to_address_hash} = hash_to_address_and_hash(item.to)
item
|> Map.put(:from, Helper.address_with_info(conn, from_address, from_address_hash, item.from))
|> Map.put(:to, Helper.address_with_info(conn, to_address, to_address_hash, item.to))
end
end
defp hash_to_address_and_hash(hash) do
with false <- is_nil(hash),
{:ok, address} <-
Chain.hash_to_address(
hash,
[necessity_by_association: %{:names => :optional, :smart_contract => :optional}, api?: true],
false
) do
{address, address.hash}
else
_ -> {nil, nil}
end
end
end

@ -1,6 +1,8 @@
defmodule BlockScoutWeb.API.V2.PolygonZkevmView do
use BlockScoutWeb, :view
alias Explorer.Chain.Transaction
@doc """
Function to render GET requests to `/api/v2/zkevm/batches/:batch_number` endpoint.
"""
@ -158,4 +160,30 @@ defmodule BlockScoutWeb.API.V2.PolygonZkevmView do
}
end)
end
def extend_transaction_json_response(out_json, %Transaction{} = transaction) do
extended_result =
out_json
|> add_optional_transaction_field(transaction, "zkevm_batch_number", :zkevm_batch, :number)
|> add_optional_transaction_field(transaction, "zkevm_sequence_hash", :zkevm_sequence_transaction, :hash)
|> add_optional_transaction_field(transaction, "zkevm_verify_hash", :zkevm_verify_transaction, :hash)
Map.put(extended_result, "zkevm_status", zkevm_status(extended_result))
end
defp zkevm_status(result_map) do
if is_nil(Map.get(result_map, "zkevm_sequence_hash")) do
"Confirmed by Sequencer"
else
"L1 Confirmed"
end
end
defp add_optional_transaction_field(out_json, transaction, out_field, association, association_field) do
case Map.get(transaction, association) do
nil -> out_json
%Ecto.Association.NotLoaded{} -> out_json
item -> Map.put(out_json, out_field, Map.get(item, association_field))
end
end
end

@ -0,0 +1,19 @@
defmodule BlockScoutWeb.API.V2.RootstockView do
alias Explorer.Chain.Block
def extend_block_json_response(out_json, %Block{} = block) do
out_json
|> add_optional_transaction_field(block, :minimum_gas_price)
|> add_optional_transaction_field(block, :bitcoin_merged_mining_header)
|> add_optional_transaction_field(block, :bitcoin_merged_mining_coinbase_transaction)
|> add_optional_transaction_field(block, :bitcoin_merged_mining_merkle_proof)
|> add_optional_transaction_field(block, :hash_for_merged_mining)
end
defp add_optional_transaction_field(out_json, block, field) do
case Map.get(block, field) do
nil -> out_json
value -> Map.put(out_json, Atom.to_string(field), value)
end
end
end

@ -0,0 +1,126 @@
defmodule BlockScoutWeb.API.V2.StabilityView do
alias BlockScoutWeb.API.V2.{Helper, TokenView}
alias Explorer.Chain.{Hash, Log, Token, Transaction}
@api_true [api?: true]
@transaction_fee_event_signature "0x99e7b0ba56da2819c37c047f0511fd2bf6c9b4e27b4a979a19d6da0f74be8155"
@transaction_fee_event_abi [
%{
"anonymous" => false,
"inputs" => [
%{
"indexed" => false,
"internalType" => "address",
"name" => "token",
"type" => "address"
},
%{
"indexed" => false,
"internalType" => "uint256",
"name" => "totalFee",
"type" => "uint256"
},
%{
"indexed" => false,
"internalType" => "address",
"name" => "validator",
"type" => "address"
},
%{
"indexed" => false,
"internalType" => "uint256",
"name" => "validatorFee",
"type" => "uint256"
},
%{
"indexed" => false,
"internalType" => "address",
"name" => "dapp",
"type" => "address"
},
%{
"indexed" => false,
"internalType" => "uint256",
"name" => "dappFee",
"type" => "uint256"
}
],
"name" => "TransactionFee",
"type" => "event"
}
]
def extend_transaction_json_response(out_json, %Transaction{} = transaction) do
case transaction.transaction_fee_log do
[
{"token", "address", false, token_address_hash},
{"totalFee", "uint256", false, total_fee},
{"validator", "address", false, validator_address_hash},
{"validatorFee", "uint256", false, validator_fee},
{"dapp", "address", false, dapp_address_hash},
{"dappFee", "uint256", false, dapp_fee}
] ->
stability_fee = %{
"token" =>
TokenView.render("token.json", %{
token: transaction.transaction_fee_token,
contract_address_hash: bytes_to_address_hash(token_address_hash)
}),
"validator_address" =>
Helper.address_with_info(nil, nil, bytes_to_address_hash(validator_address_hash), false),
"dapp_address" => Helper.address_with_info(nil, nil, bytes_to_address_hash(dapp_address_hash), false),
"total_fee" => to_string(total_fee),
"dapp_fee" => to_string(dapp_fee),
"validator_fee" => to_string(validator_fee)
}
out_json
|> Map.put("stability_fee", stability_fee)
_ ->
out_json
end
end
def transform_transactions(transactions) do
do_extend_with_stability_fees_info(transactions)
end
defp do_extend_with_stability_fees_info(transactions) when is_list(transactions) do
{transactions, _tokens_acc} =
Enum.map_reduce(transactions, %{}, fn transaction, tokens_acc ->
case Log.fetch_log_by_tx_hash_and_first_topic(transaction.hash, @transaction_fee_event_signature, @api_true) do
fee_log when not is_nil(fee_log) ->
{:ok, _selector, mapping} = Log.find_and_decode(@transaction_fee_event_abi, fee_log, transaction.hash)
[{"token", "address", false, token_address_hash}, _, _, _, _, _] = mapping
{token, new_tokens_acc} = check_tokens_acc(bytes_to_address_hash(token_address_hash), tokens_acc)
{%Transaction{transaction | transaction_fee_log: mapping, transaction_fee_token: token}, new_tokens_acc}
_ ->
{transaction, tokens_acc}
end
end)
transactions
end
defp do_extend_with_stability_fees_info(transaction) do
[transaction] = do_extend_with_stability_fees_info([transaction])
transaction
end
defp check_tokens_acc(token_address_hash, tokens_acc) do
if Map.has_key?(tokens_acc, token_address_hash) do
{tokens_acc[token_address_hash], tokens_acc}
else
token = Token.get_by_contract_address_hash(token_address_hash, @api_true)
{token, Map.put(tokens_acc, token_address_hash, token)}
end
end
defp bytes_to_address_hash(bytes), do: %Hash{byte_count: 20, bytes: bytes}
end
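
The tokens_acc map threaded through Enum.map_reduce/3 is a per-call cache, so a page of transactions paying fees in the same token triggers one token lookup instead of one per transaction. The caching idiom in isolation (keys and expensive_lookup/1 are hypothetical placeholders):

# Isolated sketch of the tokens_acc idiom used above: fetch once per
# distinct key, thread the cache through Enum.map_reduce/3.
{results, _cache} =
  Enum.map_reduce(keys, %{}, fn key, cache ->
    case Map.fetch(cache, key) do
      {:ok, value} ->
        {value, cache}

      :error ->
        value = expensive_lookup(key)
        {value, Map.put(cache, key, value)}
    end
  end)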

@ -0,0 +1,130 @@
defmodule BlockScoutWeb.API.V2.SuaveView do
alias BlockScoutWeb.API.V2.Helper, as: APIHelper
alias BlockScoutWeb.API.V2.TransactionView
alias Explorer.Helper, as: ExplorerHelper
alias Ecto.Association.NotLoaded
alias Explorer.Chain.{Hash, Transaction}
@suave_bid_event "0x83481d5b04dea534715acad673a8177a46fc93882760f36bdc16ccac439d504e"
def extend_transaction_json_response(%Transaction{} = transaction, out_json, single_tx?, conn, watchlist_names) do
if is_nil(Map.get(transaction, :execution_node_hash)) do
out_json
else
wrapped_to_address = Map.get(transaction, :wrapped_to_address)
wrapped_to_address_hash = Map.get(transaction, :wrapped_to_address_hash)
wrapped_input = Map.get(transaction, :wrapped_input)
wrapped_hash = Map.get(transaction, :wrapped_hash)
execution_node = Map.get(transaction, :execution_node)
execution_node_hash = Map.get(transaction, :execution_node_hash)
wrapped_type = Map.get(transaction, :wrapped_type)
wrapped_nonce = Map.get(transaction, :wrapped_nonce)
wrapped_gas = Map.get(transaction, :wrapped_gas)
wrapped_gas_price = Map.get(transaction, :wrapped_gas_price)
wrapped_max_priority_fee_per_gas = Map.get(transaction, :wrapped_max_priority_fee_per_gas)
wrapped_max_fee_per_gas = Map.get(transaction, :wrapped_max_fee_per_gas)
wrapped_value = Map.get(transaction, :wrapped_value)
{[wrapped_decoded_input], _, _} =
TransactionView.decode_transactions(
[
%Transaction{
to_address: wrapped_to_address,
input: wrapped_input,
hash: wrapped_hash
}
],
false
)
out_json
|> Map.put("allowed_peekers", suave_parse_allowed_peekers(transaction.logs))
|> Map.put(
"execution_node",
APIHelper.address_with_info(
conn,
execution_node,
execution_node_hash,
single_tx?,
watchlist_names
)
)
|> Map.put("wrapped", %{
"type" => wrapped_type,
"nonce" => wrapped_nonce,
"to" =>
APIHelper.address_with_info(
conn,
wrapped_to_address,
wrapped_to_address_hash,
single_tx?,
watchlist_names
),
"gas_limit" => wrapped_gas,
"gas_price" => wrapped_gas_price,
"fee" =>
TransactionView.format_fee(
Transaction.fee(
%Transaction{gas: wrapped_gas, gas_price: wrapped_gas_price, gas_used: nil},
:wei
)
),
"max_priority_fee_per_gas" => wrapped_max_priority_fee_per_gas,
"max_fee_per_gas" => wrapped_max_fee_per_gas,
"value" => wrapped_value,
"hash" => wrapped_hash,
"method" =>
TransactionView.method_name(
%Transaction{to_address: wrapped_to_address, input: wrapped_input},
wrapped_decoded_input
),
"decoded_input" => TransactionView.decoded_input(wrapped_decoded_input),
"raw_input" => wrapped_input
})
end
end
# @spec suave_parse_allowed_peekers(Ecto.Schema.has_many(Log.t())) :: [String.t()]
defp suave_parse_allowed_peekers(%NotLoaded{}), do: []
defp suave_parse_allowed_peekers(logs) do
suave_bid_contracts =
Application.get_all_env(:explorer)[Transaction][:suave_bid_contracts]
|> String.split(",")
|> Enum.map(fn sbc -> String.downcase(String.trim(sbc)) end)
bid_event =
Enum.find(logs, fn log ->
sanitize_log_first_topic(log.first_topic) == @suave_bid_event &&
Enum.member?(suave_bid_contracts, String.downcase(Hash.to_string(log.address_hash)))
end)
if is_nil(bid_event) do
[]
else
[_bid_id, _decryption_condition, allowed_peekers] =
ExplorerHelper.decode_data(bid_event.data, [{:bytes, 16}, {:uint, 64}, {:array, :address}])
Enum.map(allowed_peekers, fn peeker ->
"0x" <> Base.encode16(peeker, case: :lower)
end)
end
end
defp sanitize_log_first_topic(first_topic) do
if is_nil(first_topic) do
""
else
sanitized =
if is_binary(first_topic) do
first_topic
else
Hash.to_string(first_topic)
end
String.downcase(sanitized)
end
end
end
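
Allowed peekers come back from decode_data/2 as raw address binaries and are rendered as lowercase 0x-prefixed hex; for example (a shortened, invented binary — real peekers are 20 bytes):

# Illustrative only: how suave_parse_allowed_peekers/1 formats each peeker.
peeker = <<0xDE, 0xAD, 0xBE, 0xEF>>
"0x" <> Base.encode16(peeker, case: :lower)
# => "0xdeadbeef"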

@ -2,6 +2,7 @@ defmodule BlockScoutWeb.API.V2.TransactionView do
use BlockScoutWeb, :view
alias BlockScoutWeb.API.V2.{ApiView, Helper, TokenView}
alias BlockScoutWeb.{ABIEncodedValueView, TransactionView}
alias BlockScoutWeb.Models.GetTransactionTags
alias BlockScoutWeb.Tokens.Helper, as: TokensHelper
@ -10,14 +11,11 @@ defmodule BlockScoutWeb.API.V2.TransactionView do
alias Explorer.{Chain, Market}
alias Explorer.Chain.{Address, Block, InternalTransaction, Log, Token, Transaction, Wei}
alias Explorer.Chain.Block.Reward
alias Explorer.Chain.Optimism.Withdrawal, as: OptimismWithdrawal
alias Explorer.Chain.PolygonEdge.Reader
alias Explorer.Chain.Transaction.StateChange
alias Explorer.Counters.AverageBlockTime
alias Timex.Duration
import BlockScoutWeb.Account.AuthController, only: [current_user: 1]
import Explorer.Chain.Transaction, only: [maybe_prepare_stability_fees: 1, bytes_to_address_hash: 1]
@api_true [api?: true]
@ -37,7 +35,7 @@ defmodule BlockScoutWeb.API.V2.TransactionView do
%{
"items" =>
transactions
|> maybe_prepare_stability_fees()
|> chain_type_transformations()
|> Enum.zip(decoded_transactions)
|> Enum.map(fn {tx, decoded_input} ->
prepare_transaction(tx, conn, false, block_height, watchlist_names, decoded_input)
@ -55,7 +53,7 @@ defmodule BlockScoutWeb.API.V2.TransactionView do
{decoded_transactions, _, _} = decode_transactions(transactions, true)
transactions
|> maybe_prepare_stability_fees()
|> chain_type_transformations()
|> Enum.zip(decoded_transactions)
|> Enum.map(fn {tx, decoded_input} ->
prepare_transaction(tx, conn, false, block_height, watchlist_names, decoded_input)
@ -69,7 +67,7 @@ defmodule BlockScoutWeb.API.V2.TransactionView do
%{
"items" =>
transactions
|> maybe_prepare_stability_fees()
|> chain_type_transformations()
|> Enum.zip(decoded_transactions)
|> Enum.map(fn {tx, decoded_input} -> prepare_transaction(tx, conn, false, block_height, decoded_input) end),
"next_page_params" => next_page_params
@ -87,7 +85,7 @@ defmodule BlockScoutWeb.API.V2.TransactionView do
{decoded_transactions, _, _} = decode_transactions(transactions, true)
transactions
|> maybe_prepare_stability_fees()
|> chain_type_transformations()
|> Enum.zip(decoded_transactions)
|> Enum.map(fn {tx, decoded_input} -> prepare_transaction(tx, conn, false, block_height, decoded_input) end)
end
@ -95,7 +93,10 @@ defmodule BlockScoutWeb.API.V2.TransactionView do
def render("transaction.json", %{transaction: transaction, conn: conn}) do
block_height = Chain.block_height(@api_true)
{[decoded_input], _, _} = decode_transactions([transaction], false)
prepare_transaction(transaction |> maybe_prepare_stability_fees(), conn, true, block_height, decoded_input)
transaction
|> chain_type_transformations()
|> prepare_transaction(conn, true, block_height, decoded_input)
end
def render("raw_trace.json", %{internal_transactions: internal_transactions}) do
@ -438,166 +439,6 @@ defmodule BlockScoutWeb.API.V2.TransactionView do
result
|> chain_type_fields(transaction, single_tx?, conn, watchlist_names)
|> maybe_put_stability_fee(transaction)
end
defp add_optional_transaction_field(result, transaction, field) do
case Map.get(transaction, field) do
nil -> result
value -> Map.put(result, Atom.to_string(field), value)
end
end
# credo:disable-for-next-line
defp chain_type_fields(result, transaction, single_tx?, conn, watchlist_names) do
case {single_tx?, Application.get_env(:explorer, :chain_type)} do
{true, "polygon_edge"} ->
result
|> Map.put("polygon_edge_deposit", polygon_edge_deposit(transaction.hash, conn))
|> Map.put("polygon_edge_withdrawal", polygon_edge_withdrawal(transaction.hash, conn))
{true, "polygon_zkevm"} ->
extended_result =
result
|> add_optional_transaction_field(transaction, "zkevm_batch_number", :zkevm_batch, :number)
|> add_optional_transaction_field(transaction, "zkevm_sequence_hash", :zkevm_sequence_transaction, :hash)
|> add_optional_transaction_field(transaction, "zkevm_verify_hash", :zkevm_verify_transaction, :hash)
Map.put(extended_result, "zkevm_status", zkevm_status(extended_result))
{true, "optimism"} ->
result
|> add_optional_transaction_field(transaction, :l1_fee)
|> add_optional_transaction_field(transaction, :l1_fee_scalar)
|> add_optional_transaction_field(transaction, :l1_gas_price)
|> add_optional_transaction_field(transaction, :l1_gas_used)
|> add_optimism_fields(transaction.hash, single_tx?)
{true, "suave"} ->
suave_fields(transaction, result, single_tx?, conn, watchlist_names)
{_, "ethereum"} ->
case Map.get(transaction, :beacon_blob_transaction) do
nil ->
result
%Ecto.Association.NotLoaded{} ->
result
item ->
result
|> Map.put("max_fee_per_blob_gas", item.max_fee_per_blob_gas)
|> Map.put("blob_versioned_hashes", item.blob_versioned_hashes)
|> Map.put("blob_gas_used", item.blob_gas_used)
|> Map.put("blob_gas_price", item.blob_gas_price)
|> Map.put("burnt_blob_fee", Decimal.mult(item.blob_gas_used, item.blob_gas_price))
end
_ ->
result
end
end
defp add_optional_transaction_field(result, transaction, field_name, assoc_name, assoc_field) do
case Map.get(transaction, assoc_name) do
nil -> result
%Ecto.Association.NotLoaded{} -> result
item -> Map.put(result, field_name, Map.get(item, assoc_field))
end
end
defp zkevm_status(result_map) do
if is_nil(Map.get(result_map, "zkevm_sequence_hash")) do
"Confirmed by Sequencer"
else
"L1 Confirmed"
end
end
if Application.compile_env(:explorer, :chain_type) != "suave" do
defp suave_fields(_transaction, result, _single_tx?, _conn, _watchlist_names), do: result
else
defp suave_fields(transaction, result, single_tx?, conn, watchlist_names) do
if is_nil(transaction.execution_node_hash) do
result
else
{[wrapped_decoded_input], _, _} =
decode_transactions(
[
%Transaction{
to_address: transaction.wrapped_to_address,
input: transaction.wrapped_input,
hash: transaction.wrapped_hash
}
],
false
)
result
|> Map.put("allowed_peekers", Transaction.suave_parse_allowed_peekers(transaction.logs))
|> Map.put(
"execution_node",
Helper.address_with_info(
conn,
transaction.execution_node,
transaction.execution_node_hash,
single_tx?,
watchlist_names
)
)
|> Map.put("wrapped", %{
"type" => transaction.wrapped_type,
"nonce" => transaction.wrapped_nonce,
"to" =>
Helper.address_with_info(
conn,
transaction.wrapped_to_address,
transaction.wrapped_to_address_hash,
single_tx?,
watchlist_names
),
"gas_limit" => transaction.wrapped_gas,
"gas_price" => transaction.wrapped_gas_price,
"fee" =>
format_fee(
Transaction.fee(
%Transaction{gas: transaction.wrapped_gas, gas_price: transaction.wrapped_gas_price, gas_used: nil},
:wei
)
),
"max_priority_fee_per_gas" => transaction.wrapped_max_priority_fee_per_gas,
"max_fee_per_gas" => transaction.wrapped_max_fee_per_gas,
"value" => transaction.wrapped_value,
"hash" => transaction.wrapped_hash,
"method" =>
method_name(
%Transaction{to_address: transaction.wrapped_to_address, input: transaction.wrapped_input},
wrapped_decoded_input
),
"decoded_input" => decoded_input(wrapped_decoded_input),
"raw_input" => transaction.wrapped_input
})
end
end
end
defp add_optimism_fields(result, transaction_hash, single_tx?) do
if Application.get_env(:explorer, :chain_type) == "optimism" && single_tx? do
withdrawals =
transaction_hash
|> OptimismWithdrawal.transaction_statuses()
|> Enum.map(fn {nonce, status, l1_transaction_hash} ->
%{
"nonce" => nonce,
"status" => status,
"l1_transaction_hash" => l1_transaction_hash
}
end)
Map.put(result, "op_withdrawals", withdrawals)
else
result
end
end
def token_transfers(_, _conn, false), do: nil
@ -928,71 +769,111 @@ defmodule BlockScoutWeb.API.V2.TransactionView do
Map.merge(map, %{"change" => change})
end
defp polygon_edge_deposit(transaction_hash, conn) do
transaction_hash
|> Reader.deposit_by_transaction_hash()
|> polygon_edge_deposit_or_withdrawal(conn)
case Application.compile_env(:explorer, :chain_type) do
"polygon_edge" ->
defp chain_type_transformations(transactions) do
transactions
end
defp polygon_edge_withdrawal(transaction_hash, conn) do
transaction_hash
|> Reader.withdrawal_by_transaction_hash()
|> polygon_edge_deposit_or_withdrawal(conn)
defp chain_type_fields(result, transaction, single_tx?, conn, _watchlist_names) do
if single_tx? do
# credo:disable-for-next-line Credo.Check.Design.AliasUsage
BlockScoutWeb.API.V2.PolygonEdgeView.extend_transaction_json_response(result, transaction.hash, conn)
else
result
end
end
defp polygon_edge_deposit_or_withdrawal(item, conn) do
if not is_nil(item) do
{from_address, from_address_hash} = hash_to_address_and_hash(item.from)
{to_address, to_address_hash} = hash_to_address_and_hash(item.to)
"polygon_zkevm" ->
defp chain_type_transformations(transactions) do
transactions
end
item
|> Map.put(:from, Helper.address_with_info(conn, from_address, from_address_hash, item.from))
|> Map.put(:to, Helper.address_with_info(conn, to_address, to_address_hash, item.to))
defp chain_type_fields(result, transaction, single_tx?, _conn, _watchlist_names) do
if single_tx? do
# credo:disable-for-next-line Credo.Check.Design.AliasUsage
BlockScoutWeb.API.V2.PolygonZkevmView.extend_transaction_json_response(result, transaction)
else
result
end
end
defp hash_to_address_and_hash(hash) do
with false <- is_nil(hash),
{:ok, address} <-
Chain.hash_to_address(
hash,
[necessity_by_association: %{:names => :optional, :smart_contract => :optional}, api?: true],
false
) do
{address, address.hash}
"zksync" ->
defp chain_type_transformations(transactions) do
transactions
end
defp chain_type_fields(result, transaction, single_tx?, _conn, _watchlist_names) do
if single_tx? do
# credo:disable-for-next-line Credo.Check.Design.AliasUsage
BlockScoutWeb.API.V2.ZkSyncView.extend_transaction_json_response(result, transaction)
else
_ -> {nil, nil}
result
end
end
defp maybe_put_stability_fee(body, transaction) do
with "stability" <- Application.get_env(:explorer, :chain_type),
[
{"token", "address", false, token_address_hash},
{"totalFee", "uint256", false, total_fee},
{"validator", "address", false, validator_address_hash},
{"validatorFee", "uint256", false, validator_fee},
{"dapp", "address", false, dapp_address_hash},
{"dappFee", "uint256", false, dapp_fee}
] <- transaction.transaction_fee_log do
stability_fee = %{
"token" =>
TokenView.render("token.json", %{
token: transaction.transaction_fee_token,
contract_address_hash: bytes_to_address_hash(token_address_hash)
}),
"validator_address" => Helper.address_with_info(nil, nil, bytes_to_address_hash(validator_address_hash), false),
"dapp_address" => Helper.address_with_info(nil, nil, bytes_to_address_hash(dapp_address_hash), false),
"total_fee" => to_string(total_fee),
"dapp_fee" => to_string(dapp_fee),
"validator_fee" => to_string(validator_fee)
}
"optimism" ->
defp chain_type_transformations(transactions) do
transactions
end
body
|> Map.put("stability_fee", stability_fee)
defp chain_type_fields(result, transaction, single_tx?, _conn, _watchlist_names) do
if single_tx? do
# credo:disable-for-next-line Credo.Check.Design.AliasUsage
BlockScoutWeb.API.V2.OptimismView.extend_transaction_json_response(result, transaction)
else
result
end
end
"suave" ->
defp chain_type_transformations(transactions) do
transactions
end
defp chain_type_fields(result, transaction, single_tx?, conn, watchlist_names) do
if single_tx? do
# credo:disable-for-next-line Credo.Check.Design.AliasUsage
BlockScoutWeb.API.V2.SuaveView.extend_transaction_json_response(
transaction,
result,
single_tx?,
conn,
watchlist_names
)
else
result
end
end
"stability" ->
defp chain_type_transformations(transactions) do
# credo:disable-for-next-line Credo.Check.Design.AliasUsage
BlockScoutWeb.API.V2.StabilityView.transform_transactions(transactions)
end
defp chain_type_fields(result, transaction, _single_tx?, _conn, _watchlist_names) do
# credo:disable-for-next-line Credo.Check.Design.AliasUsage
BlockScoutWeb.API.V2.StabilityView.extend_transaction_json_response(result, transaction)
end
"ethereum" ->
defp chain_type_transformations(transactions) do
transactions
end
defp chain_type_fields(result, transaction, _single_tx?, _conn, _watchlist_names) do
# credo:disable-for-next-line Credo.Check.Design.AliasUsage
BlockScoutWeb.API.V2.EthereumView.extend_transaction_json_response(result, transaction)
end
_ ->
body
defp chain_type_transformations(transactions) do
transactions
end
defp chain_type_fields(result, _transaction, _single_tx?, _conn, _watchlist_names) do
result
end
end
end
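
As in the block view, the `case` wrapping these definitions runs at compile time, so exactly one pair of chain_type_transformations/1 and chain_type_fields/5 implementations ends up in the compiled module. The shape of the pattern, reduced to a toy example (names hypothetical):

# Toy illustration of compile-time clause selection; not part of the diff.
defmodule CompileTimeDispatchSketch do
  case Application.compile_env(:explorer, :chain_type) do
    "zksync" -> def label, do: "zksync"
    _ -> def label, do: "generic"
  end
end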

@ -0,0 +1,235 @@
defmodule BlockScoutWeb.API.V2.ZkSyncView do
use BlockScoutWeb, :view
alias Explorer.Chain.{Block, Transaction}
alias Explorer.Chain.ZkSync.TransactionBatch
@doc """
Function to render GET requests to `/api/v2/zksync/batches/:batch_number` endpoint.
"""
@spec render(binary(), map()) :: map() | non_neg_integer()
def render("zksync_batch.json", %{batch: batch}) do
l2_transactions =
if Map.has_key?(batch, :l2_transactions) do
Enum.map(batch.l2_transactions, fn tx -> tx.hash end)
end
%{
"number" => batch.number,
"timestamp" => batch.timestamp,
"root_hash" => batch.root_hash,
"l1_tx_count" => batch.l1_tx_count,
"l2_tx_count" => batch.l2_tx_count,
"l1_gas_price" => batch.l1_gas_price,
"l2_fair_gas_price" => batch.l2_fair_gas_price,
"start_block" => batch.start_block,
"end_block" => batch.end_block,
"transactions" => l2_transactions
}
|> add_l1_txs_info_and_status(batch)
end
@doc """
Function to render GET requests to `/api/v2/zksync/batches` endpoint.
"""
def render("zksync_batches.json", %{
batches: batches,
next_page_params: next_page_params
}) do
%{
items: render_zksync_batches(batches),
next_page_params: next_page_params
}
end
@doc """
Function to render GET requests to `/api/v2/main-page/zksync/batches/confirmed` endpoint.
"""
def render("zksync_batches.json", %{batches: batches}) do
%{items: render_zksync_batches(batches)}
end
@doc """
Function to render GET requests to `/api/v2/zksync/batches/count` endpoint.
"""
def render("zksync_batches_count.json", %{count: count}) do
count
end
@doc """
Function to render GET requests to `/api/v2/main-page/zksync/batches/latest-number` endpoint.
"""
def render("zksync_batch_latest_number.json", %{number: number}) do
number
end
defp render_zksync_batches(batches) do
Enum.map(batches, fn batch ->
%{
"number" => batch.number,
"timestamp" => batch.timestamp,
"tx_count" => batch.l1_tx_count + batch.l2_tx_count
}
|> add_l1_txs_info_and_status(batch)
end)
end
@doc """
Extends the json output with a sub-map containing information related to
zksync: the batch number and the associated L1 transactions with their timestamps.
## Parameters
- `out_json`: a map defining output json which will be extended
- `transaction`: transaction structure containing zksync related data
## Returns
A map extended with data related to the zksync rollup
"""
@spec extend_transaction_json_response(map(), %{
:__struct__ => Explorer.Chain.Transaction,
:zksync_batch => any(),
:zksync_commit_transaction => any(),
:zksync_execute_transaction => any(),
:zksync_prove_transaction => any(),
optional(any()) => any()
}) :: map()
def extend_transaction_json_response(out_json, %Transaction{} = transaction) do
do_add_zksync_info(out_json, transaction)
end
@doc """
Extends the json output with a sub-map containing information related to
zksync: the batch number and the associated L1 transactions with their timestamps.
## Parameters
- `out_json`: a map defining output json which will be extended
- `block`: block structure containing zksync related data
## Returns
A map extended with data related to the zksync rollup
"""
@spec extend_block_json_response(map(), %{
:__struct__ => Explorer.Chain.Block,
:zksync_batch => any(),
:zksync_commit_transaction => any(),
:zksync_execute_transaction => any(),
:zksync_prove_transaction => any(),
optional(any()) => any()
}) :: map()
def extend_block_json_response(out_json, %Block{} = block) do
do_add_zksync_info(out_json, block)
end
defp do_add_zksync_info(out_json, zksync_entity) do
res =
%{}
|> do_add_l1_txs_info_and_status(%{
batch_number: get_batch_number(zksync_entity),
commit_transaction: zksync_entity.zksync_commit_transaction,
prove_transaction: zksync_entity.zksync_prove_transaction,
execute_transaction: zksync_entity.zksync_execute_transaction
})
|> Map.put("batch_number", get_batch_number(zksync_entity))
Map.put(out_json, "zksync", res)
end
defp get_batch_number(zksync_entity) do
case Map.get(zksync_entity, :zksync_batch) do
nil -> nil
%Ecto.Association.NotLoaded{} -> nil
value -> value.number
end
end
defp add_l1_txs_info_and_status(out_json, %TransactionBatch{} = batch) do
do_add_l1_txs_info_and_status(out_json, batch)
end
defp do_add_l1_txs_info_and_status(out_json, zksync_item) do
l1_txs = get_associated_l1_txs(zksync_item)
out_json
|> Map.merge(%{
"status" => batch_status(zksync_item),
"commit_transaction_hash" => get_2map_data(l1_txs, :commit_transaction, :hash),
"commit_transaction_timestamp" => get_2map_data(l1_txs, :commit_transaction, :ts),
"prove_transaction_hash" => get_2map_data(l1_txs, :prove_transaction, :hash),
"prove_transaction_timestamp" => get_2map_data(l1_txs, :prove_transaction, :ts),
"execute_transaction_hash" => get_2map_data(l1_txs, :execute_transaction, :hash),
"execute_transaction_timestamp" => get_2map_data(l1_txs, :execute_transaction, :ts)
})
end
# Extract transaction hash and timestamp for L1 transactions associated with
# a zksync rollup entity: batch, transaction or block.
#
# ## Parameters
# - `zksync_item`: A batch, transaction, or block.
#
# ## Returns
# A map containing nested maps describing the corresponding L1 transactions
defp get_associated_l1_txs(zksync_item) do
[:commit_transaction, :prove_transaction, :execute_transaction]
|> Enum.reduce(%{}, fn key, l1_txs ->
case Map.get(zksync_item, key) do
nil -> Map.put(l1_txs, key, nil)
%Ecto.Association.NotLoaded{} -> Map.put(l1_txs, key, nil)
value -> Map.put(l1_txs, key, %{hash: value.hash, ts: value.timestamp})
end
end)
end
# Inspects L1 transactions of the batch to determine the batch status.
#
# ## Parameters
# - `zksync_item`: A batch, transaction, or block.
#
# ## Returns
# A string with one of the predefined statuses
defp batch_status(zksync_item) do
cond do
specified?(zksync_item.execute_transaction) -> "Executed on L1"
specified?(zksync_item.prove_transaction) -> "Validated on L1"
specified?(zksync_item.commit_transaction) -> "Sent to L1"
# Batch entity itself has no batch_number
not Map.has_key?(zksync_item, :batch_number) -> "Sealed on L2"
not is_nil(zksync_item.batch_number) -> "Sealed on L2"
true -> "Processed on L2"
end
end
# Checks if an item associated with a DB entity has an actual value
#
# ## Parameters
# - `associated_item`: an item associated with a DB entity
#
# ## Returns
# - `false`: if the item is nil or not loaded
# - `true`: if the item has an actual value
defp specified?(associated_item) do
case associated_item do
nil -> false
%Ecto.Association.NotLoaded{} -> false
_ -> true
end
end
# Gets the value of an element nested in a map using two keys.
#
# Clarification: Returns `map[key1][key2]`
#
# ## Parameters
# - `map`: The high-level map.
# - `key1`: The key of the element in `map`.
# - `key2`: The key of the element in the map accessible by `map[key1]`.
#
# ## Returns
# The value of the element, or `nil` if the map accessible by `key1` does not exist.
defp get_2map_data(map, key1, key2) do
case Map.get(map, key1) do
nil -> nil
inner_map -> Map.get(inner_map, key2)
end
end
end
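
Given the precedence in batch_status/1, a batch whose commit transaction is loaded but which is not yet proven or executed renders with status "Sent to L1" and nil prove/execute fields; a hypothetical rendered map (hash and timestamp bindings invented):

# Hypothetical output shape of do_add_l1_txs_info_and_status/2 for a
# committed-but-unproven batch.
%{
  "status" => "Sent to L1",
  "commit_transaction_hash" => commit_tx_hash,
  "commit_transaction_timestamp" => commit_tx_timestamp,
  "prove_transaction_hash" => nil,
  "prove_transaction_timestamp" => nil,
  "execute_transaction_hash" => nil,
  "execute_transaction_timestamp" => nil
}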

@ -30,7 +30,8 @@ defmodule BlockScoutWeb.Mixfile do
Explorer.Chain.Beacon.Reader,
Explorer.Chain.Cache.OptimismFinalizationPeriod,
Explorer.Chain.Optimism.OutputRoot,
Explorer.Chain.Optimism.WithdrawalEvent
Explorer.Chain.Optimism.WithdrawalEvent,
Explorer.Chain.ZkSync.Reader
]
]
]

@ -974,19 +974,8 @@ defmodule BlockScoutWeb.API.V2.TransactionControllerTest do
end
end
if Application.compile_env(:explorer, :chain_type) == "stability" do
describe "stability fees" do
setup %{conn: conn} do
old_env = Application.get_env(:explorer, :chain_type)
Application.put_env(:explorer, :chain_type, "stability")
on_exit(fn ->
Application.put_env(:explorer, :chain_type, old_env)
end)
%{conn: conn}
end
test "check stability fees", %{conn: conn} do
tx = insert(:transaction) |> with_block()
@ -1112,6 +1101,7 @@ defmodule BlockScoutWeb.API.V2.TransactionControllerTest do
} = json_response(request, 200)
end
end
end
defp compare_item(%Transaction{} = transaction, json) do
assert to_string(transaction.hash) == json["hash"]

@ -174,6 +174,11 @@ defmodule EthereumJSONRPC.Log do
end
end
# zkSync specific log fields
defp entry_to_elixir({key, _}) when key in ~w(l1BatchNumber logType) do
{nil, nil}
end
defp put_topics(params, topics) when is_map(params) and is_list(topics) do
params
|> Map.put(:first_topic, Enum.at(topics, 0))

@ -374,6 +374,12 @@ defmodule EthereumJSONRPC.Receipt do
:ignore
end
# zkSync specific transaction receipt fields
defp entry_to_elixir({key, _})
when key in ~w(l1BatchNumber l1BatchTxIndex l2ToL1Logs) do
:ignore
end
defp entry_to_elixir({key, value}) do
{:error, {:unknown_key, %{key: key, value: value}}}
end

@ -264,11 +264,8 @@ defmodule EthereumJSONRPC.Transaction do
"hash" => hash,
"input" => input,
"nonce" => nonce,
"r" => r,
"s" => s,
"to" => to_address_hash,
"transactionIndex" => index,
"v" => v,
"value" => value,
"type" => type,
"maxPriorityFeePerGas" => max_priority_fee_per_gas,
@ -285,10 +282,7 @@ defmodule EthereumJSONRPC.Transaction do
index: index,
input: input,
nonce: nonce,
r: r,
s: s,
to_address_hash: to_address_hash,
v: v,
value: value,
transaction_index: index,
type: type,
@ -298,7 +292,10 @@ defmodule EthereumJSONRPC.Transaction do
put_if_present(transaction, result, [
{"creates", :created_contract_address_hash},
{"block_timestamp", :block_timestamp}
{"block_timestamp", :block_timestamp},
{"r", :r},
{"s", :s},
{"v", :v}
])
end
@ -313,11 +310,8 @@ defmodule EthereumJSONRPC.Transaction do
"hash" => hash,
"input" => input,
"nonce" => nonce,
"r" => r,
"s" => s,
"to" => to_address_hash,
"transactionIndex" => index,
"v" => v,
"value" => value,
"type" => type,
"maxPriorityFeePerGas" => max_priority_fee_per_gas,
@ -334,10 +328,7 @@ defmodule EthereumJSONRPC.Transaction do
index: index,
input: input,
nonce: nonce,
r: r,
s: s,
to_address_hash: to_address_hash,
v: v,
value: value,
transaction_index: index,
type: type,
@ -347,10 +338,14 @@ defmodule EthereumJSONRPC.Transaction do
put_if_present(transaction, result, [
{"creates", :created_contract_address_hash},
{"block_timestamp", :block_timestamp}
{"block_timestamp", :block_timestamp},
{"r", :r},
{"s", :s},
{"v", :v}
])
end
# for legacy txs without maxPriorityFeePerGas and maxFeePerGas
def do_elixir_to_params(
%{
"blockHash" => block_hash,
@ -361,11 +356,8 @@ defmodule EthereumJSONRPC.Transaction do
"hash" => hash,
"input" => input,
"nonce" => nonce,
"r" => r,
"s" => s,
"to" => to_address_hash,
"transactionIndex" => index,
"v" => v,
"value" => value,
"type" => type
} = transaction
@ -380,10 +372,7 @@ defmodule EthereumJSONRPC.Transaction do
index: index,
input: input,
nonce: nonce,
r: r,
s: s,
to_address_hash: to_address_hash,
v: v,
value: value,
transaction_index: index,
type: type
@ -391,10 +380,14 @@ defmodule EthereumJSONRPC.Transaction do
put_if_present(transaction, result, [
{"creates", :created_contract_address_hash},
{"block_timestamp", :block_timestamp}
{"block_timestamp", :block_timestamp},
{"r", :r},
{"s", :s},
{"v", :v}
])
end
# for legacy txs without type, maxPriorityFeePerGas and maxFeePerGas
def do_elixir_to_params(
%{
"blockHash" => block_hash,
@ -405,11 +398,8 @@ defmodule EthereumJSONRPC.Transaction do
"hash" => hash,
"input" => input,
"nonce" => nonce,
"r" => r,
"s" => s,
"to" => to_address_hash,
"transactionIndex" => index,
"v" => v,
"value" => value
} = transaction
) do
@ -423,20 +413,21 @@ defmodule EthereumJSONRPC.Transaction do
index: index,
input: input,
nonce: nonce,
r: r,
s: s,
to_address_hash: to_address_hash,
v: v,
value: value,
transaction_index: index
}
put_if_present(transaction, result, [
{"creates", :created_contract_address_hash},
{"block_timestamp", :block_timestamp}
{"block_timestamp", :block_timestamp},
{"r", :r},
{"s", :s},
{"v", :v}
])
end
# for txs without gasPrice, maxPriorityFeePerGas and maxFeePerGas
def do_elixir_to_params(
%{
"blockHash" => block_hash,
@ -446,12 +437,9 @@ defmodule EthereumJSONRPC.Transaction do
"hash" => hash,
"input" => input,
"nonce" => nonce,
"r" => r,
"s" => s,
"to" => to_address_hash,
"transactionIndex" => index,
"type" => type,
"v" => v,
"value" => value
} = transaction
) do
@ -465,10 +453,7 @@ defmodule EthereumJSONRPC.Transaction do
index: index,
input: input,
nonce: nonce,
r: r,
s: s,
to_address_hash: to_address_hash,
v: v,
value: value,
transaction_index: index,
type: type
@ -476,7 +461,10 @@ defmodule EthereumJSONRPC.Transaction do
put_if_present(transaction, result, [
{"creates", :created_contract_address_hash},
{"block_timestamp", :block_timestamp}
{"block_timestamp", :block_timestamp},
{"r", :r},
{"s", :s},
{"v", :v}
])
end
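
Moving r, s and v out of the required pattern match and into put_if_present/3 means transactions served without signature fields (as zkSync-era-style nodes can return) no longer fail to match these clauses. A sketch of the helper's assumed contract, inferred from the call sites above rather than quoted from the module:

# Assumed behavior of put_if_present/3 (sketch): copy each source key that
# carries a value to the target atom key, silently skip the rest.
defp put_if_present(source, target, mappings) do
  Enum.reduce(mappings, target, fn {source_key, target_key}, acc ->
    case Map.get(source, source_key) do
      nil -> acc
      value -> Map.put(acc, target_key, value)
    end
  end)
end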
@ -673,6 +661,11 @@ defmodule EthereumJSONRPC.Transaction do
end
end
# ZkSync fields
defp entry_to_elixir({key, _}) when key in ~w(l1BatchNumber l1BatchTxIndex) do
{:ignore, :ignore}
end
defp entry_to_elixir(_) do
{nil, nil}
end

@ -20,6 +20,9 @@ config :explorer, Explorer.Repo.PolygonEdge, timeout: :timer.seconds(80)
# Configure Polygon zkEVM database
config :explorer, Explorer.Repo.PolygonZkevm, timeout: :timer.seconds(80)
# Configure ZkSync database
config :explorer, Explorer.Repo.ZkSync, timeout: :timer.seconds(80)
config :explorer, Explorer.Repo.RSK, timeout: :timer.seconds(80)
config :explorer, Explorer.Repo.Shibarium, timeout: :timer.seconds(80)

@ -28,6 +28,10 @@ config :explorer, Explorer.Repo.PolygonZkevm,
prepare: :unnamed,
timeout: :timer.seconds(60)
config :explorer, Explorer.Repo.ZkSync,
prepare: :unnamed,
timeout: :timer.seconds(60)
config :explorer, Explorer.Repo.RSK,
prepare: :unnamed,
timeout: :timer.seconds(60)

@ -48,6 +48,7 @@ for repo <- [
Explorer.Repo.Optimism,
Explorer.Repo.PolygonEdge,
Explorer.Repo.PolygonZkevm,
Explorer.Repo.ZkSync,
Explorer.Repo.RSK,
Explorer.Repo.Shibarium,
Explorer.Repo.Suave,

@ -147,6 +147,7 @@ defmodule Explorer.Application do
Explorer.Repo.Optimism,
Explorer.Repo.PolygonEdge,
Explorer.Repo.PolygonZkevm,
Explorer.Repo.ZkSync,
Explorer.Repo.RSK,
Explorer.Repo.Shibarium,
Explorer.Repo.Suave,

@ -3,6 +3,7 @@ defmodule Explorer.Chain.Block.Schema do
alias Explorer.Chain.{Address, Block, Hash, PendingBlockOperation, Transaction, Wei, Withdrawal}
alias Explorer.Chain.Block.{Reward, SecondDegreeRelation}
alias Explorer.Chain.ZkSync.BatchBlock, as: ZkSyncBatchBlock
@chain_type_fields (case Application.compile_env(:explorer, :chain_type) do
"ethereum" ->
@ -26,6 +27,18 @@ defmodule Explorer.Chain.Block.Schema do
2
)
"zksync" ->
elem(
quote do
has_one(:zksync_batch_block, ZkSyncBatchBlock, foreign_key: :hash, references: :hash)
has_one(:zksync_batch, through: [:zksync_batch_block, :batch])
has_one(:zksync_commit_transaction, through: [:zksync_batch, :commit_transaction])
has_one(:zksync_prove_transaction, through: [:zksync_batch, :prove_transaction])
has_one(:zksync_execute_transaction, through: [:zksync_batch, :execute_transaction])
end,
2
)
_ ->
[]
end)
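
These has_one ... through chains make the batch and its L1 lifecycle transactions reachable directly from a block once the chain type is zksync; a hedged preload sketch (block is assumed to be an already-loaded Explorer.Chain.Block struct):

# Sketch: with CHAIN_TYPE=zksync compiled in, walk batch-block -> batch ->
# lifecycle transactions in one preload call.
block
|> Explorer.Repo.preload([
  :zksync_batch,
  :zksync_commit_transaction,
  :zksync_prove_transaction,
  :zksync_execute_transaction
])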

@ -123,7 +123,7 @@ defmodule Explorer.Chain.Import do
milliseconds.
#{@runner_options_doc}
"""
@spec all(all_options()) :: all_result()
# @spec all(all_options()) :: all_result()
def all(options) when is_map(options) do
with {:ok, runner_options_pairs} <- validate_options(options),
{:ok, valid_runner_option_pairs} <- validate_runner_options_pairs(runner_options_pairs),

@ -532,8 +532,10 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
select:
map(ctb, [
:address_hash,
:block_number,
:token_contract_address_hash,
:token_id,
:token_type,
# Used to determine if `address_hash` was a holder of `token_contract_address_hash` before
# `address_current_token_balance` is deleted in `update_tokens_holder_count`.
@ -566,43 +568,28 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
%{timeout: timeout} = options
)
when is_list(deleted_address_current_token_balances) do
final_query = derive_address_current_token_balances_grouped_query(deleted_address_current_token_balances)
new_current_token_balances_placeholders =
Enum.map(deleted_address_current_token_balances, fn deleted_balance ->
now = DateTime.utc_now()
new_current_token_balance_query =
from(new_current_token_balance in subquery(final_query),
inner_join: tb in Address.TokenBalance,
on:
tb.address_hash == new_current_token_balance.address_hash and
tb.token_contract_address_hash == new_current_token_balance.token_contract_address_hash and
((is_nil(tb.token_id) and is_nil(new_current_token_balance.token_id)) or
(tb.token_id == new_current_token_balance.token_id and
not is_nil(tb.token_id) and not is_nil(new_current_token_balance.token_id))) and
tb.block_number == new_current_token_balance.block_number,
select: %{
address_hash: new_current_token_balance.address_hash,
token_contract_address_hash: new_current_token_balance.token_contract_address_hash,
token_id: new_current_token_balance.token_id,
token_type: tb.token_type,
block_number: new_current_token_balance.block_number,
value: tb.value,
value_fetched_at: tb.value_fetched_at,
inserted_at: over(min(tb.inserted_at), :w),
updated_at: over(max(tb.updated_at), :w)
},
windows: [
w: [partition_by: [tb.address_hash, tb.token_contract_address_hash, tb.token_id]]
]
)
current_token_balance =
new_current_token_balance_query
|> repo.all()
%{
address_hash: deleted_balance.address_hash,
token_contract_address_hash: deleted_balance.token_contract_address_hash,
token_id: deleted_balance.token_id,
token_type: deleted_balance.token_type,
block_number: deleted_balance.block_number,
value: nil,
value_fetched_at: nil,
inserted_at: now,
updated_at: now
}
end)
timestamps = Import.timestamps()
result =
CurrentTokenBalances.insert_changes_list_with_and_without_token_id(
current_token_balance,
new_current_token_balances_placeholders,
repo,
timestamps,
timeout,
@ -787,43 +774,6 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
)
end
defp derive_address_current_token_balances_grouped_query(deleted_address_current_token_balances) do
initial_query =
from(tb in Address.TokenBalance,
select: %{
address_hash: tb.address_hash,
token_contract_address_hash: tb.token_contract_address_hash,
token_id: tb.token_id,
block_number: max(tb.block_number)
},
group_by: [tb.address_hash, tb.token_contract_address_hash, tb.token_id]
)
Enum.reduce(deleted_address_current_token_balances, initial_query, fn %{
address_hash: address_hash,
token_contract_address_hash:
token_contract_address_hash,
token_id: token_id
},
acc_query ->
if token_id do
from(tb in acc_query,
or_where:
tb.address_hash == ^address_hash and
tb.token_contract_address_hash == ^token_contract_address_hash and
tb.token_id == ^token_id
)
else
from(tb in acc_query,
or_where:
tb.address_hash == ^address_hash and
tb.token_contract_address_hash == ^token_contract_address_hash and
is_nil(tb.token_id)
)
end
end)
end
# `block_rewards` are linked to `blocks.hash`, but fetched by `blocks.number`, so when a block with the same number is
# inserted, the old block rewards need to be deleted, so that the old and new rewards aren't combined.
defp delete_rewards(repo, blocks_changes, %{timeout: timeout}) do

@ -0,0 +1,79 @@
defmodule Explorer.Chain.Import.Runner.ZkSync.BatchBlocks do
@moduledoc """
Bulk imports `t:Explorer.Chain.ZkSync.BatchBlock.t/0`.
"""
require Ecto.Query
alias Ecto.{Changeset, Multi, Repo}
alias Explorer.Chain.Import
alias Explorer.Chain.ZkSync.BatchBlock
alias Explorer.Prometheus.Instrumenter
@behaviour Import.Runner
# milliseconds
@timeout 60_000
@type imported :: [BatchBlock.t()]
@impl Import.Runner
def ecto_schema_module, do: BatchBlock
@impl Import.Runner
def option_key, do: :zksync_batch_blocks
@impl Import.Runner
@spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()}
def imported_table_row do
%{
value_type: "[#{ecto_schema_module()}.t()]",
value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
}
end
@impl Import.Runner
@spec run(Multi.t(), list(), map()) :: Multi.t()
def run(multi, changes_list, %{timestamps: timestamps} = options) do
insert_options =
options
|> Map.get(option_key(), %{})
|> Map.take(~w(on_conflict timeout)a)
|> Map.put_new(:timeout, @timeout)
|> Map.put(:timestamps, timestamps)
Multi.run(multi, :insert_zksync_batch_blocks, fn repo, _ ->
Instrumenter.block_import_stage_runner(
fn -> insert(repo, changes_list, insert_options) end,
:block_referencing,
:zksync_batch_blocks,
:zksync_batch_blocks
)
end)
end
@impl Import.Runner
def timeout, do: @timeout
@spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
{:ok, [BatchBlock.t()]}
| {:error, [Changeset.t()]}
def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = _options) when is_list(changes_list) do
# Enforce ZkSync.BatchBlock ShareLocks order (see docs: sharelock.md)
ordered_changes_list = Enum.sort_by(changes_list, & &1.hash)
{:ok, inserted} =
Import.insert_changes_list(
repo,
ordered_changes_list,
for: BatchBlock,
returning: true,
timeout: timeout,
timestamps: timestamps,
conflict_target: :hash,
on_conflict: :nothing
)
{:ok, inserted}
end
end
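
With the runner registered under the `:zksync_batch_blocks` option key, batch-to-block associations flow through the generic import pipeline like any other table. A hypothetical invocation, mirroring how other fetchers call `Chain.import/1` (parameter values are illustrative, not taken from the diff):

alias Explorer.Chain

{:ok, _imported} =
  Chain.import(%{
    zksync_batch_blocks: %{
      params: [
        # An L2 block hash (illustrative) and the ZkSync batch it belongs to
        %{batch_number: 100, hash: "0x" <> String.duplicate("ab", 32)}
      ]
    },
    timeout: 60_000
  })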

@ -0,0 +1,79 @@
defmodule Explorer.Chain.Import.Runner.ZkSync.BatchTransactions do
@moduledoc """
Bulk imports `t:Explorer.Chain.ZkSync.BatchTransaction.t/0`.
"""
require Ecto.Query
alias Ecto.{Changeset, Multi, Repo}
alias Explorer.Chain.Import
alias Explorer.Chain.ZkSync.BatchTransaction
alias Explorer.Prometheus.Instrumenter
@behaviour Import.Runner
# milliseconds
@timeout 60_000
@type imported :: [BatchTransaction.t()]
@impl Import.Runner
def ecto_schema_module, do: BatchTransaction
@impl Import.Runner
def option_key, do: :zksync_batch_transactions
@impl Import.Runner
@spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()}
def imported_table_row do
%{
value_type: "[#{ecto_schema_module()}.t()]",
value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
}
end
@impl Import.Runner
@spec run(Multi.t(), list(), map()) :: Multi.t()
def run(multi, changes_list, %{timestamps: timestamps} = options) do
insert_options =
options
|> Map.get(option_key(), %{})
|> Map.take(~w(on_conflict timeout)a)
|> Map.put_new(:timeout, @timeout)
|> Map.put(:timestamps, timestamps)
Multi.run(multi, :insert_zksync_batch_transactions, fn repo, _ ->
Instrumenter.block_import_stage_runner(
fn -> insert(repo, changes_list, insert_options) end,
:block_referencing,
:zksync_batch_transactions,
:zksync_batch_transactions
)
end)
end
@impl Import.Runner
def timeout, do: @timeout
@spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
{:ok, [BatchTransaction.t()]}
| {:error, [Changeset.t()]}
def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = _options) when is_list(changes_list) do
# Enforce ZkSync.BatchTransaction ShareLocks order (see docs: sharelock.md)
ordered_changes_list = Enum.sort_by(changes_list, & &1.hash)
{:ok, inserted} =
Import.insert_changes_list(
repo,
ordered_changes_list,
for: BatchTransaction,
returning: true,
timeout: timeout,
timestamps: timestamps,
conflict_target: :hash,
on_conflict: :nothing
)
{:ok, inserted}
end
end

@ -0,0 +1,103 @@
defmodule Explorer.Chain.Import.Runner.ZkSync.LifecycleTransactions do
@moduledoc """
Bulk imports `t:Explorer.Chain.ZkSync.LifecycleTransaction.t/0`.
"""
require Ecto.Query
alias Ecto.{Changeset, Multi, Repo}
alias Explorer.Chain.Import
alias Explorer.Chain.ZkSync.LifecycleTransaction
alias Explorer.Prometheus.Instrumenter
import Ecto.Query, only: [from: 2]
@behaviour Import.Runner
# milliseconds
@timeout 60_000
@type imported :: [LifecycleTransaction.t()]
@impl Import.Runner
def ecto_schema_module, do: LifecycleTransaction
@impl Import.Runner
def option_key, do: :zksync_lifecycle_transactions
@impl Import.Runner
@spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()}
def imported_table_row do
%{
value_type: "[#{ecto_schema_module()}.t()]",
value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
}
end
@impl Import.Runner
@spec run(Multi.t(), list(), map()) :: Multi.t()
def run(multi, changes_list, %{timestamps: timestamps} = options) do
insert_options =
options
|> Map.get(option_key(), %{})
|> Map.take(~w(on_conflict timeout)a)
|> Map.put_new(:timeout, @timeout)
|> Map.put(:timestamps, timestamps)
Multi.run(multi, :insert_zksync_lifecycle_transactions, fn repo, _ ->
Instrumenter.block_import_stage_runner(
fn -> insert(repo, changes_list, insert_options) end,
:block_referencing,
:zksync_lifecycle_transactions,
:zksync_lifecycle_transactions
)
end)
end
@impl Import.Runner
def timeout, do: @timeout
@spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
{:ok, [LifecycleTransaction.t()]}
| {:error, [Changeset.t()]}
def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do
on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0)
# Enforce ZkSync.LifecycleTransaction ShareLocks order (see docs: sharelock.md)
ordered_changes_list = Enum.sort_by(changes_list, & &1.id)
{:ok, inserted} =
Import.insert_changes_list(
repo,
ordered_changes_list,
for: LifecycleTransaction,
returning: true,
timeout: timeout,
timestamps: timestamps,
conflict_target: :hash,
on_conflict: on_conflict
)
{:ok, inserted}
end
defp default_on_conflict do
from(
tx in LifecycleTransaction,
update: [
set: [
# don't update `id` as it is a primary key
# don't update `hash` as it is a unique index and used for the conflict target
timestamp: fragment("EXCLUDED.timestamp"),
inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", tx.inserted_at),
updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", tx.updated_at)
]
],
where:
fragment(
"(EXCLUDED.timestamp) IS DISTINCT FROM (?)",
tx.timestamp
)
)
end
end
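
Because `insert/3` reads `:on_conflict` lazily from its options, callers can override the upsert strategy per import; `default_on_conflict/0` applies only when nothing is supplied, updating a conflicting row from `EXCLUDED` and bumping `updated_at` solely when the incoming `timestamp` really differs (`IS DISTINCT FROM` is the NULL-safe inequality). A hedged sketch of overriding the strategy to insert-or-skip (values illustrative):

alias Explorer.Chain.{Hash, Import}
alias Explorer.Chain.Import.Runner.ZkSync.LifecycleTransactions

{:ok, hash} = Hash.Full.cast("0x" <> String.duplicate("ef", 32))
changes_list = [%{id: 1, hash: hash, timestamp: DateTime.utc_now()}]

# Passing :on_conflict explicitly bypasses default_on_conflict/0,
# leaving existing rows untouched on a hash collision.
{:ok, _inserted} =
  LifecycleTransactions.insert(
    Explorer.Repo,
    changes_list,
    %{timeout: 60_000, timestamps: Import.timestamps(), on_conflict: :nothing}
  )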

@ -0,0 +1,122 @@
defmodule Explorer.Chain.Import.Runner.ZkSync.TransactionBatches do
@moduledoc """
Bulk imports `t:Explorer.Chain.ZkSync.TransactionBatch.t/0`.
"""
require Ecto.Query
alias Ecto.{Changeset, Multi, Repo}
alias Explorer.Chain.Import
alias Explorer.Chain.ZkSync.TransactionBatch
alias Explorer.Prometheus.Instrumenter
import Ecto.Query, only: [from: 2]
@behaviour Import.Runner
# milliseconds
@timeout 60_000
@type imported :: [TransactionBatch.t()]
@impl Import.Runner
def ecto_schema_module, do: TransactionBatch
@impl Import.Runner
def option_key, do: :zksync_transaction_batches
@impl Import.Runner
@spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()}
def imported_table_row do
%{
value_type: "[#{ecto_schema_module()}.t()]",
value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
}
end
@impl Import.Runner
@spec run(Multi.t(), list(), map()) :: Multi.t()
def run(multi, changes_list, %{timestamps: timestamps} = options) do
insert_options =
options
|> Map.get(option_key(), %{})
|> Map.take(~w(on_conflict timeout)a)
|> Map.put_new(:timeout, @timeout)
|> Map.put(:timestamps, timestamps)
Multi.run(multi, :insert_zksync_transaction_batches, fn repo, _ ->
Instrumenter.block_import_stage_runner(
fn -> insert(repo, changes_list, insert_options) end,
:block_referencing,
:zksync_transaction_batches,
:zksync_transaction_batches
)
end)
end
@impl Import.Runner
def timeout, do: @timeout
@spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
{:ok, [TransactionBatch.t()]}
| {:error, [Changeset.t()]}
def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do
on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0)
# Enforce ZkSync.TransactionBatch ShareLocks order (see docs: sharelock.md)
ordered_changes_list = Enum.sort_by(changes_list, & &1.number)
{:ok, inserted} =
Import.insert_changes_list(
repo,
ordered_changes_list,
for: TransactionBatch,
returning: true,
timeout: timeout,
timestamps: timestamps,
conflict_target: :number,
on_conflict: on_conflict
)
{:ok, inserted}
end
defp default_on_conflict do
from(
tb in TransactionBatch,
update: [
set: [
# don't update `number` as it is a primary key and used for the conflict target
timestamp: fragment("EXCLUDED.timestamp"),
l1_tx_count: fragment("EXCLUDED.l1_tx_count"),
l2_tx_count: fragment("EXCLUDED.l2_tx_count"),
root_hash: fragment("EXCLUDED.root_hash"),
l1_gas_price: fragment("EXCLUDED.l1_gas_price"),
l2_fair_gas_price: fragment("EXCLUDED.l2_fair_gas_price"),
start_block: fragment("EXCLUDED.start_block"),
end_block: fragment("EXCLUDED.end_block"),
commit_id: fragment("EXCLUDED.commit_id"),
prove_id: fragment("EXCLUDED.prove_id"),
execute_id: fragment("EXCLUDED.execute_id"),
inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", tb.inserted_at),
updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", tb.updated_at)
]
],
where:
fragment(
"(EXCLUDED.timestamp, EXCLUDED.l1_tx_count, EXCLUDED.l2_tx_count, EXCLUDED.root_hash, EXCLUDED.l1_gas_price, EXCLUDED.l2_fair_gas_price, EXCLUDED.start_block, EXCLUDED.end_block, EXCLUDED.commit_id, EXCLUDED.prove_id, EXCLUDED.execute_id) IS DISTINCT FROM (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
tb.timestamp,
tb.l1_tx_count,
tb.l2_tx_count,
tb.root_hash,
tb.l1_gas_price,
tb.l2_fair_gas_price,
tb.start_block,
tb.end_block,
tb.commit_id,
tb.prove_id,
tb.execute_id
)
)
end
end

@ -0,0 +1,30 @@
defmodule Explorer.Chain.Import.Stage.AddressReferencing do
@moduledoc """
Imports any tables that reference `t:Explorer.Chain.Address.t/0` records imported by
`Explorer.Chain.Import.Stage.Addresses`.
"""
alias Explorer.Chain.Import.{Runner, Stage}
@behaviour Stage
@impl Stage
def runners,
do: [
Runner.Address.CoinBalances,
Runner.Blocks,
Runner.Address.CoinBalancesDaily
]
@impl Stage
def all_runners,
do: runners()
@impl Stage
def multis(runner_to_changes_list, options) do
{final_multi, final_remaining_runner_to_changes_list} =
Stage.single_multi(runners(), runner_to_changes_list, options)
{[final_multi], final_remaining_runner_to_changes_list}
end
end

@ -0,0 +1,26 @@
defmodule Explorer.Chain.Import.Stage.Addresses do
@moduledoc """
Imports addresses before anything else that references them because an unused address is still valid and recoverable
if the other stage(s) don't commit.
"""
alias Explorer.Chain.Import.{Runner, Stage}
@behaviour Stage
@runner Runner.Addresses
@impl Stage
def runners, do: [@runner]
@impl Stage
def all_runners,
do: runners()
@chunk_size 50
@impl Stage
def multis(runner_to_changes_list, options) do
Stage.chunk_every(runner_to_changes_list, @runner, @chunk_size, options)
end
end
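
`Stage.chunk_every/4` splits a potentially huge address changes list into chunks of `@chunk_size`, yielding one `Ecto.Multi` per chunk so a single import never builds one enormous INSERT. Conceptually the chunking is just the following (a simplified sketch, not the actual `Stage` implementation):

address_params = Enum.map(1..120, &%{hash: &1})

chunks = Enum.chunk_every(address_params, 50)
# Three chunks of 50, 50, and 20 addresses, each destined for its own Multi.
3 = length(chunks)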

@ -43,6 +43,13 @@ defmodule Explorer.Chain.Import.Stage.BlockReferencing do
Runner.PolygonZkevm.BridgeOperations
]
@zksync_runners [
Runner.ZkSync.LifecycleTransactions,
Runner.ZkSync.TransactionBatches,
Runner.ZkSync.BatchTransactions,
Runner.ZkSync.BatchBlocks
]
@shibarium_runners [
Runner.Shibarium.BridgeOperations
]
@ -69,6 +76,9 @@ defmodule Explorer.Chain.Import.Stage.BlockReferencing do
"ethereum" ->
@default_runners ++ @ethereum_runners
"zksync" ->
@default_runners ++ @zksync_runners
_ ->
@default_runners
end
@ -76,7 +86,8 @@ defmodule Explorer.Chain.Import.Stage.BlockReferencing do
@impl Stage
def all_runners do
@default_runners ++ @optimism_runners ++ @polygon_edge_runners ++ @polygon_zkevm_runners ++ @shibarium_runners
@default_runners ++
@optimism_runners ++ @polygon_edge_runners ++ @polygon_zkevm_runners ++ @shibarium_runners ++ @zksync_runners
end
@impl Stage

@ -14,8 +14,9 @@ defmodule Explorer.Chain.Transaction.Schema do
Wei
}
alias Explorer.Chain.PolygonZkevm.BatchTransaction
alias Explorer.Chain.PolygonZkevm.BatchTransaction, as: ZkevmBatchTransaction
alias Explorer.Chain.Transaction.{Fork, Status}
alias Explorer.Chain.ZkSync.BatchTransaction, as: ZkSyncBatchTransaction
@chain_type_fields (case Application.compile_env(:explorer, :chain_type) do
"ethereum" ->
@ -77,7 +78,11 @@ defmodule Explorer.Chain.Transaction.Schema do
"polygon_zkevm" ->
elem(
quote do
has_one(:zkevm_batch_transaction, BatchTransaction, foreign_key: :hash, references: :hash)
has_one(:zkevm_batch_transaction, ZkevmBatchTransaction,
foreign_key: :hash,
references: :hash
)
has_one(:zkevm_batch, through: [:zkevm_batch_transaction, :batch], references: :hash)
has_one(:zkevm_sequence_transaction,
@ -93,6 +98,22 @@ defmodule Explorer.Chain.Transaction.Schema do
2
)
"zksync" ->
elem(
quote do
has_one(:zksync_batch_transaction, ZkSyncBatchTransaction,
foreign_key: :hash,
references: :hash
)
has_one(:zksync_batch, through: [:zksync_batch_transaction, :batch])
has_one(:zksync_commit_transaction, through: [:zksync_batch, :commit_transaction])
has_one(:zksync_prove_transaction, through: [:zksync_batch, :prove_transaction])
has_one(:zksync_execute_transaction, through: [:zksync_batch, :execute_transaction])
end,
2
)
_ ->
[]
end)
@ -195,7 +216,7 @@ defmodule Explorer.Chain.Transaction do
alias ABI.FunctionSelector
alias Ecto.Association.NotLoaded
alias Ecto.Changeset
alias Explorer.{Chain, Helper, PagingOptions, Repo, SortingHelper}
alias Explorer.{Chain, PagingOptions, Repo, SortingHelper}
alias Explorer.Chain.{
Block.Reward,
@ -203,10 +224,8 @@ defmodule Explorer.Chain.Transaction do
Data,
DenormalizationHelper,
Hash,
Log,
SmartContract,
SmartContract.Proxy,
Token,
TokenTransfer,
Transaction,
Wei
@ -216,12 +235,12 @@ defmodule Explorer.Chain.Transaction do
@optional_attrs ~w(max_priority_fee_per_gas max_fee_per_gas block_hash block_number block_consensus block_timestamp created_contract_address_hash cumulative_gas_used earliest_processing_start
error gas_price gas_used index created_contract_code_indexed_at status
to_address_hash revert_reason type has_error_in_internal_txs)a
to_address_hash revert_reason type has_error_in_internal_txs r s v)a
@optimism_optional_attrs ~w(l1_fee l1_fee_scalar l1_gas_price l1_gas_used l1_tx_origin l1_block_number)a
@suave_optional_attrs ~w(execution_node_hash wrapped_type wrapped_nonce wrapped_to_address_hash wrapped_gas wrapped_gas_price wrapped_max_priority_fee_per_gas wrapped_max_fee_per_gas wrapped_value wrapped_input wrapped_v wrapped_r wrapped_s wrapped_hash)a
@required_attrs ~w(from_address_hash gas hash input nonce r s v value)a
@required_attrs ~w(from_address_hash gas hash input nonce value)a
@empty_attrs ~w()a
@ -1155,98 +1174,6 @@ defmodule Explorer.Chain.Transaction do
end
end
@api_true [api?: true]
@transaction_fee_event_signature "0x99e7b0ba56da2819c37c047f0511fd2bf6c9b4e27b4a979a19d6da0f74be8155"
@transaction_fee_event_abi [
%{
"anonymous" => false,
"inputs" => [
%{
"indexed" => false,
"internalType" => "address",
"name" => "token",
"type" => "address"
},
%{
"indexed" => false,
"internalType" => "uint256",
"name" => "totalFee",
"type" => "uint256"
},
%{
"indexed" => false,
"internalType" => "address",
"name" => "validator",
"type" => "address"
},
%{
"indexed" => false,
"internalType" => "uint256",
"name" => "validatorFee",
"type" => "uint256"
},
%{
"indexed" => false,
"internalType" => "address",
"name" => "dapp",
"type" => "address"
},
%{
"indexed" => false,
"internalType" => "uint256",
"name" => "dappFee",
"type" => "uint256"
}
],
"name" => "TransactionFee",
"type" => "event"
}
]
def maybe_prepare_stability_fees(transactions) do
if Application.get_env(:explorer, :chain_type) == "stability" do
maybe_prepare_stability_fees_inner(transactions)
else
transactions
end
end
defp maybe_prepare_stability_fees_inner(transactions) when is_list(transactions) do
{transactions, _tokens_acc} =
Enum.map_reduce(transactions, %{}, fn transaction, tokens_acc ->
case Log.fetch_log_by_tx_hash_and_first_topic(transaction.hash, @transaction_fee_event_signature, @api_true) do
fee_log when not is_nil(fee_log) ->
{:ok, _selector, mapping} = Log.find_and_decode(@transaction_fee_event_abi, fee_log, transaction.hash)
[{"token", "address", false, token_address_hash}, _, _, _, _, _] = mapping
{token, new_tokens_acc} = check_tokens_acc(bytes_to_address_hash(token_address_hash), tokens_acc)
{%Transaction{transaction | transaction_fee_log: mapping, transaction_fee_token: token}, new_tokens_acc}
_ ->
{transaction, tokens_acc}
end
end)
transactions
end
defp maybe_prepare_stability_fees_inner(transaction) do
[transaction] = maybe_prepare_stability_fees_inner([transaction])
transaction
end
defp check_tokens_acc(token_address_hash, tokens_acc) do
if Map.has_key?(tokens_acc, token_address_hash) do
{tokens_acc[token_address_hash], tokens_acc}
else
token = Token.get_by_contract_address_hash(token_address_hash, @api_true)
{token, Map.put(tokens_acc, token_address_hash, token)}
end
end
def bytes_to_address_hash(bytes), do: %Hash{byte_count: 20, bytes: bytes}
@doc """
@ -1689,50 +1616,6 @@ defmodule Explorer.Chain.Transaction do
}
end
@suave_bid_event "0x83481d5b04dea534715acad673a8177a46fc93882760f36bdc16ccac439d504e"
@spec suave_parse_allowed_peekers(Ecto.Schema.has_many(Log.t())) :: [String.t()]
def suave_parse_allowed_peekers(%NotLoaded{}), do: []
def suave_parse_allowed_peekers(logs) do
suave_bid_contracts =
Application.get_all_env(:explorer)[Transaction][:suave_bid_contracts]
|> String.split(",")
|> Enum.map(fn sbc -> String.downcase(String.trim(sbc)) end)
bid_event =
Enum.find(logs, fn log ->
sanitize_log_first_topic(log.first_topic) == @suave_bid_event &&
Enum.member?(suave_bid_contracts, String.downcase(Hash.to_string(log.address_hash)))
end)
if is_nil(bid_event) do
[]
else
[_bid_id, _decryption_condition, allowed_peekers] =
Helper.decode_data(bid_event.data, [{:bytes, 16}, {:uint, 64}, {:array, :address}])
Enum.map(allowed_peekers, fn peeker ->
"0x" <> Base.encode16(peeker, case: :lower)
end)
end
end
defp sanitize_log_first_topic(first_topic) do
if is_nil(first_topic) do
""
else
sanitized =
if is_binary(first_topic) do
first_topic
else
Hash.to_string(first_topic)
end
String.downcase(sanitized)
end
end
@doc """
The fee a `transaction` paid for the `t:Explorer.Transaction.t/0` `gas`

@ -0,0 +1,37 @@
defmodule Explorer.Chain.ZkSync.BatchBlock do
@moduledoc "Models a list of blocks related to a batch for ZkSync."
use Explorer.Schema
alias Explorer.Chain.{Block, Hash}
alias Explorer.Chain.ZkSync.TransactionBatch
@required_attrs ~w(batch_number hash)a
@type t :: %__MODULE__{
batch_number: non_neg_integer(),
batch: %Ecto.Association.NotLoaded{} | TransactionBatch.t() | nil,
hash: Hash.t(),
block: %Ecto.Association.NotLoaded{} | Block.t() | nil
}
@primary_key false
schema "zksync_batch_l2_blocks" do
belongs_to(:batch, TransactionBatch, foreign_key: :batch_number, references: :number, type: :integer)
belongs_to(:block, Block, foreign_key: :hash, primary_key: true, references: :hash, type: Hash.Full)
timestamps()
end
@doc """
Validates that the `attrs` are valid.
"""
@spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t()
def changeset(%__MODULE__{} = items, attrs \\ %{}) do
items
|> cast(attrs, @required_attrs)
|> validate_required(@required_attrs)
|> foreign_key_constraint(:batch_number)
|> unique_constraint(:hash)
end
end
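
A quick usage sketch of the changeset above (the hash value is illustrative):

alias Explorer.Chain.ZkSync.BatchBlock

attrs = %{
  batch_number: 42,
  hash: "0x" <> String.duplicate("12", 32)
}

# Both required attributes cast cleanly, so the changeset is valid.
%Ecto.Changeset{valid?: true} = BatchBlock.changeset(%BatchBlock{}, attrs)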

@ -0,0 +1,37 @@
defmodule Explorer.Chain.ZkSync.BatchTransaction do
@moduledoc "Models a list of transactions related to a batch for ZkSync."
use Explorer.Schema
alias Explorer.Chain.{Hash, Transaction}
alias Explorer.Chain.ZkSync.TransactionBatch
@required_attrs ~w(batch_number hash)a
@type t :: %__MODULE__{
batch_number: non_neg_integer(),
batch: %Ecto.Association.NotLoaded{} | TransactionBatch.t() | nil,
hash: Hash.t(),
l2_transaction: %Ecto.Association.NotLoaded{} | Transaction.t() | nil
}
@primary_key false
schema "zksync_batch_l2_transactions" do
belongs_to(:batch, TransactionBatch, foreign_key: :batch_number, references: :number, type: :integer)
belongs_to(:l2_transaction, Transaction, foreign_key: :hash, primary_key: true, references: :hash, type: Hash.Full)
timestamps()
end
@doc """
Validates that the `attrs` are valid.
"""
@spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t()
def changeset(%__MODULE__{} = transactions, attrs \\ %{}) do
transactions
|> cast(attrs, @required_attrs)
|> validate_required(@required_attrs)
|> foreign_key_constraint(:batch_number)
|> unique_constraint(:hash)
end
end

@ -0,0 +1,38 @@
defmodule Explorer.Chain.ZkSync.LifecycleTransaction do
@moduledoc "Models an L1 lifecycle transaction for ZkSync."
use Explorer.Schema
alias Explorer.Chain.Hash
alias Explorer.Chain.ZkSync.TransactionBatch
@required_attrs ~w(id hash timestamp)a
@type t :: %__MODULE__{
hash: Hash.t(),
timestamp: DateTime.t()
}
@primary_key {:id, :integer, autogenerate: false}
schema "zksync_lifecycle_l1_transactions" do
field(:hash, Hash.Full)
field(:timestamp, :utc_datetime_usec)
has_many(:committed_batches, TransactionBatch, foreign_key: :commit_id)
has_many(:proven_batches, TransactionBatch, foreign_key: :prove_id)
has_many(:executed_batches, TransactionBatch, foreign_key: :execute_id)
timestamps()
end
@doc """
Validates that the `attrs` are valid.
"""
@spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t()
def changeset(%__MODULE__{} = txn, attrs \\ %{}) do
txn
|> cast(attrs, @required_attrs)
|> validate_required(@required_attrs)
|> unique_constraint(:id)
end
end

@ -0,0 +1,339 @@
defmodule Explorer.Chain.ZkSync.Reader do
@moduledoc "Contains read functions for zksync modules."
import Ecto.Query,
only: [
from: 2,
limit: 2,
order_by: 2,
where: 2,
where: 3
]
import Explorer.Chain, only: [select_repo: 1]
alias Explorer.Chain.ZkSync.{
BatchTransaction,
LifecycleTransaction,
TransactionBatch
}
alias Explorer.{Chain, PagingOptions, Repo}
@doc """
Retrieves the total number of batches imported into the `zksync_transaction_batches` table.
## Parameters
- `options`: passed to `Chain.select_repo()`
## Returns
The total number of batches
"""
@spec batches_count(keyword()) :: any()
def batches_count(options) do
TransactionBatch
|> select_repo(options).aggregate(:count, timeout: :infinity)
end
@doc """
Retrieves a batch from the `zksync_transaction_batches` table by its number, or the latest batch if `:latest` is passed.
## Parameters
- `number`: either the batch number or `:latest` to get the most recent batch available in the DB
- `options`: passed to `Chain.select_repo()`
## Returns
- `{:ok, Explorer.Chain.ZkSync.TransactionBatch}` if the batch is found
- `{:error, :not_found}` if there is no batch with the given number
"""
@spec batch(:latest | binary() | integer(), keyword()) ::
        {:error, :not_found} | {:ok, Explorer.Chain.ZkSync.TransactionBatch.t()}
def batch(number, options)
def batch(:latest, options) when is_list(options) do
TransactionBatch
|> order_by(desc: :number)
|> limit(1)
|> select_repo(options).one()
|> case do
nil -> {:error, :not_found}
batch -> {:ok, batch}
end
end
def batch(number, options)
when (is_integer(number) or is_binary(number)) and
is_list(options) do
necessity_by_association = Keyword.get(options, :necessity_by_association, %{})
TransactionBatch
|> where(number: ^number)
|> Chain.join_associations(necessity_by_association)
|> select_repo(options).one()
|> case do
nil -> {:error, :not_found}
batch -> {:ok, batch}
end
end
@doc """
Retrieves a list of batches from the `zksync_transaction_batches` table within the given range of batch numbers.
## Parameters
- `start_number`: The start of the batch numbers range.
- `end_number`: The end of the batch numbers range.
- `options`: Options passed to `Chain.select_repo()`.
## Returns
- A list of `Explorer.Chain.ZkSync.TransactionBatch` if at least one batch exists within the range.
- An empty list (`[]`) if no batches within the range are found in the database.
"""
@spec batches(integer(), integer(), keyword()) :: [Explorer.Chain.ZkSync.TransactionBatch.t()]
def batches(start_number, end_number, options)
when is_integer(start_number) and
is_integer(end_number) and
is_list(options) do
necessity_by_association = Keyword.get(options, :necessity_by_association, %{})
base_query = from(tb in TransactionBatch, order_by: [desc: tb.number])
base_query
|> where([tb], tb.number >= ^start_number and tb.number <= ^end_number)
|> Chain.join_associations(necessity_by_association)
|> select_repo(options).all()
end
@doc """
Retrieves the batches from the `zksync_transaction_batches` table whose numbers are defined in the input list.
## Parameters
- `numbers`: The list of batch numbers to retrieve from the database.
- `options`: Options passed to `Chain.select_repo()`.
## Returns
- A list of `Explorer.Chain.ZkSync.TransactionBatch` if at least one batch matches a number from the list. The output list may be shorter than the input list.
- An empty list (`[]`) if no batches with numbers from the list are found.
"""
@spec batches(maybe_improper_list(integer(), []), keyword()) :: [Explorer.Chain.ZkSync.TransactionBatch.t()]
def batches(numbers, options)
when is_list(numbers) and
is_list(options) do
necessity_by_association = Keyword.get(options, :necessity_by_association, %{})
base_query = from(tb in TransactionBatch, order_by: [desc: tb.number])
base_query
|> where([tb], tb.number in ^numbers)
|> Chain.join_associations(necessity_by_association)
|> select_repo(options).all()
end
@doc """
Retrieves a list of batches from the `zksync_transaction_batches` table.
## Parameters
- `options`: Options passed to `Chain.select_repo()`. (Optional)
## Returns
- If the option `confirmed?` is set, returns the ten latest committed batches (`Explorer.Chain.ZkSync.TransactionBatch`).
- Returns a list of `Explorer.Chain.ZkSync.TransactionBatch` based on the paging options if `confirmed?` is not set.
"""
@spec batches(keyword()) :: [Explorer.Chain.ZkSync.TransactionBatch.t()]
@spec batches() :: [Explorer.Chain.ZkSync.TransactionBatch.t()]
def batches(options \\ []) when is_list(options) do
necessity_by_association = Keyword.get(options, :necessity_by_association, %{})
base_query =
from(tb in TransactionBatch,
order_by: [desc: tb.number]
)
query =
if Keyword.get(options, :confirmed?, false) do
base_query
|> Chain.join_associations(necessity_by_association)
|> where([tb], not is_nil(tb.commit_id) and tb.commit_id > 0)
|> limit(10)
else
paging_options = Keyword.get(options, :paging_options, Chain.default_paging_options())
base_query
|> Chain.join_associations(necessity_by_association)
|> page_batches(paging_options)
|> limit(^paging_options.page_size)
end
select_repo(options).all(query)
end
@doc """
Retrieves the transactions from the `zksync_batch_l2_transactions` table that are included in a specific batch.
## Parameters
- `batch_number`: The number of the batch whose transactions should be returned.
- `options`: Options passed to `Chain.select_repo()`. (Optional)
## Returns
- A list of `Explorer.Chain.ZkSync.BatchTransaction` belonging to the specified batch.
"""
@spec batch_transactions(non_neg_integer()) :: [Explorer.Chain.ZkSync.BatchTransaction.t()]
@spec batch_transactions(non_neg_integer(), keyword()) :: [Explorer.Chain.ZkSync.BatchTransaction.t()]
def batch_transactions(batch_number, options \\ [])
when is_integer(batch_number) or
is_binary(batch_number) do
query = from(batch in BatchTransaction, where: batch.batch_number == ^batch_number)
select_repo(options).all(query)
end
@doc """
Gets the number of the earliest batch in the `zksync_transaction_batches` table where the commitment transaction is not set.
Batch #0 is filtered out, as it does not have a linked commitment transaction.
## Returns
- The number of a batch if it exists, otherwise `nil`. `nil` means that either no batches have been imported yet, all imported batches are already marked as committed, or Batch #0 is the only batch available.
"""
@spec earliest_sealed_batch_number() :: non_neg_integer() | nil
def earliest_sealed_batch_number do
query =
from(tb in TransactionBatch,
select: tb.number,
where: is_nil(tb.commit_id) and tb.number > 0,
order_by: [asc: tb.number],
limit: 1
)
query
|> Repo.one()
end
@doc """
Gets the number of the earliest batch in the `zksync_transaction_batches` table where the proving transaction is not set.
Batch #0 is filtered out, as it does not have a linked proving transaction.
## Returns
- The number of a batch if it exists, otherwise `nil`. `nil` means that either no batches have been imported yet, all imported batches are already marked as proven, or Batch #0 is the only batch available.
"""
@spec earliest_unproven_batch_number() :: non_neg_integer() | nil
def earliest_unproven_batch_number do
query =
from(tb in TransactionBatch,
select: tb.number,
where: is_nil(tb.prove_id) and tb.number > 0,
order_by: [asc: tb.number],
limit: 1
)
query
|> Repo.one()
end
@doc """
Gets the number of the earliest batch in the `zksync_transaction_batches` table where the executing transaction is not set.
Batch #0 is filtered out, as it does not have a linked executing transaction.
## Returns
- The number of a batch if it exists, otherwise `nil`. `nil` means that either no batches have been imported yet, all imported batches are already marked as executed, or Batch #0 is the only batch available.
"""
@spec earliest_unexecuted_batch_number() :: non_neg_integer() | nil
def earliest_unexecuted_batch_number do
query =
from(tb in TransactionBatch,
select: tb.number,
where: is_nil(tb.execute_id) and tb.number > 0,
order_by: [asc: tb.number],
limit: 1
)
query
|> Repo.one()
end
@doc """
Gets the number of the oldest batch from the `zksync_transaction_batches` table.
## Returns
- The number of a batch if it exists, otherwise `nil`. `nil` means that no batches have been imported yet.
"""
@spec oldest_available_batch_number() :: non_neg_integer() | nil
def oldest_available_batch_number do
query =
from(tb in TransactionBatch,
select: tb.number,
order_by: [asc: tb.number],
limit: 1
)
query
|> Repo.one()
end
@doc """
Gets the number of the youngest (the most recent) imported batch from the `zksync_transaction_batches` table.
## Returns
- The number of a batch if it exists, otherwise `nil`. `nil` means that no batches have been imported yet.
"""
@spec latest_available_batch_number() :: non_neg_integer() | nil
def latest_available_batch_number do
query =
from(tb in TransactionBatch,
select: tb.number,
order_by: [desc: tb.number],
limit: 1
)
query
|> Repo.one()
end
@doc """
Reads a list of L1 transactions by their hashes from the `zksync_lifecycle_l1_transactions` table.
## Parameters
- `l1_tx_hashes`: A list of hashes to retrieve L1 transactions for.
## Returns
- A list of `Explorer.Chain.ZkSync.LifecycleTransaction` corresponding to the hashes from the input list. The output list may be smaller than the input list.
"""
@spec lifecycle_transactions(maybe_improper_list(binary(), [])) :: [Explorer.Chain.ZkSync.LifecycleTransaction.t()]
def lifecycle_transactions(l1_tx_hashes) do
query =
from(
lt in LifecycleTransaction,
select: {lt.hash, lt.id},
where: lt.hash in ^l1_tx_hashes
)
Repo.all(query, timeout: :infinity)
end
@doc """
Determines the next available index for an L1 transaction in the `zksync_lifecycle_l1_transactions` table.
## Returns
- The next available index. If there are no L1 transactions imported yet, it will return `1`.
"""
@spec next_id() :: non_neg_integer()
def next_id do
query =
from(lt in LifecycleTransaction,
select: lt.id,
order_by: [desc: lt.id],
limit: 1
)
last_id =
query
|> Repo.one()
|> Kernel.||(0)
last_id + 1
end
defp page_batches(query, %PagingOptions{key: nil}), do: query
defp page_batches(query, %PagingOptions{key: {number}}) do
from(tb in query, where: tb.number < ^number)
end
end
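
A few usage sketches of the reader API above (option values are illustrative):

alias Explorer.Chain.ZkSync.Reader

# The latest batch, or {:error, :not_found} when the table is empty
{:ok, _latest} = Reader.batch(:latest, api?: true)

# Batches 100..110 in descending order, preloading commit transactions
batches =
  Reader.batches(100, 110,
    necessity_by_association: %{:commit_transaction => :optional},
    api?: true
  )

# All L2 transactions included in batch 100
batch_transactions = Reader.batch_transactions(100, api?: true)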

@ -0,0 +1,83 @@
defmodule Explorer.Chain.ZkSync.TransactionBatch do
@moduledoc "Models a batch of transactions for ZkSync."
use Explorer.Schema
alias Explorer.Chain.{
Block,
Hash,
Wei
}
alias Explorer.Chain.ZkSync.{BatchTransaction, LifecycleTransaction}
@optional_attrs ~w(commit_id prove_id execute_id)a
@required_attrs ~w(number timestamp l1_tx_count l2_tx_count root_hash l1_gas_price l2_fair_gas_price start_block end_block)a
@type t :: %__MODULE__{
number: non_neg_integer(),
timestamp: DateTime.t(),
l1_tx_count: non_neg_integer(),
l2_tx_count: non_neg_integer(),
root_hash: Hash.t(),
l1_gas_price: Wei.t(),
l2_fair_gas_price: Wei.t(),
start_block: Block.block_number(),
end_block: Block.block_number(),
commit_id: non_neg_integer() | nil,
commit_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil,
prove_id: non_neg_integer() | nil,
prove_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil,
execute_id: non_neg_integer() | nil,
execute_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil
}
@primary_key {:number, :integer, autogenerate: false}
schema "zksync_transaction_batches" do
field(:timestamp, :utc_datetime_usec)
field(:l1_tx_count, :integer)
field(:l2_tx_count, :integer)
field(:root_hash, Hash.Full)
field(:l1_gas_price, Wei)
field(:l2_fair_gas_price, Wei)
field(:start_block, :integer)
field(:end_block, :integer)
belongs_to(:commit_transaction, LifecycleTransaction,
foreign_key: :commit_id,
references: :id,
type: :integer
)
belongs_to(:prove_transaction, LifecycleTransaction,
foreign_key: :prove_id,
references: :id,
type: :integer
)
belongs_to(:execute_transaction, LifecycleTransaction,
foreign_key: :execute_id,
references: :id,
type: :integer
)
has_many(:l2_transactions, BatchTransaction, foreign_key: :batch_number)
timestamps()
end
@doc """
Validates that the `attrs` are valid.
"""
@spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t()
def changeset(%__MODULE__{} = batches, attrs \\ %{}) do
batches
|> cast(attrs, @required_attrs ++ @optional_attrs)
|> validate_required(@required_attrs)
|> foreign_key_constraint(:commit_id)
|> foreign_key_constraint(:prove_id)
|> foreign_key_constraint(:execute_id)
|> unique_constraint(:number)
end
end

@ -181,6 +181,30 @@ defmodule Explorer.Repo do
end
end
defmodule ZkSync do
use Ecto.Repo,
otp_app: :explorer,
adapter: Ecto.Adapters.Postgres
def init(_, opts) do
db_url = Application.get_env(:explorer, __MODULE__)[:url]
repo_conf = Application.get_env(:explorer, __MODULE__)
merged =
%{url: db_url}
|> ConfigHelper.get_db_config()
|> Keyword.merge(repo_conf, fn
_key, v1, nil -> v1
_key, nil, v2 -> v2
_, _, v2 -> v2
end)
Application.put_env(:explorer, __MODULE__, merged)
{:ok, Keyword.put(opts, :url, db_url)}
end
end
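
The three-clause `Keyword.merge/3` above implements "an explicitly configured value wins, but `nil` never wins": a repo-level setting overrides the `ConfigHelper` default unless it is `nil`, in which case the default survives. In isolation (key names are illustrative):

defaults = [pool_size: 10, queue_target: 50]
repo_conf = [pool_size: 50, queue_target: nil]

merged =
  Keyword.merge(defaults, repo_conf, fn
    _key, v1, nil -> v1
    _key, nil, v2 -> v2
    _key, _v1, v2 -> v2
  end)

[pool_size: 50, queue_target: 50] = merged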
defmodule RSK do
use Ecto.Repo,
otp_app: :explorer,

@ -0,0 +1,17 @@
defmodule Explorer.Repo.ZkSync.Migrations.MakeTransactionRSVOptional do
use Ecto.Migration
def change do
alter table(:transactions) do
modify(:r, :numeric, precision: 100, null: true)
end
alter table(:transactions) do
modify(:s, :numeric, precision: 100, null: true)
end
alter table(:transactions) do
modify(:v, :numeric, precision: 100, null: true)
end
end
end
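
The three `alter` blocks above each issue their own ALTER TABLE statement; the same change can equivalently be expressed in a single block (an equivalent sketch, not what the migration ships):

alter table(:transactions) do
  modify(:r, :numeric, precision: 100, null: true)
  modify(:s, :numeric, precision: 100, null: true)
  modify(:v, :numeric, precision: 100, null: true)
end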

@ -0,0 +1,82 @@
defmodule Explorer.Repo.ZkSync.Migrations.CreateZkSyncTables do
use Ecto.Migration
def change do
create table(:zksync_lifecycle_l1_transactions, primary_key: false) do
add(:id, :integer, null: false, primary_key: true)
add(:hash, :bytea, null: false)
add(:timestamp, :"timestamp without time zone", null: false)
timestamps(null: false, type: :utc_datetime_usec)
end
create(unique_index(:zksync_lifecycle_l1_transactions, :hash))
create table(:zksync_transaction_batches, primary_key: false) do
add(:number, :integer, null: false, primary_key: true)
add(:timestamp, :"timestamp without time zone", null: false)
add(:l1_tx_count, :integer, null: false)
add(:l2_tx_count, :integer, null: false)
add(:root_hash, :bytea, null: false)
add(:l1_gas_price, :numeric, precision: 100, null: false)
add(:l2_fair_gas_price, :numeric, precision: 100, null: false)
add(:start_block, :integer, null: false)
add(:end_block, :integer, null: false)
add(
:commit_id,
references(:zksync_lifecycle_l1_transactions, on_delete: :restrict, on_update: :update_all, type: :integer),
null: true
)
add(
:prove_id,
references(:zksync_lifecycle_l1_transactions, on_delete: :restrict, on_update: :update_all, type: :integer),
null: true
)
add(
:execute_id,
references(:zksync_lifecycle_l1_transactions, on_delete: :restrict, on_update: :update_all, type: :integer),
null: true
)
timestamps(null: false, type: :utc_datetime_usec)
end
create table(:zksync_batch_l2_transactions, primary_key: false) do
add(
:batch_number,
references(:zksync_transaction_batches,
column: :number,
on_delete: :delete_all,
on_update: :update_all,
type: :integer
),
null: false
)
add(:hash, :bytea, null: false, primary_key: true)
timestamps(null: false, type: :utc_datetime_usec)
end
create(index(:zksync_batch_l2_transactions, :batch_number))
create table(:zksync_batch_l2_blocks, primary_key: false) do
add(
:batch_number,
references(:zksync_transaction_batches,
column: :number,
on_delete: :delete_all,
on_update: :update_all,
type: :integer
),
null: false
)
add(:hash, :bytea, null: false, primary_key: true)
timestamps(null: false, type: :utc_datetime_usec)
end
create(index(:zksync_batch_l2_blocks, :batch_number))
end
end

@ -99,7 +99,7 @@ defmodule Explorer.Chain.Import.Runner.BlocksTest do
]
}} = run_block_consensus_change(block, true, options)
assert count(Address.CurrentTokenBalance) == 0
assert %{value: nil} = Repo.one(Address.CurrentTokenBalance)
end
test "delete_address_current_token_balances does not delete rows with matching block number when consensus is false",
@ -118,100 +118,6 @@ defmodule Explorer.Chain.Import.Runner.BlocksTest do
assert count(Address.CurrentTokenBalance) == count
end
test "derive_address_current_token_balances inserts rows if there is an address_token_balance left for the rows deleted by delete_address_current_token_balances",
%{consensus_block: %{number: block_number} = block, options: options} do
token = insert(:token)
token_contract_address_hash = token.contract_address_hash
%Address{hash: address_hash} =
insert_address_with_token_balances(%{
previous: %{value: 1},
current: %{block_number: block_number, value: 2},
token_contract_address_hash: token_contract_address_hash
})
# Token must exist with non-`nil` `holder_count` for `blocks_update_token_holder_counts` to update
update_holder_count!(token_contract_address_hash, 1)
assert count(Address.TokenBalance) == 2
assert count(Address.CurrentTokenBalance) == 1
previous_block_number = block_number - 1
insert(:block, number: block_number, consensus: true)
assert {:ok,
%{
delete_address_current_token_balances: [
%{
address_hash: ^address_hash,
token_contract_address_hash: ^token_contract_address_hash
}
],
delete_address_token_balances: [
%{
address_hash: ^address_hash,
token_contract_address_hash: ^token_contract_address_hash,
block_number: ^block_number
}
],
derive_address_current_token_balances: [
%{
address_hash: ^address_hash,
token_contract_address_hash: ^token_contract_address_hash,
block_number: ^previous_block_number
}
],
# no updates because it both deletes and derives a holder
blocks_update_token_holder_counts: []
}} = run_block_consensus_change(block, true, options)
assert count(Address.TokenBalance) == 1
assert count(Address.CurrentTokenBalance) == 1
previous_value = Decimal.new(1)
assert %Address.CurrentTokenBalance{block_number: ^previous_block_number, value: ^previous_value} =
Repo.get_by(Address.CurrentTokenBalance,
address_hash: address_hash,
token_contract_address_hash: token_contract_address_hash
)
end
test "a non-holder reverting to a holder increases the holder_count",
%{consensus_block: %{hash: block_hash, miner_hash: miner_hash, number: block_number}, options: options} do
token = insert(:token)
token_contract_address_hash = token.contract_address_hash
non_holder_reverts_to_holder(%{
current: %{block_number: block_number},
token_contract_address_hash: token_contract_address_hash
})
# Token must exist with non-`nil` `holder_count` for `blocks_update_token_holder_counts` to update
update_holder_count!(token_contract_address_hash, 0)
insert(:block, number: block_number, consensus: true)
block_params = params_for(:block, hash: block_hash, miner_hash: miner_hash, number: block_number, consensus: true)
%Ecto.Changeset{valid?: true, changes: block_changes} = Block.changeset(%Block{}, block_params)
changes_list = [block_changes]
assert {:ok,
%{
blocks_update_token_holder_counts: [
%{
contract_address_hash: ^token_contract_address_hash,
holder_count: 1
}
]
}} =
Multi.new()
|> Blocks.run(changes_list, options)
|> Repo.transaction()
end
test "a holder reverting to a non-holder decreases the holder_count",
%{consensus_block: %{hash: block_hash, miner_hash: miner_hash, number: block_number}, options: options} do
token = insert(:token)

@ -2144,94 +2144,7 @@ defmodule Explorer.Chain.ImportTest do
}
})
assert is_nil(
Repo.get_by(Address.CurrentTokenBalance,
address_hash: address_hash,
token_contract_address_hash: token_contract_address_hash
)
)
assert is_nil(
Repo.get_by(Address.TokenBalance,
address_hash: address_hash,
token_contract_address_hash: token_contract_address_hash,
block_number: block_number
)
)
end
test "address_current_token_balances is derived during reorgs" do
%Block{number: block_number} = insert(:block, consensus: true)
previous_block_number = block_number - 1
%Address.TokenBalance{
address_hash: address_hash,
token_contract_address_hash: token_contract_address_hash,
value: previous_value,
block_number: previous_block_number
} = insert(:token_balance, block_number: previous_block_number)
address = Repo.get(Address, address_hash)
%Address.TokenBalance{
address_hash: ^address_hash,
token_contract_address_hash: token_contract_address_hash,
value: current_value,
block_number: ^block_number
} =
insert(:token_balance,
address: address,
token_contract_address_hash: token_contract_address_hash,
block_number: block_number
)
refute current_value == previous_value
%Address.CurrentTokenBalance{
address_hash: ^address_hash,
token_contract_address_hash: ^token_contract_address_hash,
block_number: ^block_number
} =
insert(:address_current_token_balance,
address: address,
token_contract_address_hash: token_contract_address_hash,
block_number: block_number,
value: current_value
)
miner_hash_after = address_hash()
from_address_hash_after = address_hash()
block_hash_after = block_hash()
assert {:ok, _} =
Import.all(%{
addresses: %{
params: [
%{hash: miner_hash_after},
%{hash: from_address_hash_after}
]
},
blocks: %{
params: [
%{
consensus: true,
difficulty: 1,
gas_limit: 1,
gas_used: 1,
hash: block_hash_after,
miner_hash: miner_hash_after,
nonce: 1,
number: block_number,
parent_hash: block_hash(),
size: 1,
timestamp: Timex.parse!("2019-01-01T02:00:00Z", "{ISO:Extended:Z}"),
total_difficulty: 1
}
]
}
})
assert %Address.CurrentTokenBalance{block_number: ^previous_block_number, value: ^previous_value} =
assert %{value: nil} =
Repo.get_by(Address.CurrentTokenBalance,
address_hash: address_hash,
token_contract_address_hash: token_contract_address_hash
@ -2245,100 +2158,5 @@ defmodule Explorer.Chain.ImportTest do
)
)
end
test "address_token_balances and address_current_token_balances can be replaced during reorgs" do
%Block{number: block_number} = insert(:block, consensus: true)
value_before = Decimal.new(1)
%Address{hash: address_hash} = address = insert(:address)
%Address.TokenBalance{
address_hash: ^address_hash,
token_contract_address_hash: token_contract_address_hash,
block_number: ^block_number
} = insert(:token_balance, address: address, block_number: block_number, value: value_before)
%Address.CurrentTokenBalance{
address_hash: ^address_hash,
token_contract_address_hash: ^token_contract_address_hash,
block_number: ^block_number
} =
insert(:address_current_token_balance,
address: address,
token_contract_address_hash: token_contract_address_hash,
block_number: block_number,
value: value_before
)
miner_hash_after = address_hash()
from_address_hash_after = address_hash()
block_hash_after = block_hash()
value_after = Decimal.add(value_before, 1)
assert {:ok, _} =
Import.all(%{
addresses: %{
params: [
%{hash: address_hash},
%{hash: token_contract_address_hash},
%{hash: miner_hash_after},
%{hash: from_address_hash_after}
]
},
address_token_balances: %{
params: [
%{
address_hash: address_hash,
token_contract_address_hash: token_contract_address_hash,
block_number: block_number,
value: value_after,
token_type: "ERC-20"
}
]
},
address_current_token_balances: %{
params: [
%{
address_hash: address_hash,
token_contract_address_hash: token_contract_address_hash,
block_number: block_number,
value: value_after,
token_type: "ERC-20"
}
]
},
blocks: %{
params: [
%{
consensus: true,
difficulty: 1,
gas_limit: 1,
gas_used: 1,
hash: block_hash_after,
miner_hash: miner_hash_after,
nonce: 1,
number: block_number,
parent_hash: block_hash(),
size: 1,
timestamp: Timex.parse!("2019-01-01T02:00:00Z", "{ISO:Extended:Z}"),
total_difficulty: 1
}
]
}
})
assert %Address.CurrentTokenBalance{value: ^value_after} =
Repo.get_by(Address.CurrentTokenBalance,
address_hash: address_hash,
token_contract_address_hash: token_contract_address_hash
)
assert %Address.TokenBalance{value: ^value_after} =
Repo.get_by(Address.TokenBalance,
address_hash: address_hash,
token_contract_address_hash: token_contract_address_hash,
block_number: block_number
)
end
end
end

@ -155,6 +155,7 @@ defmodule Indexer.Fetcher.PolygonZkevm.TransactionBatch do
end
defp fetch_and_save_batches(batch_start, batch_end, json_rpc_named_arguments) do
# For every batch from batch_start to batch_end request the batch info
requests =
batch_start
|> Range.new(batch_end, 1)
@ -171,6 +172,7 @@ defmodule Indexer.Fetcher.PolygonZkevm.TransactionBatch do
{:ok, responses} = Helper.repeated_call(&json_rpc/2, [requests, json_rpc_named_arguments], error_message, 3)
# From every batch info, extract the batch's L1 sequence tx and L1 verify tx
{sequence_hashes, verify_hashes} =
responses
|> Enum.reduce({[], []}, fn res, {sequences, verifies} = _acc ->
@ -194,8 +196,10 @@ defmodule Indexer.Fetcher.PolygonZkevm.TransactionBatch do
{sequences, verifies}
end)
# All L1 transactions in one list without repetition
l1_tx_hashes = Enum.uniq(sequence_hashes ++ verify_hashes)
# Retrieve the IDs of L1 txs already stored in the DB
hash_to_id =
l1_tx_hashes
|> Reader.lifecycle_transactions()
@ -203,6 +207,7 @@ defmodule Indexer.Fetcher.PolygonZkevm.TransactionBatch do
Map.put(acc, hash.bytes, id)
end)
# For every batch, build its representation and collect the associated L1 and L2 transactions
{batches_to_import, l2_txs_to_import, l1_txs_to_import, _, _} =
responses
|> Enum.reduce({[], [], [], Reader.next_id(), hash_to_id}, fn res,
@ -222,16 +227,19 @@ defmodule Indexer.Fetcher.PolygonZkevm.TransactionBatch do
acc_input_hash = Map.get(res.result, "accInputHash")
state_root = Map.get(res.result, "stateRoot")
# Get ID for sequence transaction (new ID if the batch is just sequenced)
{sequence_id, l1_txs, next_id, hash_to_id} =
res.result
|> get_tx_hash("sendSequencesTxHash")
|> handle_tx_hash(hash_to_id, next_id, l1_txs, false)
# Get ID for verify transaction (new ID if the batch is just verified)
{verify_id, l1_txs, next_id, hash_to_id} =
res.result
|> get_tx_hash("verifyBatchTxHash")
|> handle_tx_hash(hash_to_id, next_id, l1_txs, true)
# Associate every transaction of the batch with the batch number
l2_txs_append =
l2_transaction_hashes
|> Kernel.||([])
@ -256,6 +264,7 @@ defmodule Indexer.Fetcher.PolygonZkevm.TransactionBatch do
{[batch | batches], l2_txs ++ l2_txs_append, l1_txs, next_id, hash_to_id}
end)
# Import the batches together with the L1 and L2 transactions lists
{:ok, _} =
Chain.import(%{
polygon_zkevm_lifecycle_transactions: %{params: l1_txs_to_import},
@ -267,6 +276,7 @@ defmodule Indexer.Fetcher.PolygonZkevm.TransactionBatch do
confirmed_batches =
Enum.filter(batches_to_import, fn batch -> not is_nil(batch.sequence_id) and batch.sequence_id > 0 end)
# Publish an update with the new confirmed batches for the batch views in the Blockscout app
if not Enum.empty?(confirmed_batches) do
Publisher.broadcast([{:zkevm_confirmed_batches, confirmed_batches}], :realtime)
end

@ -0,0 +1,242 @@
defmodule Indexer.Fetcher.ZkSync.BatchesStatusTracker do
@moduledoc """
Updates batch statuses and imports historical batches into the `zksync_transaction_batches` table.
One iteration of the loop runs every `recheck_interval` seconds by sending the following messages:
- `:check_committed`: Discover batches committed to L1
- `:check_proven`: Discover batches proven on L1
- `:check_executed`: Discover batches executed on L1
- `:recover_batches`: Recover missed batches found while handling the three previous messages
- `:check_historical`: Check whether the chain of imported batches extends back to Batch #0 and backfill it if not
The initial message is `:check_committed`. If a status update turns out to be impossible because
some batches are missing from the `zksync_transaction_batches` table, `:recover_batches` is sent.
The subsequent messages are `:check_proven` and `:check_executed`; both can trigger
`:recover_batches` as well. After batch recovery finishes, control returns to the handler that
detected the gap: if `:recover_batches` was triggered while handling `:check_committed`,
`:check_committed` is sent again once recovery completes, and likewise for `:check_proven`
and `:check_executed`.
The last message in the loop is `:check_historical`.
|---------------------------------------------------------------------------|
|-> check_committed -> check_proven -> check_executed -> check_historical ->|
| ^ | ^ | ^
v | v | v |
recover_batches recover_batches recover_batches
If a batch status change is discovered while handling the `check_committed`, `check_proven`,
or `check_executed` messages, the corresponding L1 transactions are imported and associated
with the batches. Rollup transactions and blocks are not re-associated, since that is assumed
to be done by `Indexer.Fetcher.ZkSync.TransactionBatch` or while handling the
`recover_batches` message.
The `recover_batches` handler downloads batch information from RPC and sets each batch's actual
L1 state by linking it with the corresponding L1 transactions.
The `check_historical` message initiates a check whether the tail of the imported batch chain is
Batch #0. If older batches are missing, they are downloaded from RPC in chunks of
`batches_max_range` per iteration and imported together with their associated L1 transactions.
"""
use GenServer
use Indexer.Fetcher
require Logger
# alias Explorer.Chain.Events.Publisher
# TODO: publish event when new committed batches appear
alias Indexer.Fetcher.ZkSync.Discovery.Workers
alias Indexer.Fetcher.ZkSync.StatusTracking.{Committed, Executed, Proven}
def child_spec(start_link_arguments) do
spec = %{
id: __MODULE__,
start: {__MODULE__, :start_link, start_link_arguments},
restart: :transient,
type: :worker
}
Supervisor.child_spec(spec, [])
end
def start_link(args, gen_server_options \\ []) do
GenServer.start_link(__MODULE__, args, Keyword.put_new(gen_server_options, :name, __MODULE__))
end
@impl GenServer
def init(args) do
Logger.metadata(fetcher: :zksync_batches_tracker)
config_tracker = Application.get_all_env(:indexer)[Indexer.Fetcher.ZkSync.BatchesStatusTracker]
l1_rpc = config_tracker[:zksync_l1_rpc]
recheck_interval = config_tracker[:recheck_interval]
config_fetcher = Application.get_all_env(:indexer)[Indexer.Fetcher.ZkSync.TransactionBatch]
chunk_size = config_fetcher[:chunk_size]
batches_max_range = config_fetcher[:batches_max_range]
Process.send(self(), :check_committed, [])
{:ok,
%{
config: %{
json_l2_rpc_named_arguments: args[:json_rpc_named_arguments],
json_l1_rpc_named_arguments: [
transport: EthereumJSONRPC.HTTP,
transport_options: [
http: EthereumJSONRPC.HTTP.HTTPoison,
url: l1_rpc,
http_options: [
recv_timeout: :timer.minutes(10),
timeout: :timer.minutes(10),
hackney: [pool: :ethereum_jsonrpc]
]
]
],
recheck_interval: recheck_interval,
chunk_size: chunk_size,
batches_max_range: batches_max_range
},
data: %{}
}}
end
@impl GenServer
def handle_info({ref, _result}, state) do
Process.demonitor(ref, [:flush])
{:noreply, state}
end
# Handles the `:check_historical` message to download historical batches from RPC if necessary and
# import them into the `zksync_transaction_batches` table. The batches are imported together with
# their L1 transaction associations, rollup blocks, and transactions.
# Since it is the final handler in the loop, it schedules the `:check_committed` message to
# initiate the next iteration. The message is delayed by whatever remains of `recheck_interval`
# after the previous handlers' execution time is subtracted.
#
# ## Parameters
# - `:check_historical`: the message triggering the handler
# - `state`: current state of the fetcher containing both the fetcher configuration
# and data re-used by different handlers.
#
# ## Returns
# - `{:noreply, new_state}` where `new_state` contains an empty `data` map
@impl GenServer
def handle_info(:check_historical, state)
when is_map(state) and is_map_key(state, :config) and is_map_key(state, :data) and
is_map_key(state.config, :recheck_interval) and is_map_key(state.config, :batches_max_range) and
is_map_key(state.config, :json_l2_rpc_named_arguments) and
is_map_key(state.config, :chunk_size) do
{handle_duration, _} =
:timer.tc(&Workers.batches_catchup/1, [
%{
batches_max_range: state.config.batches_max_range,
chunk_size: state.config.chunk_size,
json_rpc_named_arguments: state.config.json_l2_rpc_named_arguments
}
])
Process.send_after(
self(),
:check_committed,
max(:timer.seconds(state.config.recheck_interval) - div(update_duration(state.data, handle_duration), 1000), 0)
)
{:noreply, %{state | data: %{}}}
end
# Handles the `:recover_batches` message to download a set of batches from RPC and imports them
# to the `zksync_transaction_batches` table. It is expected that the message is sent from handlers updating
# batches statuses when they discover the absence of batches in the `zksync_transaction_batches` table.
# The batches are imported together with L1 transactions associations, rollup blocks, and transactions.
#
# ## Parameters
# - `:recover_batches`: the message triggering the handler
# - `state`: current state of the fetcher containing both the fetcher configuration
# and data related to the batches recovery:
# - `state.data.batches`: list of the batches to recover
# - `state.data.switched_from`: the message to send after the batch recovery
#
# ## Returns
# - `{:noreply, new_state}` where `new_state` contains the updated `duration` of the iteration
@impl GenServer
def handle_info(:recover_batches, state)
when is_map(state) and is_map_key(state, :config) and is_map_key(state, :data) and
is_map_key(state.config, :json_l2_rpc_named_arguments) and is_map_key(state.config, :chunk_size) and
is_map_key(state.data, :batches) and is_map_key(state.data, :switched_from) do
{handle_duration, _} =
:timer.tc(
&Workers.get_full_batches_info_and_import/2,
[
state.data.batches,
%{
chunk_size: state.config.chunk_size,
json_rpc_named_arguments: state.config.json_l2_rpc_named_arguments
}
]
)
Process.send(self(), state.data.switched_from, [])
{:noreply, %{state | data: %{duration: update_duration(state.data, handle_duration)}}}
end
# Handles `:check_committed`, `:check_proven`, and `:check_executed` messages to update the
# statuses of batches by associating L1 transactions with them. Each message invokes a
# different underlying function because batches with a changed status are discovered in
# different ways. Statuses are also tracked separately because they change at a different
# pace: a batch is committed within a few minutes of sealing, proven after a few hours,
# and executed about once a day.
# Depending on the value returned from the underlying function, either a message (`:check_proven`,
# `:check_executed`, or `:check_historical`) to switch to the next status checker is sent, or a list
# of batches to recover is provided together with `:recover_batches`.
#
# ## Parameters
# - `input`: one of `:check_committed`, `:check_proven`, and `:check_executed`
# - `state`: the current state of the fetcher containing both the fetcher configuration
# and data reused by different handlers.
#
# ## Returns
# - `{:noreply, new_state}` where `new_state` contains the updated `duration` of the iteration;
#   it may also contain the list of batches to recover and the message used to switch back to
#   the corresponding status checker.
@impl GenServer
def handle_info(input, state)
when input in [:check_committed, :check_proven, :check_executed] do
{output, func} =
case input do
:check_committed -> {:check_proven, &Committed.look_for_batches_and_update/1}
:check_proven -> {:check_executed, &Proven.look_for_batches_and_update/1}
:check_executed -> {:check_historical, &Executed.look_for_batches_and_update/1}
end
{handle_duration, result} = :timer.tc(func, [state.config])
{switch_to, state_data} =
case result do
:ok ->
{output, %{duration: update_duration(state.data, handle_duration)}}
{:recovery_required, batches} ->
{:recover_batches,
%{
switched_from: input,
batches: batches,
duration: update_duration(state.data, handle_duration)
}}
end
Process.send(self(), switch_to, [])
{:noreply, %{state | data: state_data}}
end
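# Accumulates handler durations (in microseconds) across one iteration of the loop; e.g.
# update_duration(%{duration: 1_000}, 500) returns 1_500, while update_duration(%{}, 500)
# returns 500.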
defp update_duration(data, cur_duration) do
if Map.has_key?(data, :duration) do
data.duration + cur_duration
else
cur_duration
end
end
end

@@ -0,0 +1,413 @@
defmodule Indexer.Fetcher.ZkSync.Discovery.BatchesData do
@moduledoc """
Provides the main functionality to extract data for batches and the rollup blocks,
rollup transactions, and L1 transactions associated with them.
"""
alias EthereumJSONRPC.Block.ByNumber
alias Indexer.Fetcher.ZkSync.Utils.Rpc
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_info: 1, log_details_chunk_handling: 4]
import EthereumJSONRPC, only: [quantity_to_integer: 1]
@doc """
Downloads batches, associates rollup blocks and transactions, and imports the results into the database.
Data is retrieved from the RPC endpoint in chunks of `chunk_size`.
## Parameters
- `batches`: Either a tuple of two integers, `start_batch_number` and `end_batch_number`, defining
the range of batches to receive, or a list of batch numbers, `batches_list`.
- `config`: Configuration containing `chunk_size` to limit the amount of data requested from the RPC endpoint,
and `json_rpc_named_arguments` defining parameters for the RPC connection.
## Returns
- `{batches_to_import, l2_blocks_to_import, l2_txs_to_import}`
where
- `batches_to_import` is a map of batches data
- `l2_blocks_to_import` is a list of blocks associated with batches by batch numbers
- `l2_txs_to_import` is a list of transactions associated with batches by batch numbers
"""
@spec extract_data_from_batches([integer()] | {integer(), integer()}, %{
:chunk_size => pos_integer(),
:json_rpc_named_arguments => any(),
optional(any()) => any()
}) :: {map(), list(), list()}
def extract_data_from_batches(batches, config)
def extract_data_from_batches({start_batch_number, end_batch_number}, config)
when is_integer(start_batch_number) and is_integer(end_batch_number) and
is_map(config) do
start_batch_number..end_batch_number
|> Enum.to_list()
|> do_extract_data_from_batches(config)
end
def extract_data_from_batches(batches_list, config)
when is_list(batches_list) and
is_map(config) do
batches_list
|> do_extract_data_from_batches(config)
end
defp do_extract_data_from_batches(batches_list, config) when is_list(batches_list) do
initial_batches_to_import = collect_batches_details(batches_list, config)
log_info("Collected details for #{length(Map.keys(initial_batches_to_import))} batches")
batches_to_import = get_block_ranges(initial_batches_to_import, config)
{l2_blocks_to_import, l2_txs_to_import} = get_l2_blocks_and_transactions(batches_to_import, config)
log_info("Linked #{length(l2_blocks_to_import)} L2 blocks and #{length(l2_txs_to_import)} L2 transactions")
{batches_to_import, l2_blocks_to_import, l2_txs_to_import}
end
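# Illustration of the pipeline above (hypothetical batch numbers): calling
# extract_data_from_batches({100, 101}, %{chunk_size: 20, json_rpc_named_arguments: args})
# first collects details for batches 100 and 101, then resolves their block ranges, and
# returns {batches_map, l2_blocks, l2_txs}, where the l2_* lists carry the
# %{batch_number: ...} associations used for the import.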
@doc """
Collects all unique L1 transactions from the given list of batches, including transactions
that change the status of a batch and their timestamps.
**Note**: The maps describing L1 transactions in the response are not ready for import into
the database since they do not contain the `:id` element.
## Parameters
- `batches`: A list of maps describing batches. Each map is expected to define the following
elements: `commit_tx_hash`, `commit_timestamp`, `prove_tx_hash`, `prove_timestamp`,
`executed_tx_hash`, `executed_timestamp`.
## Returns
- `l1_txs`: A map where keys are L1 transaction hashes, and values are maps containing
transaction hashes and timestamps.
"""
@spec collect_l1_transactions(list()) :: map()
def collect_l1_transactions(batches)
when is_list(batches) do
l1_txs =
batches
|> Enum.reduce(%{}, fn batch, l1_txs ->
[
%{hash: batch.commit_tx_hash, timestamp: batch.commit_timestamp},
%{hash: batch.prove_tx_hash, timestamp: batch.prove_timestamp},
%{hash: batch.executed_tx_hash, timestamp: batch.executed_timestamp}
]
|> Enum.reduce(l1_txs, fn l1_tx, acc ->
# checks if l1_tx is not empty and adds to acc
add_l1_tx_to_list(acc, l1_tx)
end)
end)
log_info("Collected #{length(Map.keys(l1_txs))} L1 hashes")
l1_txs
end
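# Adds the L1 transaction to the accumulator map unless its hash is the all-zero
# placeholder used when RPC reports no transaction yet; e.g.
# add_l1_tx_to_list(%{}, %{hash: Rpc.get_binary_zero_hash(), timestamp: ts})
# returns %{} unchanged.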
defp add_l1_tx_to_list(l1_txs, l1_tx) do
if l1_tx.hash != Rpc.get_binary_zero_hash() do
Map.put(l1_txs, l1_tx.hash, l1_tx)
else
l1_txs
end
end
# Divides the list of batch numbers into chunks of size `chunk_size` so that the
# `zks_getL1BatchDetails` calls of one chunk are sent in a single request. To simplify further handling,
# each call is combined with the batch number in the JSON request identifier field.
# This allows parsing and associating every response with a particular batch, producing
# a list of maps describing the batches, ready for further handling.
#
# **Note**: The batches in the resulting map are not ready for importing into the DB. L1 transaction
# indices as well as the rollup blocks range must be added, and then batch descriptions
# must be pruned (see Indexer.Fetcher.ZkSync.Utils.Db.prune_json_batch/1).
#
# ## Parameters
# - `batches_list`: A list of batch numbers.
# - `config`: A map containing `chunk_size` specifying the number of `zks_getL1BatchDetails` in
# one HTTP request, and `json_rpc_named_arguments` describing parameters for
# RPC connection.
#
# ## Returns
# - `batches_details`: A map where keys are batch numbers, and values are maps produced
# after parsing responses of `zks_getL1BatchDetails` calls.
defp collect_batches_details(
batches_list,
%{json_rpc_named_arguments: json_rpc_named_arguments, chunk_size: chunk_size} = _config
)
when is_list(batches_list) do
batches_list_length = length(batches_list)
{batches_details, _} =
batches_list
|> Enum.chunk_every(chunk_size)
|> Enum.reduce({%{}, 0}, fn chunk, {details, a} ->
log_details_chunk_handling("Collecting details", chunk, a * chunk_size, batches_list_length)
requests =
chunk
|> Enum.map(fn batch_number ->
EthereumJSONRPC.request(%{
id: batch_number,
method: "zks_getL1BatchDetails",
params: [batch_number]
})
end)
details =
requests
|> Rpc.fetch_batches_details(json_rpc_named_arguments)
|> Enum.reduce(
details,
fn resp, details ->
Map.put(details, resp.id, Rpc.transform_batch_details_to_map(resp.result))
end
)
{details, a + 1}
end)
batches_details
end
# Extends each batch description with the block numbers specifying the start and end of
# a range of blocks included in the batch. The block ranges are obtained through the RPC call
# `zks_getL1BatchBlockRange`. The calls are combined in chunks of `chunk_size`. To distinguish
# each call in the chunk, they are combined with the batch number in the JSON request
# identifier field.
#
# ## Parameters
# - `batches`: A map of batch descriptions.
# - `config`: A map containing `chunk_size`, specifying the number of `zks_getL1BatchBlockRange`
# in one HTTP request, and `json_rpc_named_arguments` describing parameters for
# RPC connection.
#
# ## Returns
# - `updated_batches`: A map of batch descriptions where each description is updated with
# a range (elements `:start_block` and `:end_block`) of rollup blocks included in the batch.
defp get_block_ranges(
batches,
%{json_rpc_named_arguments: json_rpc_named_arguments, chunk_size: chunk_size} = _config
)
when is_map(batches) do
keys = Map.keys(batches)
batches_list_length = length(keys)
{updated_batches, _} =
keys
|> Enum.chunk_every(chunk_size)
|> Enum.reduce({batches, 0}, fn batches_chunk, {batches_with_block_ranges, a} ->
log_details_chunk_handling("Collecting block ranges", batches_chunk, a * chunk_size, batches_list_length)
{request_block_ranges_for_batches(batches_chunk, batches, batches_with_block_ranges, json_rpc_named_arguments),
a + 1}
end)
updated_batches
end
# For a given list of rollup batch numbers, this function builds a list of requests
# to `zks_getL1BatchBlockRange`, executes them, and extends the batches' descriptions with
# ranges of rollup blocks associated with each batch.
#
# ## Parameters
# - `batches_numbers`: A list with batch numbers.
# - `batches_src`: A map containing the original batch descriptions.
# - `batches_dst`: A map with extended batch descriptions containing rollup block ranges.
# - `json_rpc_named_arguments`: Describes parameters for RPC connection.
#
# ## Returns
# - An updated version of `batches_dst` with new entities containing rollup block ranges.
defp request_block_ranges_for_batches(batches_numbers, batches_src, batches_dst, json_rpc_named_arguments) do
batches_numbers
|> Enum.reduce([], fn batch_number, requests ->
batch = Map.get(batches_src, batch_number)
# Prepare requests list to get blocks ranges
case is_nil(batch.start_block) or is_nil(batch.end_block) do
true ->
[
EthereumJSONRPC.request(%{
id: batch_number,
method: "zks_getL1BatchBlockRange",
params: [batch_number]
})
| requests
]
false ->
requests
end
end)
|> Rpc.fetch_blocks_ranges(json_rpc_named_arguments)
|> Enum.reduce(batches_dst, fn resp, updated_batches ->
Map.update!(updated_batches, resp.id, fn batch ->
[start_block, end_block] = resp.result
Map.merge(batch, %{
start_block: quantity_to_integer(start_block),
end_block: quantity_to_integer(end_block)
})
end)
end)
end
# Unfolds the ranges of rollup blocks in each batch description, makes RPC `eth_getBlockByNumber` calls,
# and builds two lists: a list of rollup blocks associated with each batch and a list of rollup transactions
# associated with each batch. RPC calls are made in chunks of `chunk_size`. To distinguish
# each call in the chunk, they are combined with the block number in the JSON request
# identifier field.
#
# ## Parameters
# - `batches`: A map of batch descriptions. Each description must contain `start_block` and
# `end_block`, specifying the range of blocks associated with the batch.
# - `config`: A map containing `chunk_size`, specifying the number of `eth_getBlockByNumber`
# in one HTTP request, and `json_rpc_named_arguments` describing parameters for
# RPC connection.
#
# ## Returns
# - {l2_blocks_to_import, l2_txs_to_import}, where
# - `l2_blocks_to_import` contains a list of all rollup blocks with their associations with
# the provided batches. The association is a map with the block hash and the batch number.
# - `l2_txs_to_import` contains a list of all rollup transactions with their associations
# with the provided batches. The association is a map with the transaction hash and
# the batch number.
defp get_l2_blocks_and_transactions(
batches,
%{json_rpc_named_arguments: json_rpc_named_arguments, chunk_size: chunk_size} = _config
) do
# Extracts the rollup block range for every batch, unfolds it, and
# builds chunks of `eth_getBlockByNumber` calls
{blocks_to_batches, chunked_requests, cur_chunk, cur_chunk_size} =
batches
|> Map.keys()
|> Enum.reduce({%{}, [], [], 0}, fn batch_number, cur_batch_acc ->
batch = Map.get(batches, batch_number)
batch.start_block..batch.end_block
|> Enum.chunk_every(chunk_size)
|> Enum.reduce(cur_batch_acc, fn blocks_range, cur_chunk_acc ->
build_blocks_map_and_chunks_of_rpc_requests(batch_number, blocks_range, cur_chunk_acc, chunk_size)
end)
end)
# After the last iteration of the reduce loop, the requests of the final,
# possibly uncompleted, chunk may not yet be in the chunks list,
# so the chunk is appended here
finalized_chunked_requests =
if cur_chunk_size > 0 do
[cur_chunk | chunked_requests]
else
chunked_requests
end
# The chunks requests are sent to the RPC node and parsed to
# extract rollup block hashes and rollup transactions.
{blocks_associations, l2_txs_to_import} =
finalized_chunked_requests
|> Enum.reduce({blocks_to_batches, []}, fn requests, {blocks, l2_txs} ->
requests
|> Rpc.fetch_blocks_details(json_rpc_named_arguments)
|> extract_block_hash_and_transactions_list(blocks, l2_txs)
end)
# Check that the number of transactions received for each batch is correct
# (the pinned match below raises on a mismatch)
batches
|> Map.keys()
|> Enum.each(fn batch_number ->
batch = Map.get(batches, batch_number)
txs_in_batch = batch.l1_tx_count + batch.l2_tx_count
^txs_in_batch =
Enum.count(l2_txs_to_import, fn tx ->
tx.batch_number == batch_number
end)
end)
{Map.values(blocks_associations), l2_txs_to_import}
end
# For a given list of rollup block numbers, this function extends:
# - a map containing the linkage between rollup block numbers and batch numbers
# - a list of chunks of `eth_getBlockByNumber` requests
# - an uncompleted chunk of `eth_getBlockByNumber` requests
#
# ## Parameters
# - `batch_number`: The number of the batch to which the list of rollup blocks is linked.
# - `blocks_numbers`: A list of rollup block numbers.
# - `cur_chunk_acc`: The current state of the accumulator containing:
# - the current state of the map containing the linkage between rollup block numbers and batch numbers
# - the current state of the list of chunks of `eth_getBlockByNumber` requests
# - the current state of the uncompleted chunk of `eth_getBlockByNumber` requests
# - the size of the uncompleted chunk
# - `chunk_size`: The maximum size of the chunk of `eth_getBlockByNumber` requests
#
# ## Returns
# - {blocks_to_batches, chunked_requests, cur_chunk, cur_chunk_size}, where:
# - `blocks_to_batches`: An updated map with new blocks added.
# - `chunked_requests`: An updated list of lists of `eth_getBlockByNumber` requests.
# - `cur_chunk`: An uncompleted chunk of `eth_getBlockByNumber` requests or an empty list.
# - `cur_chunk_size`: The size of the uncompleted chunk.
defp build_blocks_map_and_chunks_of_rpc_requests(batch_number, blocks_numbers, cur_chunk_acc, chunk_size) do
blocks_numbers
|> Enum.reduce(cur_chunk_acc, fn block_number, {blocks_to_batches, chunked_requests, cur_chunk, cur_chunk_size} ->
blocks_to_batches = Map.put(blocks_to_batches, block_number, %{batch_number: batch_number})
cur_chunk = [
ByNumber.request(
%{
id: block_number,
number: block_number
},
false
)
| cur_chunk
]
if cur_chunk_size + 1 == chunk_size do
{blocks_to_batches, [cur_chunk | chunked_requests], [], 0}
else
{blocks_to_batches, chunked_requests, cur_chunk, cur_chunk_size + 1}
end
end)
end
# Parses responses from `eth_getBlockByNumber` calls and extracts the block hash and the
# transactions lists. The block hash and transaction hashes are used to build associations
# with the corresponding batches by utilizing their numbers.
#
# This function is not part of the `Indexer.Fetcher.ZkSync.Utils.Rpc` module since the resulting
# lists are too specific for further import to the database.
#
# ## Parameters
# - `json_responses`: A list of responses to `eth_getBlockByNumber` calls.
# - `l2_blocks`: A map of accumulated associations between rollup blocks and batches.
# - `l2_txs`: A list of accumulated associations between rollup transactions and batches.
#
# ## Returns
# - {l2_blocks, l2_txs}, where
# - `l2_blocks`: Updated map of accumulated associations between rollup blocks and batches.
# - `l2_txs`: Updated list of accumulated associations between rollup transactions and batches.
defp extract_block_hash_and_transactions_list(json_responses, l2_blocks, l2_txs) do
json_responses
|> Enum.reduce({l2_blocks, l2_txs}, fn resp, {l2_blocks, l2_txs} ->
{block, l2_blocks} =
Map.get_and_update(l2_blocks, resp.id, fn block ->
{block, Map.put(block, :hash, Map.get(resp.result, "hash"))}
end)
l2_txs =
case Map.get(resp.result, "transactions") do
nil ->
l2_txs
new_txs ->
Enum.reduce(new_txs, l2_txs, fn l2_tx_hash, l2_txs ->
[
%{
batch_number: block.batch_number,
hash: l2_tx_hash
}
| l2_txs
]
end)
end
{l2_blocks, l2_txs}
end)
end
end

@@ -0,0 +1,163 @@
defmodule Indexer.Fetcher.ZkSync.Discovery.Workers do
@moduledoc """
Provides functions to download a set of batches from RPC and import them to DB.
"""
alias Indexer.Fetcher.ZkSync.Utils.Db
import Indexer.Fetcher.ZkSync.Discovery.BatchesData,
only: [
collect_l1_transactions: 1,
extract_data_from_batches: 2
]
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_info: 1]
@doc """
Downloads minimal batch data (the batch itself together with the hashes of the associated
rollup blocks and transactions) from RPC and imports it to the DB. Data is retrieved from
the RPC endpoint in chunks of `chunk_size`.
Associated L1 transactions are not imported, on the assumption that batch import happens
regularly enough and the most recently downloaded batches do not have L1 associations yet.
`Indexer.Fetcher.ZkSync.BatchesStatusTracker` will later pick up batch status changes and
import the required L1 transactions.
## Parameters
- `start_batch_number`: The first batch in the range to download.
- `end_batch_number`: The last batch in the range to download.
- `config`: Configuration containing `chunk_size` to limit the amount of data requested from the RPC endpoint,
and `json_rpc_named_arguments` defining parameters for the RPC connection.
## Returns
- `:ok`
"""
@spec get_minimal_batches_info_and_import(non_neg_integer(), non_neg_integer(), %{
:chunk_size => integer(),
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
optional(any()) => any()
}) :: :ok
def get_minimal_batches_info_and_import(start_batch_number, end_batch_number, config)
when is_integer(start_batch_number) and
is_integer(end_batch_number) and
(is_map(config) and is_map_key(config, :json_rpc_named_arguments) and
is_map_key(config, :chunk_size)) do
{batches_to_import, l2_blocks_to_import, l2_txs_to_import} =
extract_data_from_batches({start_batch_number, end_batch_number}, config)
batches_list_to_import =
batches_to_import
|> Map.values()
|> Enum.reduce([], fn batch, batches_list ->
[Db.prune_json_batch(batch) | batches_list]
end)
Db.import_to_db(
batches_list_to_import,
[],
l2_txs_to_import,
l2_blocks_to_import
)
:ok
end
@doc """
Downloads batches, associates L1 transactions, rollup blocks and transactions with the given list of batch numbers,
and imports the results into the database. Data is retrieved from the RPC endpoint in chunks of `chunk_size`.
## Parameters
- `batches_numbers_list`: List of batch numbers to be retrieved.
- `config`: Configuration containing `chunk_size` to limit the amount of data requested from the RPC endpoint,
and `json_rpc_named_arguments` defining parameters for the RPC connection.
## Returns
- `:ok`
"""
@spec get_full_batches_info_and_import([integer()], %{
:chunk_size => integer(),
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
optional(any()) => any()
}) :: :ok
def get_full_batches_info_and_import(batches_numbers_list, config)
when is_list(batches_numbers_list) and
(is_map(config) and is_map_key(config, :json_rpc_named_arguments) and
is_map_key(config, :chunk_size)) do
# Collect batches and the linked L2 blocks and transactions
{batches_to_import, l2_blocks_to_import, l2_txs_to_import} = extract_data_from_batches(batches_numbers_list, config)
# Collect L1 transactions associated with batches
l1_txs =
batches_to_import
|> Map.values()
|> collect_l1_transactions()
|> Db.get_indices_for_l1_transactions()
# Update batches with l1 transactions indices and prune unnecessary fields
batches_list_to_import =
batches_to_import
|> Map.values()
|> Enum.reduce([], fn batch, batches ->
[
batch
|> Map.put(:commit_id, get_l1_tx_id_by_hash(l1_txs, batch.commit_tx_hash))
|> Map.put(:prove_id, get_l1_tx_id_by_hash(l1_txs, batch.prove_tx_hash))
|> Map.put(:execute_id, get_l1_tx_id_by_hash(l1_txs, batch.executed_tx_hash))
|> Db.prune_json_batch()
| batches
]
end)
Db.import_to_db(
batches_list_to_import,
Map.values(l1_txs),
l2_txs_to_import,
l2_blocks_to_import
)
:ok
end
@doc """
Retrieves the minimal batch number from the database. If the minimum batch number is not zero,
downloads `batches_max_range` batches older than the retrieved batch, along with associated
L1 transactions, rollup blocks, and transactions, and imports everything to the database.
## Parameters
- `config`: Configuration containing `chunk_size` to limit the amount of data requested from
the RPC endpoint, `json_rpc_named_arguments` defining parameters for the
RPC connection, and `batches_max_range` defining how many older batches must be downloaded.
## Returns
- `:ok`
"""
@spec batches_catchup(%{
:batches_max_range => integer(),
:chunk_size => integer(),
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
optional(any()) => any()
}) :: :ok
def batches_catchup(config)
when is_map(config) and is_map_key(config, :json_rpc_named_arguments) and
is_map_key(config, :batches_max_range) and
is_map_key(config, :chunk_size) do
oldest_batch_number = Db.get_earliest_batch_number()
if not is_nil(oldest_batch_number) && oldest_batch_number > 0 do
log_info("The oldest batch number is not zero. Historical baches will be fetched.")
start_batch_number = max(0, oldest_batch_number - config.batches_max_range)
end_batch_number = oldest_batch_number - 1
start_batch_number..end_batch_number
|> Enum.to_list()
|> get_full_batches_info_and_import(config)
end
:ok
end
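# Looks up the import index previously assigned to an L1 transaction hash, e.g.
# (hypothetical values) get_l1_tx_id_by_hash(%{hash => %{hash: hash, id: 42}}, hash)
# returns 42; an unknown hash yields nil.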
defp get_l1_tx_id_by_hash(l1_txs, hash) do
l1_txs
|> Map.get(hash)
|> Kernel.||(%{id: nil})
|> Map.get(:id)
end
end

@@ -0,0 +1,78 @@
defmodule Indexer.Fetcher.ZkSync.StatusTracking.Committed do
@moduledoc """
Functionality to discover committed batches
"""
alias Indexer.Fetcher.ZkSync.Utils.{Db, Rpc}
import Indexer.Fetcher.ZkSync.StatusTracking.CommonUtils,
only: [
check_if_batch_status_changed: 3,
associate_and_import_or_prepare_for_recovery: 4
]
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_info: 1]
# keccak256("BlockCommit(uint256,bytes32,bytes32)")
@block_commit_event "0x8f2916b2f2d78cc5890ead36c06c0f6d5d112c7e103589947e8e2f0d6eddb763"
@doc """
Checks if the oldest uncommitted batch in the database has the associated L1 commitment transaction
by requesting new batch details from RPC. If so, analyzes the `BlockCommit` event emitted by
the transaction to explore all the batches committed by it. For all discovered batches, it updates
the database with new associations, importing information about L1 transactions.
If some of the discovered batches are absent from the database, the function
stops and returns the list of batch numbers whose recovery can be attempted.
## Parameters
- `config`: Configuration containing `json_l1_rpc_named_arguments` and
`json_l2_rpc_named_arguments` defining parameters for the RPC connections.
## Returns
- `:ok` if no new committed batches are found, or if all found batches and the corresponding L1
transactions are imported successfully.
- `{:recovery_required, batches_to_recover}` if some of the committed batches are absent
  from the database; `batches_to_recover` contains their numbers.
"""
@spec look_for_batches_and_update(%{
:json_l1_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
:json_l2_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
optional(any()) => any()
}) :: :ok | {:recovery_required, list()}
def look_for_batches_and_update(
%{
json_l1_rpc_named_arguments: json_l1_rpc_named_arguments,
json_l2_rpc_named_arguments: json_l2_rpc_named_arguments
} = _config
) do
case Db.get_earliest_sealed_batch_number() do
nil ->
:ok
expected_batch_number ->
log_info("Checking if the batch #{expected_batch_number} was committed")
{next_action, tx_hash, l1_txs} =
check_if_batch_status_changed(expected_batch_number, :commit_tx, json_l2_rpc_named_arguments)
case next_action do
:skip ->
:ok
:look_for_batches ->
log_info("The batch #{expected_batch_number} looks like committed")
commit_tx_receipt = Rpc.fetch_tx_receipt_by_hash(tx_hash, json_l1_rpc_named_arguments)
batches_numbers_from_rpc = get_committed_batches_from_logs(commit_tx_receipt["logs"])
associate_and_import_or_prepare_for_recovery(batches_numbers_from_rpc, l1_txs, tx_hash, :commit_id)
end
end
end
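# Extracts the numbers of all batches committed by the transaction: topic #1 of every
# `BlockCommit` event in the receipt logs is the committed batch number.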
defp get_committed_batches_from_logs(logs) do
committed_batches = Rpc.filter_logs_and_extract_topic_at(logs, @block_commit_event, 1)
log_info("Discovered #{length(committed_batches)} committed batches in the commitment tx")
committed_batches
end
end

@@ -0,0 +1,173 @@
defmodule Indexer.Fetcher.ZkSync.StatusTracking.CommonUtils do
@moduledoc """
Common functions for status changes trackers
"""
alias Explorer.Chain.ZkSync.Reader
alias Indexer.Fetcher.ZkSync.Utils.{Db, Rpc}
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_warning: 1]
@doc """
Fetches the details of the batch with the given number and checks if the representation of
the same batch in the database refers to the same commitment, proving, or executing transaction
depending on `tx_type`. If the transaction state changes, the new transaction is prepared for
import to the database.
## Parameters
- `batch_number`: the number of the batch to check L1 transaction state.
- `tx_type`: a type of the transaction to check, one of :commit_tx, :execute_tx, or :prove_tx.
- `json_l2_rpc_named_arguments`: parameters for the RPC connections.
## Returns
- `{:look_for_batches, l1_tx_hash, l1_txs}` where
- `l1_tx_hash` is the hash of the L1 transaction.
- `l1_txs` is a map where the key is the L1 transaction hash and the value is a map
  with the transaction hash, its timestamp, and the index assigned for the import.
- `{:skip, "", %{}}` if the batch has no corresponding L1 transaction yet (the hash
  received from RPC is the zero hash) or the transaction state in the batch representation
  in the database is the same as the state received from RPC.
"""
@spec check_if_batch_status_changed(
binary() | non_neg_integer(),
:commit_tx | :execute_tx | :prove_tx,
EthereumJSONRPC.json_rpc_named_arguments()
) :: {:look_for_batches, any(), any()} | {:skip, <<>>, %{}}
def check_if_batch_status_changed(batch_number, tx_type, json_l2_rpc_named_arguments)
when (is_binary(batch_number) or is_integer(batch_number)) and
tx_type in [:commit_tx, :prove_tx, :execute_tx] and
is_list(json_l2_rpc_named_arguments) do
batch_from_rpc = Rpc.fetch_batch_details_by_batch_number(batch_number, json_l2_rpc_named_arguments)
status_changed_or_error =
case Reader.batch(
batch_number,
necessity_by_association: %{
get_association(tx_type) => :optional
}
) do
{:ok, batch_from_db} -> transactions_of_batch_changed?(batch_from_db, batch_from_rpc, tx_type)
{:error, :not_found} -> :error
end
l1_tx = get_l1_tx_from_batch(batch_from_rpc, tx_type)
if l1_tx.hash != Rpc.get_binary_zero_hash() and status_changed_or_error in [true, :error] do
l1_txs = Db.get_indices_for_l1_transactions(%{l1_tx.hash => l1_tx})
{:look_for_batches, l1_tx.hash, l1_txs}
else
{:skip, "", %{}}
end
end
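# Maps the transaction type atom to the association name used by Reader.batch/2, e.g.
# get_association(:commit_tx) returns :commit_transaction.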
defp get_association(tx_type) do
case tx_type do
:commit_tx -> :commit_transaction
:prove_tx -> :prove_transaction
:execute_tx -> :execute_transaction
end
end
defp transactions_of_batch_changed?(batch_db, batch_json, tx_type) do
tx_hash_json =
case tx_type do
:commit_tx -> batch_json.commit_tx_hash
:prove_tx -> batch_json.prove_tx_hash
:execute_tx -> batch_json.executed_tx_hash
end
tx_hash_db =
case tx_type do
:commit_tx -> batch_db.commit_transaction
:prove_tx -> batch_db.prove_transaction
:execute_tx -> batch_db.execute_transaction
end
tx_hash_db_bytes =
if is_nil(tx_hash_db) do
Rpc.get_binary_zero_hash()
else
tx_hash_db.hash.bytes
end
tx_hash_json != tx_hash_db_bytes
end
defp get_l1_tx_from_batch(batch_from_rpc, tx_type) do
case tx_type do
:commit_tx -> %{hash: batch_from_rpc.commit_tx_hash, timestamp: batch_from_rpc.commit_timestamp}
:prove_tx -> %{hash: batch_from_rpc.prove_tx_hash, timestamp: batch_from_rpc.prove_timestamp}
:execute_tx -> %{hash: batch_from_rpc.executed_tx_hash, timestamp: batch_from_rpc.executed_timestamp}
end
end
@doc """
Receives batches from the database, establishes an association between each batch and
the corresponding L1 transactions, and imports batches and L1 transactions into the database.
If the number of batches returned from the database does not match the number of requested
batches, the initial list of batch numbers is returned, assuming that it can be
used for the missed batch recovery procedure.
## Parameters
- `batches_numbers`: the list of batch numbers that must be updated.
- `l1_txs`: a map containing transaction hashes as keys, and values are maps
with transaction hashes and transaction timestamps of L1 transactions to import to the database.
- `tx_hash`: the hash of the L1 transaction to build an association with.
- `association_key`: the field in the batch description to build an association with L1
transactions.
## Returns
- `:ok` if batches and the corresponding L1 transactions are imported successfully.
- `{:recovery_required, batches_to_recover}` if the absence of batches is discovered;
`batches_to_recover` contains the list of batch numbers.
"""
@spec associate_and_import_or_prepare_for_recovery([integer()], map(), binary(), :commit_id | :execute_id | :prove_id) ::
:ok | {:recovery_required, [integer()]}
def associate_and_import_or_prepare_for_recovery(batches_numbers, l1_txs, tx_hash, association_key)
when is_list(batches_numbers) and is_map(l1_txs) and is_binary(tx_hash) and
association_key in [:commit_id, :prove_id, :execute_id] do
case prepare_batches_to_import(batches_numbers, %{association_key => l1_txs[tx_hash][:id]}) do
{:error, batches_to_recover} ->
{:recovery_required, batches_to_recover}
{:ok, batches_to_import} ->
Db.import_to_db(batches_to_import, Map.values(l1_txs))
:ok
end
end
# Receives batches from the database and merges each batch's data with the data provided
# in `map_to_update`. If the number of batches returned from the database does not match
# the number of requested batches, the initial list of batch numbers is returned, assuming
# that it can be used for the missed batch recovery procedure.
#
# ## Parameters
# - `batches`: the list of batch numbers that must be updated.
# - `map_to_update`: a map containing new data that must be applied to all requested batches.
#
# ## Returns
# - `{:ok, batches_to_import}` where `batches_to_import` is the list of batches ready to import
# with updated data.
# - `{:error, batches}` where `batches` contains the input list of batch numbers.
defp prepare_batches_to_import(batches, map_to_update) do
batches_from_db = Reader.batches(batches, [])
if length(batches_from_db) == length(batches) do
batches_to_import =
batches_from_db
|> Enum.reduce([], fn batch, batches ->
[
batch
|> Rpc.transform_transaction_batch_to_map()
|> Map.merge(map_to_update)
| batches
]
end)
{:ok, batches_to_import}
else
log_warning("Lack of batches received from DB to update")
{:error, batches}
end
end
end

@@ -0,0 +1,78 @@
defmodule Indexer.Fetcher.ZkSync.StatusTracking.Executed do
@moduledoc """
Functionality to discover executed batches
"""
alias Indexer.Fetcher.ZkSync.Utils.{Db, Rpc}
import Indexer.Fetcher.ZkSync.StatusTracking.CommonUtils,
only: [
check_if_batch_status_changed: 3,
associate_and_import_or_prepare_for_recovery: 4
]
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_info: 1]
# keccak256("BlockExecution(uint256,bytes32,bytes32)")
@block_execution_event "0x2402307311a4d6604e4e7b4c8a15a7e1213edb39c16a31efa70afb06030d3165"
@doc """
Checks if the oldest unexecuted batch in the database has the associated L1 executing transaction
by requesting new batch details from RPC. If so, analyzes the `BlockExecution` event emitted by
the transaction to explore all the batches executed by it. For all discovered batches, it updates
the database with new associations, importing information about L1 transactions.
If some of the discovered batches are absent from the database, the function
stops and returns the list of batch numbers whose recovery can be attempted.
## Parameters
- `config`: Configuration containing `json_l1_rpc_named_arguments` and
`json_l2_rpc_named_arguments` defining parameters for the RPC connections.
## Returns
- `:ok` if no new executed batches are found, or if all found batches and the corresponding L1
transactions are imported successfully.
- `{:recovery_required, batches_to_recover}` if some of the executed batches are absent
  from the database; `batches_to_recover` contains their numbers.
"""
@spec look_for_batches_and_update(%{
:json_l1_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
:json_l2_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
optional(any()) => any()
}) :: :ok | {:recovery_required, list()}
def look_for_batches_and_update(
%{
json_l1_rpc_named_arguments: json_l1_rpc_named_arguments,
json_l2_rpc_named_arguments: json_l2_rpc_named_arguments
} = _config
) do
case Db.get_earliest_unexecuted_batch_number() do
nil ->
:ok
expected_batch_number ->
log_info("Checking if the batch #{expected_batch_number} was executed")
{next_action, tx_hash, l1_txs} =
check_if_batch_status_changed(expected_batch_number, :execute_tx, json_l2_rpc_named_arguments)
case next_action do
:skip ->
:ok
:look_for_batches ->
log_info("The batch #{expected_batch_number} looks like executed")
execute_tx_receipt = Rpc.fetch_tx_receipt_by_hash(tx_hash, json_l1_rpc_named_arguments)
batches_numbers_from_rpc = get_executed_batches_from_logs(execute_tx_receipt["logs"])
associate_and_import_or_prepare_for_recovery(batches_numbers_from_rpc, l1_txs, tx_hash, :execute_id)
end
end
end
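# Extracts the numbers of all batches executed by the transaction: topic #1 of every
# `BlockExecution` event in the receipt logs is the executed batch number.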
defp get_executed_batches_from_logs(logs) do
executed_batches = Rpc.filter_logs_and_extract_topic_at(logs, @block_execution_event, 1)
log_info("Discovered #{length(executed_batches)} executed batches in the executing tx")
executed_batches
end
end

@@ -0,0 +1,137 @@
defmodule Indexer.Fetcher.ZkSync.StatusTracking.Proven do
@moduledoc """
Functionality to discover proven batches
"""
alias ABI.{FunctionSelector, TypeDecoder}
alias Indexer.Fetcher.ZkSync.Utils.{Db, Rpc}
import Indexer.Fetcher.ZkSync.StatusTracking.CommonUtils,
only: [
check_if_batch_status_changed: 3,
associate_and_import_or_prepare_for_recovery: 4
]
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_info: 1]
@doc """
Checks if the oldest unproven batch in the database has the associated L1 proving transaction
by requesting new batch details from RPC. If so, analyzes the calldata of the transaction
to explore all the batches proven by it. For all discovered batches, it updates
the database with new associations, importing information about L1 transactions.
If some of the discovered batches are absent from the database, the function
stops and returns the list of batch numbers whose recovery can be attempted.
## Parameters
- `config`: Configuration containing `json_l1_rpc_named_arguments` and
`json_l2_rpc_named_arguments` defining parameters for the RPC connections.
## Returns
- `:ok` if no new proven batches are found, or if all found batches and the corresponding L1
transactions are imported successfully.
- `{:recovery_required, batches_to_recover}` if some of the proven batches are absent
  from the database; `batches_to_recover` contains their numbers.
"""
@spec look_for_batches_and_update(%{
:json_l1_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
:json_l2_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(),
optional(any()) => any()
}) :: :ok | {:recovery_required, list()}
def look_for_batches_and_update(
%{
json_l1_rpc_named_arguments: json_l1_rpc_named_arguments,
json_l2_rpc_named_arguments: json_l2_rpc_named_arguments
} = _config
) do
case Db.get_earliest_unproven_batch_number() do
nil ->
:ok
expected_batch_number ->
log_info("Checking if the batch #{expected_batch_number} was proven")
{next_action, tx_hash, l1_txs} =
check_if_batch_status_changed(expected_batch_number, :prove_tx, json_l2_rpc_named_arguments)
case next_action do
:skip ->
:ok
:look_for_batches ->
log_info("The batch #{expected_batch_number} looks like proven")
prove_tx = Rpc.fetch_tx_by_hash(tx_hash, json_l1_rpc_named_arguments)
batches_numbers_from_rpc = get_proven_batches_from_calldata(prove_tx["input"])
associate_and_import_or_prepare_for_recovery(batches_numbers_from_rpc, l1_txs, tx_hash, :prove_id)
end
end
end
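# Decodes the `proveBatches` calldata (selector 0x7f61885c, as matched below) and returns
# the numbers of the proven batches: element 0 of each `_committedBatches` tuple is `batchNumber`.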
defp get_proven_batches_from_calldata(calldata) do
"0x7f61885c" <> encoded_params = calldata
# /// @param batchNumber Rollup batch number
# /// @param batchHash Hash of L2 batch
# /// @param indexRepeatedStorageChanges The serial number of the shortcut index that's used as a unique identifier for storage keys that were used twice or more
# /// @param numberOfLayer1Txs Number of priority operations to be processed
# /// @param priorityOperationsHash Hash of all priority operations from this batch
# /// @param l2LogsTreeRoot Root hash of tree that contains L2 -> L1 messages from this batch
# /// @param timestamp Rollup batch timestamp, have the same format as Ethereum batch constant
# /// @param commitment Verified input for the zkSync circuit
# struct StoredBatchInfo {
# uint64 batchNumber;
# bytes32 batchHash;
# uint64 indexRepeatedStorageChanges;
# uint256 numberOfLayer1Txs;
# bytes32 priorityOperationsHash;
# bytes32 l2LogsTreeRoot;
# uint256 timestamp;
# bytes32 commitment;
# }
# /// @notice Recursive proof input data (individual commitments are constructed onchain)
# struct ProofInput {
# uint256[] recursiveAggregationInput;
# uint256[] serializedProof;
# }
# proveBatches(StoredBatchInfo calldata _prevBatch, StoredBatchInfo[] calldata _committedBatches, ProofInput calldata _proof)
# IO.inspect(FunctionSelector.decode("proveBatches((uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),(uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32)[],(uint256[],uint256[]))"))
[_prev_batch, proven_batches, _proof] =
TypeDecoder.decode(
Base.decode16!(encoded_params, case: :lower),
%FunctionSelector{
function: "proveBatches",
types: [
tuple: [
uint: 64,
bytes: 32,
uint: 64,
uint: 256,
bytes: 32,
bytes: 32,
uint: 256,
bytes: 32
],
array:
{:tuple,
[
uint: 64,
bytes: 32,
uint: 64,
uint: 256,
bytes: 32,
bytes: 32,
uint: 256,
bytes: 32
]},
tuple: [array: {:uint, 256}, array: {:uint, 256}]
]
}
)
log_info("Discovered #{length(proven_batches)} proven batches in the prove tx")
proven_batches
|> Enum.map(fn batch_info -> elem(batch_info, 0) end)
end
end

@@ -0,0 +1,149 @@
defmodule Indexer.Fetcher.ZkSync.TransactionBatch do
@moduledoc """
Discovers new batches and populates the `zksync_transaction_batches` table.
The fetcher repeats its work by sending itself a `:continue` message every `recheck_interval` seconds.
Each iteration compares the number of the last handled batch stored in the state with the
latest batch available on the RPC node. If the rollup has progressed, all batches between the
last handled batch (exclusive) and the latest available batch (inclusive) are downloaded from RPC
in chunks of `chunk_size` and imported into the `zksync_transaction_batches` table. If the latest
available batch is too far ahead of the last handled batch, only `batches_max_range` batches are downloaded.
"""
use GenServer
use Indexer.Fetcher
require Logger
alias Explorer.Chain.ZkSync.Reader
alias Indexer.Fetcher.ZkSync.Discovery.Workers
alias Indexer.Fetcher.ZkSync.Utils.Rpc
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_info: 1]
def child_spec(start_link_arguments) do
spec = %{
id: __MODULE__,
start: {__MODULE__, :start_link, start_link_arguments},
restart: :transient,
type: :worker
}
Supervisor.child_spec(spec, [])
end
def start_link(args, gen_server_options \\ []) do
GenServer.start_link(__MODULE__, args, Keyword.put_new(gen_server_options, :name, __MODULE__))
end
@impl GenServer
def init(args) do
Logger.metadata(fetcher: :zksync_transaction_batches)
config = Application.get_all_env(:indexer)[Indexer.Fetcher.ZkSync.TransactionBatch]
chunk_size = config[:chunk_size]
recheck_interval = config[:recheck_interval]
batches_max_range = config[:batches_max_range]
Process.send(self(), :init, [])
{:ok,
%{
config: %{
chunk_size: chunk_size,
batches_max_range: batches_max_range,
json_rpc_named_arguments: args[:json_rpc_named_arguments],
recheck_interval: recheck_interval
},
data: %{latest_handled_batch_number: 0}
}}
end
@impl GenServer
def handle_info(:init, state) do
latest_handled_batch_number =
case Reader.latest_available_batch_number() do
nil ->
log_info("No batches found in DB. Will start with the latest batch available by RPC")
# The value received from RPC is decremented in order to not waste
# the first iteration of handling `:continue` message.
Rpc.fetch_latest_sealed_batch_number(state.config.json_rpc_named_arguments) - 1
latest_handled_batch_number ->
latest_handled_batch_number
end
Process.send_after(self(), :continue, 2000)
log_info("All batches including #{latest_handled_batch_number} are considered as handled")
{:noreply, %{state | data: %{latest_handled_batch_number: latest_handled_batch_number}}}
end
# Checks if the rollup progresses by comparing the recently stored batch
# with the latest batch received from RPC. If progress is detected, it downloads
# batches, builds their associations with rollup blocks and transactions, and
# imports the received data to the database. If the latest batch received from RPC
# is too far from the most recently stored batch, only `batches_max_range` batches
# are downloaded. All RPC calls to get batch details and receive transactions
# included in batches are made in chunks of `chunk_size`.
#
# After importing batch information, it schedules the next iteration by sending
# the `:continue` message. The sending of the message is delayed, taking into account
# the time remaining after downloading and importing processes.
#
# ## Parameters
# - `:continue`: The message triggering the handler.
# - `state`: The current state of the fetcher containing both the fetcher configuration
# and the latest handled batch number.
#
# ## Returns
# - `{:noreply, new_state}` where the latest handled batch number is updated with the largest
# of the batch numbers imported in the current iteration.
@impl GenServer
def handle_info(
:continue,
%{
data: %{latest_handled_batch_number: latest_handled_batch_number},
config: %{
batches_max_range: batches_max_range,
json_rpc_named_arguments: json_rpc_named_arguments,
recheck_interval: recheck_interval,
chunk_size: _
}
} = state
) do
log_info("Checking for a new batch or batches")
latest_sealed_batch_number = Rpc.fetch_latest_sealed_batch_number(json_rpc_named_arguments)
{new_state, handle_duration} =
if latest_handled_batch_number < latest_sealed_batch_number do
start_batch_number = latest_handled_batch_number + 1
end_batch_number = min(latest_sealed_batch_number, latest_handled_batch_number + batches_max_range)
log_info("Handling the batch range #{start_batch_number}..#{end_batch_number}")
{handle_duration, _} =
:timer.tc(&Workers.get_minimal_batches_info_and_import/3, [start_batch_number, end_batch_number, state.config])
{
%{state | data: %{latest_handled_batch_number: end_batch_number}},
div(handle_duration, 1000)
}
else
{state, 0}
end
Process.send_after(self(), :continue, max(:timer.seconds(recheck_interval) - handle_duration, 0))
{:noreply, new_state}
end
@impl GenServer
def handle_info({ref, _result}, state) do
Process.demonitor(ref, [:flush])
{:noreply, state}
end
end

@@ -0,0 +1,204 @@
defmodule Indexer.Fetcher.ZkSync.Utils.Db do
@moduledoc """
Common functions to simplify DB routines for Indexer.Fetcher.ZkSync fetchers
"""
alias Explorer.Chain
alias Explorer.Chain.ZkSync.Reader
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_warning: 1, log_info: 1]
@json_batch_fields_absent_in_db_batch [
:commit_tx_hash,
:commit_timestamp,
:prove_tx_hash,
:prove_timestamp,
:executed_tx_hash,
:executed_timestamp
]
@doc """
Deletes elements in the batch description map to prepare the batch for importing to
the database.
## Parameters
- `batch_with_json_fields`: a map describing a batch that may still contain JSON-only
  elements left over from downloading batch details from RPC.
## Returns
- A map describing the batch compatible with the database import operation.
"""
@spec prune_json_batch(map()) :: map()
def prune_json_batch(batch_with_json_fields)
when is_map(batch_with_json_fields) do
Map.drop(batch_with_json_fields, @json_batch_fields_absent_in_db_batch)
end
@doc """
Gets the oldest imported batch number.
## Parameters
- none
## Returns
- A batch number or `nil` if there are no batches in the database.
"""
@spec get_earliest_batch_number() :: nil | non_neg_integer()
def get_earliest_batch_number do
case Reader.oldest_available_batch_number() do
nil ->
log_warning("No batches found in DB")
nil
value ->
value
end
end
@doc """
Gets the oldest imported batch number without an associated commitment L1 transaction.
## Parameters
- none
## Returns
- A batch number or `nil` in cases where there are no batches in the database or
all batches in the database are marked as committed.
"""
@spec get_earliest_sealed_batch_number() :: nil | non_neg_integer()
def get_earliest_sealed_batch_number do
case Reader.earliest_sealed_batch_number() do
nil ->
log_info("No uncommitted batches found in DB")
nil
value ->
value
end
end
@doc """
Gets the oldest imported batch number without an associated proving L1 transaction.
## Parameters
- none
## Returns
- A batch number or `nil` in cases where there are no batches in the database or
all batches in the database are marked as proven.
"""
@spec get_earliest_unproven_batch_number() :: nil | non_neg_integer()
def get_earliest_unproven_batch_number do
case Reader.earliest_unproven_batch_number() do
nil ->
log_info("No unproven batches found in DB")
nil
value ->
value
end
end
@doc """
Gets the oldest imported batch number without an associated executing L1 transaction.
## Parameters
- none
## Returns
- A batch number or `nil` in cases where there are no batches in the database or
all batches in the database are marked as executed.
"""
@spec get_earliest_unexecuted_batch_number() :: nil | non_neg_integer()
def get_earliest_unexecuted_batch_number do
case Reader.earliest_unexecuted_batch_number() do
nil ->
log_info("No not executed batches found in DB")
nil
value ->
value
end
end
@doc """
Indexes L1 transactions provided in the input map. For transactions that
are already in the database, existing indices are taken. For new transactions,
the next available indices are assigned.
## Parameters
- `new_l1_txs`: A map of L1 transaction descriptions. The keys of the map are
transaction hashes.
## Returns
- `l1_txs`: A map of L1 transaction descriptions. Each element is extended with
the key `:id`, representing the index of the L1 transaction in the
`zksync_lifecycle_l1_transactions` table.
"""
@spec get_indices_for_l1_transactions(map()) :: any()
def get_indices_for_l1_transactions(new_l1_txs)
when is_map(new_l1_txs) do
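# Sketch of the expected result (hypothetical hashes and ids): if hash_a is already
# stored in `zksync_lifecycle_l1_transactions` with id 7 and the next free id is 8, then
# get_indices_for_l1_transactions(%{hash_a => tx_a, hash_b => tx_b}) returns
# %{hash_a => Map.put(tx_a, :id, 7), hash_b => Map.put(tx_b, :id, 8)}.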
# Get indices for l1 transactions previously handled
l1_txs =
new_l1_txs
|> Map.keys()
|> Reader.lifecycle_transactions()
|> Enum.reduce(new_l1_txs, fn {hash, id}, txs ->
{_, txs} =
Map.get_and_update!(txs, hash.bytes, fn l1_tx ->
{l1_tx, Map.put(l1_tx, :id, id)}
end)
txs
end)
# Get the next index for the first new transaction based
# on the indices existing in DB
l1_tx_next_id = Reader.next_id()
# Assign new indices for the transactions which are not in
# the l1 transactions table yet
{updated_l1_txs, _} =
l1_txs
|> Map.keys()
|> Enum.reduce(
{l1_txs, l1_tx_next_id},
fn hash, {txs, next_id} ->
tx = txs[hash]
id = Map.get(tx, :id)
if is_nil(id) do
{Map.put(txs, hash, Map.put(tx, :id, next_id)), next_id + 1}
else
{txs, next_id}
end
end
)
updated_l1_txs
end
@doc """
Imports provided lists of batches and their associations with L1 transactions, rollup blocks,
and transactions to the database.
## Parameters
- `batches`: A list of maps with batch descriptions.
- `l1_txs`: A list of maps with L1 transaction descriptions. Optional.
- `l2_txs`: A list of maps with rollup transaction associations. Optional.
- `l2_blocks`: A list of maps with rollup block associations. Optional.
## Returns
n/a
"""
def import_to_db(batches, l1_txs \\ [], l2_txs \\ [], l2_blocks \\ [])
when is_list(batches) and is_list(l1_txs) and is_list(l2_txs) and is_list(l2_blocks) do
{:ok, _} =
Chain.import(%{
zksync_lifecycle_transactions: %{params: l1_txs},
zksync_transaction_batches: %{params: batches},
zksync_batch_transactions: %{params: l2_txs},
zksync_batch_blocks: %{params: l2_blocks},
timeout: :infinity
})
end
end

@@ -0,0 +1,143 @@
defmodule Indexer.Fetcher.ZkSync.Utils.Logging do
@moduledoc """
Common logging functions for Indexer.Fetcher.ZkSync fetchers
"""
require Logger
@doc """
A helper function to log a message with warning severity. Uses `Logger.warning` facility.
## Parameters
- `msg`: a message to log
## Returns
`:ok`
"""
@spec log_warning(any()) :: :ok
def log_warning(msg) do
Logger.warning(msg)
end
@doc """
A helper function to log a message with info severity. Uses `Logger.info` facility.
## Parameters
- `msg`: a message to log
## Returns
`:ok`
"""
@spec log_info(any()) :: :ok
def log_info(msg) do
Logger.info(msg)
end
@doc """
A helper function to log a message with error severity. Uses `Logger.error` facility.
## Parameters
- `msg`: a message to log
## Returns
`:ok`
"""
@spec log_error(any()) :: :ok
def log_error(msg) do
Logger.error(msg)
end
@doc """
A helper function to log progress when handling batches in chunks.
## Parameters
- `prefix`: A prefix for the logging message.
- `chunk`: A list of batch numbers in the current chunk.
- `current_progress`: The total number of batches handled up to this moment.
- `total`: The total number of batches across all chunks.
## Returns
`:ok`
## Examples:
- `log_details_chunk_handling("A message", [1, 2, 3], 0, 10)` produces
`A message for batches 1..3. Progress 30%`
- `log_details_chunk_handling("A message", [2], 1, 10)` produces
`A message for batch 2. Progress 20%`
- `log_details_chunk_handling("A message", [35], 0, 1)` produces
`A message for batch 35.`
- `log_details_chunk_handling("A message", [45, 50, 51, 52, 60], 1, 1)` produces
`A message for batches 45, 50..52, 60.`
"""
@spec log_details_chunk_handling(binary(), list(), non_neg_integer(), non_neg_integer()) :: :ok
def log_details_chunk_handling(prefix, chunk, current_progress, total)
when is_binary(prefix) and is_list(chunk) and (is_integer(current_progress) and current_progress >= 0) and
(is_integer(total) and total > 0) do
chunk_length = length(chunk)
progress =
case chunk_length == total do
true ->
""
false ->
percentage =
(current_progress + chunk_length)
|> Decimal.div(total)
|> Decimal.mult(100)
|> Decimal.round(2)
|> Decimal.to_string()
" Progress: #{percentage}%"
end
if chunk_length == 1 do
log_info("#{prefix} for batch ##{Enum.at(chunk, 0)}.")
else
log_info("#{prefix} for batches #{Enum.join(shorten_numbers_list(chunk), ", ")}.#{progress}")
end
end
# Transforms a list of numbers into a list of strings where consecutive values
# are combined to be displayed as ranges.
#
# ## Parameters
# - `numbers_list`: a list of numbers
#
# ## Returns
# - `shorten_list`: the resulting list after folding
#
# ## Examples:
# [1, 2, 3] => ["1..3"]
# [1, 3] => ["1", "3"]
# [1, 2] => ["1..2"]
# [1, 3, 4, 5] => ["1", "3..5"]
defp shorten_numbers_list(numbers_list) do
{shorten_list, _, _} =
numbers_list
|> Enum.sort()
|> Enum.reduce({[], nil, nil}, fn number, {shorten_list, prev_range_start, prev_number} ->
shorten_numbers_list_impl(number, shorten_list, prev_range_start, prev_number)
end)
|> then(fn {shorten_list, prev_range_start, prev_number} ->
shorten_numbers_list_impl(prev_number, shorten_list, prev_range_start, prev_number)
end)
Enum.reverse(shorten_list)
end
defp shorten_numbers_list_impl(number, shorten_list, prev_range_start, prev_number) do
cond do
is_nil(prev_number) ->
{[], number, number}
prev_number + 1 != number and prev_range_start == prev_number ->
{["#{prev_range_start}" | shorten_list], number, number}
prev_number + 1 != number ->
{["#{prev_range_start}..#{prev_number}" | shorten_list], number, number}
true ->
{shorten_list, prev_range_start, number}
end
end
end

@@ -0,0 +1,403 @@
defmodule Indexer.Fetcher.ZkSync.Utils.Rpc do
@moduledoc """
Common functions to handle RPC calls for Indexer.Fetcher.ZkSync fetchers
"""
import EthereumJSONRPC, only: [json_rpc: 2, quantity_to_integer: 1]
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_error: 1]
@zero_hash "0000000000000000000000000000000000000000000000000000000000000000"
@zero_hash_binary <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>
@rpc_resend_attempts 20
def get_zero_hash do
@zero_hash
end
def get_binary_zero_hash do
@zero_hash_binary
end
@doc """
Filters a list of transaction logs, keeping those whose topic #0 equals `topic_0`, and
builds a list of the values located at position `position` in such logs.
## Parameters
- `logs`: The list of transaction logs to filter by topic.
- `topic_0`: The value of topic #0 in the required logs.
- `position`: The topic number to be extracted from the topic lists of every log
and appended to the resulting list.
## Returns
- A list of values extracted from the required transaction logs.
- An empty list if no logs with the specified topic are found.
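## Example
Assuming `logs` contains a single log whose `"topics"` list is `[topic_0, "0x01"]`,
`filter_logs_and_extract_topic_at(logs, topic_0, 1)` returns `[1]`.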
"""
@spec filter_logs_and_extract_topic_at(maybe_improper_list(), binary(), integer()) :: list()
def filter_logs_and_extract_topic_at(logs, topic_0, position)
when is_list(logs) and
is_binary(topic_0) and
(is_integer(position) and position >= 0 and position <= 3) do
logs
|> Enum.reduce([], fn log_entity, result ->
topics = log_entity["topics"]
if Enum.at(topics, 0) == topic_0 do
[quantity_to_integer(Enum.at(topics, position)) | result]
else
result
end
end)
end
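# Converts a Unix timestamp to a DateTime, falling back to the Unix epoch start when
# the value is nil, zero, or cannot be converted.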
defp from_ts_to_datetime(time_ts) do
{_, unix_epoch_starts} = DateTime.from_unix(0)
case is_nil(time_ts) or time_ts == 0 do
true ->
unix_epoch_starts
false ->
case DateTime.from_unix(time_ts) do
{:ok, datetime} ->
datetime
{:error, _} ->
unix_epoch_starts
end
end
end
defp from_iso8601_to_datetime(time_string) do
case is_nil(time_string) do
true ->
from_ts_to_datetime(0)
false ->
case DateTime.from_iso8601(time_string) do
{:ok, datetime, _} ->
datetime
{:error, _} ->
from_ts_to_datetime(0)
end
end
end
defp json_txid_to_hash(hash) do
case hash do
"0x" <> tx_hash -> tx_hash
nil -> @zero_hash
end
end
defp strhash_to_byteshash(hash) do
hash
|> json_txid_to_hash()
|> Base.decode16!(case: :mixed)
end
@doc """
Transforms a map with batch data received from the `zks_getL1BatchDetails` call
into a map that can be used by Indexer.Fetcher.ZkSync fetchers for further handling.
All hexadecimal hashes are converted to their decoded binary representation, and
Unix and ISO8601 timestamps are converted to `DateTime` structs.
## Parameters
- `json_response`: Raw data received from the JSON RPC call.
## Returns
- A map containing minimal information about the batch. `start_block` and `end_block`
elements are set to `nil`.
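## Examples
An abridged sketch (field values are illustrative): a response such as
    %{"number" => 12, "timestamp" => 1_700_000_000, "l1TxCount" => 1, ...}
is transformed into
    %{number: 12, timestamp: ~U[2023-11-14 22:13:20Z], l1_tx_count: 1, start_block: nil, end_block: nil, ...}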
"""
@spec transform_batch_details_to_map(map()) :: map()
def transform_batch_details_to_map(json_response)
when is_map(json_response) do
%{
"number" => {:number, :ok},
"timestamp" => {:timestamp, :ts_to_datetime},
"l1TxCount" => {:l1_tx_count, :ok},
"l2TxCount" => {:l2_tx_count, :ok},
"rootHash" => {:root_hash, :str_to_byteshash},
"commitTxHash" => {:commit_tx_hash, :str_to_byteshash},
"committedAt" => {:commit_timestamp, :iso8601_to_datetime},
"proveTxHash" => {:prove_tx_hash, :str_to_byteshash},
"provenAt" => {:prove_timestamp, :iso8601_to_datetime},
"executeTxHash" => {:executed_tx_hash, :str_to_byteshash},
"executedAt" => {:executed_timestamp, :iso8601_to_datetime},
"l1GasPrice" => {:l1_gas_price, :ok},
"l2FairGasPrice" => {:l2_fair_gas_price, :ok}
# :start_block added by request_block_ranges_by_rpc
# :end_block added by request_block_ranges_by_rpc
}
|> Enum.reduce(%{start_block: nil, end_block: nil}, fn {key, {key_atom, transform_type}}, batch_details_map ->
value_in_json_response = Map.get(json_response, key)
Map.put(
batch_details_map,
key_atom,
case transform_type do
:iso8601_to_datetime -> from_iso8601_to_datetime(value_in_json_response)
:ts_to_datetime -> from_ts_to_datetime(value_in_json_response)
:str_to_txhash -> json_txid_to_hash(value_in_json_response)
:str_to_byteshash -> strhash_to_byteshash(value_in_json_response)
_ -> value_in_json_response
end
)
end)
end
@doc """
Transforms a map with batch data received from the database into a map that
can be used by Indexer.Fetcher.ZkSync fetchers for further handling.
## Parameters
- `batch`: A map containing a batch description received from the database.
## Returns
- A map containing a simplified representation of the batch, compatible with
the database import operation.
"""
def transform_transaction_batch_to_map(batch)
when is_map(batch) do
%{
number: batch.number,
timestamp: batch.timestamp,
l1_tx_count: batch.l1_tx_count,
l2_tx_count: batch.l2_tx_count,
root_hash: batch.root_hash.bytes,
l1_gas_price: batch.l1_gas_price,
l2_fair_gas_price: batch.l2_fair_gas_price,
start_block: batch.start_block,
end_block: batch.end_block,
commit_id: batch.commit_id,
prove_id: batch.prove_id,
execute_id: batch.execute_id
}
end
@doc """
Retrieves batch details from the RPC endpoint using the `zks_getL1BatchDetails` call.
## Parameters
- `batch_number`: The batch number or identifier.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
## Returns
- A map containing minimal batch details. It includes `start_block` and `end_block`
elements, both set to `nil`.
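## Examples
A sketch (the batch number and connection options are assumed):
    fetch_batch_details_by_batch_number(100, json_rpc_named_arguments)
    #=> %{number: 100, start_block: nil, end_block: nil, ...}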
"""
@spec fetch_batch_details_by_batch_number(binary() | non_neg_integer(), EthereumJSONRPC.json_rpc_named_arguments()) ::
map()
def fetch_batch_details_by_batch_number(batch_number, json_rpc_named_arguments)
when (is_integer(batch_number) or is_binary(batch_number)) and is_list(json_rpc_named_arguments) do
req =
EthereumJSONRPC.request(%{
id: batch_number,
method: "zks_getL1BatchDetails",
params: [batch_number]
})
error_message = &"Cannot call zks_getL1BatchDetails. Error: #{inspect(&1)}"
{:ok, resp} = repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, @rpc_resend_attempts)
transform_batch_details_to_map(resp)
end
@doc """
Fetches transaction details from the RPC endpoint using the `eth_getTransactionByHash` call.
## Parameters
- `raw_hash`: The hash of the Ethereum transaction. It can be provided as a decoded binary
or hexadecimal string.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
## Returns
- A map containing details of the transaction.
"""
@spec fetch_tx_by_hash(binary(), EthereumJSONRPC.json_rpc_named_arguments()) :: map()
def fetch_tx_by_hash(raw_hash, json_rpc_named_arguments)
when is_binary(raw_hash) and is_list(json_rpc_named_arguments) do
hash =
case raw_hash do
"0x" <> _ -> raw_hash
_ -> "0x" <> Base.encode16(raw_hash)
end
req =
EthereumJSONRPC.request(%{
id: 0,
method: "eth_getTransactionByHash",
params: [hash]
})
error_message = &"Cannot call eth_getTransactionByHash for hash #{hash}. Error: #{inspect(&1)}"
{:ok, resp} = repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, @rpc_resend_attempts)
resp
end
@doc """
Fetches the transaction receipt from the RPC endpoint using the `eth_getTransactionReceipt` call.
## Parameters
- `raw_hash`: The hash of the Ethereum transaction. It can be provided as a decoded binary
or hexadecimal string.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
## Returns
- A map containing the receipt details of the transaction.
"""
@spec fetch_tx_receipt_by_hash(binary(), EthereumJSONRPC.json_rpc_named_arguments()) :: map()
def fetch_tx_receipt_by_hash(raw_hash, json_rpc_named_arguments)
when is_binary(raw_hash) and is_list(json_rpc_named_arguments) do
hash =
case raw_hash do
"0x" <> _ -> raw_hash
_ -> "0x" <> Base.encode16(raw_hash)
end
req =
EthereumJSONRPC.request(%{
id: 0,
method: "eth_getTransactionReceipt",
params: [hash]
})
error_message = &"Cannot call eth_getTransactionReceipt for hash #{hash}. Error: #{inspect(&1)}"
{:ok, resp} = repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, @rpc_resend_attempts)
resp
end
@doc """
Fetches the latest sealed batch number from the RPC endpoint using the `zks_L1BatchNumber` call.
## Parameters
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
## Returns
- A non-negative integer representing the latest sealed batch number.
"""
@spec fetch_latest_sealed_batch_number(EthereumJSONRPC.json_rpc_named_arguments()) :: nil | non_neg_integer()
def fetch_latest_sealed_batch_number(json_rpc_named_arguments)
when is_list(json_rpc_named_arguments) do
req = EthereumJSONRPC.request(%{id: 0, method: "zks_L1BatchNumber", params: []})
error_message = &"Cannot call zks_L1BatchNumber. Error: #{inspect(&1)}"
{:ok, resp} = repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, @rpc_resend_attempts)
quantity_to_integer(resp)
end
@doc """
Fetches block details using multiple `eth_getBlockByNumber` RPC calls.
## Parameters
- `requests_list`: A list of `EthereumJSONRPC.Transport.request()` representing multiple
`eth_getBlockByNumber` RPC calls for different block numbers.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
## Returns
- A list of responses containing details of the requested blocks.
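## Examples
A sketch of building the request list (block numbers 100 and 101 are illustrative):
    requests =
      Enum.map([100, 101], fn number ->
        EthereumJSONRPC.request(%{
          id: number,
          method: "eth_getBlockByNumber",
          params: [EthereumJSONRPC.integer_to_quantity(number), false]
        })
      end)
    fetch_blocks_details(requests, json_rpc_named_arguments)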
"""
@spec fetch_blocks_details([EthereumJSONRPC.Transport.request()], EthereumJSONRPC.json_rpc_named_arguments()) ::
list()
def fetch_blocks_details(requests_list, json_rpc_named_arguments)
def fetch_blocks_details([], _) do
[]
end
def fetch_blocks_details(requests_list, json_rpc_named_arguments)
when is_list(requests_list) and is_list(json_rpc_named_arguments) do
error_message = &"Cannot call eth_getBlockByNumber. Error: #{inspect(&1)}"
{:ok, responses} =
repeated_call(&json_rpc/2, [requests_list, json_rpc_named_arguments], error_message, @rpc_resend_attempts)
responses
end
@doc """
Fetches batches details using multiple `zks_getL1BatchDetails` RPC calls.
## Parameters
- `requests_list`: A list of `EthereumJSONRPC.Transport.request()` representing multiple
`zks_getL1BatchDetails` RPC calls for different batch numbers.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
## Returns
- A list of responses containing details of the requested batches.
"""
@spec fetch_batches_details([EthereumJSONRPC.Transport.request()], EthereumJSONRPC.json_rpc_named_arguments()) ::
list()
def fetch_batches_details(requests_list, json_rpc_named_arguments)
def fetch_batches_details([], _) do
[]
end
def fetch_batches_details(requests_list, json_rpc_named_arguments)
when is_list(requests_list) and is_list(json_rpc_named_arguments) do
error_message = &"Cannot call zks_getL1BatchDetails. Error: #{inspect(&1)}"
{:ok, responses} =
repeated_call(&json_rpc/2, [requests_list, json_rpc_named_arguments], error_message, @rpc_resend_attempts)
responses
end
@doc """
Fetches block ranges included in the specified batches by using multiple
`zks_getL1BatchBlockRange` RPC calls.
## Parameters
- `requests_list`: A list of `EthereumJSONRPC.Transport.request()` representing multiple
`zks_getL1BatchBlockRange` RPC calls for different batch numbers.
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection.
## Returns
- A list of responses containing block ranges associated with the requested batches.
"""
@spec fetch_blocks_ranges([EthereumJSONRPC.Transport.request()], EthereumJSONRPC.json_rpc_named_arguments()) ::
list()
def fetch_blocks_ranges(requests_list, json_rpc_named_arguments)
def fetch_blocks_ranges([], _) do
[]
end
def fetch_blocks_ranges(requests_list, json_rpc_named_arguments)
when is_list(requests_list) and is_list(json_rpc_named_arguments) do
error_message = &"Cannot call zks_getL1BatchBlockRange. Error: #{inspect(&1)}"
{:ok, responses} =
repeated_call(&json_rpc/2, [requests_list, json_rpc_named_arguments], error_message, @rpc_resend_attempts)
responses
end
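# Calls `func` with `args`, retrying on `{:error, _}` up to `retries_left` times
# with a 3-second pause between attempts. The `error_message` function builds the
# log entry for each failure; the successful `{:ok, _}` result is passed through,
# and the last error tuple is returned once the attempts are exhausted.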
defp repeated_call(func, args, error_message, retries_left) do
case apply(func, args) do
{:ok, _} = res ->
res
{:error, message} = err ->
retries_left = retries_left - 1
if retries_left <= 0 do
log_error(error_message.(message))
err
else
log_error("#{error_message.(message)} Retrying...")
:timer.sleep(3000)
repeated_call(func, args, error_message, retries_left)
end
end
end
end

@@ -46,6 +46,9 @@ defmodule Indexer.Supervisor do
Withdrawal
}
alias Indexer.Fetcher.ZkSync.BatchesStatusTracker, as: ZkSyncBatchesStatusTracker
alias Indexer.Fetcher.ZkSync.TransactionBatch, as: ZkSyncTransactionBatch
alias Indexer.Temporary.{
BlocksTransactionsMismatch,
UncatalogedTokenTransfers,
@@ -167,6 +170,12 @@ defmodule Indexer.Supervisor do
configure(Indexer.Fetcher.PolygonZkevm.BridgeL2.Supervisor, [
[json_rpc_named_arguments: json_rpc_named_arguments, memory_monitor: memory_monitor]
]),
configure(ZkSyncTransactionBatch.Supervisor, [
[json_rpc_named_arguments: json_rpc_named_arguments, memory_monitor: memory_monitor]
]),
configure(ZkSyncBatchesStatusTracker.Supervisor, [
[json_rpc_named_arguments: json_rpc_named_arguments, memory_monitor: memory_monitor]
]),
configure(Indexer.Fetcher.PolygonZkevm.TransactionBatch.Supervisor, [
[json_rpc_named_arguments: json_rpc_named_arguments, memory_monitor: memory_monitor]
]),

@@ -18,6 +18,7 @@ defmodule ConfigHelper do
"suave" -> base_repos ++ [Explorer.Repo.Suave]
"filecoin" -> base_repos ++ [Explorer.Repo.Filecoin]
"stability" -> base_repos ++ [Explorer.Repo.Stability]
"zksync" -> base_repos ++ [Explorer.Repo.ZkSync]
_ -> base_repos
end

@@ -754,6 +754,21 @@ config :indexer, Indexer.Fetcher.PolygonEdge.WithdrawalExit,
start_block_l1: System.get_env("INDEXER_POLYGON_EDGE_L1_WITHDRAWALS_START_BLOCK"),
exit_helper: System.get_env("INDEXER_POLYGON_EDGE_L1_EXIT_HELPER_CONTRACT")
config :indexer, Indexer.Fetcher.ZkSync.TransactionBatch,
chunk_size: ConfigHelper.parse_integer_env_var("INDEXER_ZKSYNC_BATCHES_CHUNK_SIZE", 50),
batches_max_range: ConfigHelper.parse_integer_env_var("INDEXER_ZKSYNC_NEW_BATCHES_MAX_RANGE", 50),
recheck_interval: ConfigHelper.parse_integer_env_var("INDEXER_ZKSYNC_NEW_BATCHES_RECHECK_INTERVAL", 60)
config :indexer, Indexer.Fetcher.ZkSync.TransactionBatch.Supervisor,
enabled: ConfigHelper.parse_bool_env_var("INDEXER_ZKSYNC_BATCHES_ENABLED")
config :indexer, Indexer.Fetcher.ZkSync.BatchesStatusTracker,
zksync_l1_rpc: System.get_env("INDEXER_ZKSYNC_L1_RPC"),
recheck_interval: ConfigHelper.parse_integer_env_var("INDEXER_ZKSYNC_BATCHES_STATUS_RECHECK_INTERVAL", 60)
config :indexer, Indexer.Fetcher.ZkSync.BatchesStatusTracker.Supervisor,
enabled: ConfigHelper.parse_bool_env_var("INDEXER_ZKSYNC_BATCHES_ENABLED")
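# As a sketch, enabling the batch trackers above with their defaults only
# requires (values illustrative; the L1 RPC URL is an assumption):
#
#   INDEXER_ZKSYNC_BATCHES_ENABLED=true
#   INDEXER_ZKSYNC_L1_RPC=https://eth-mainnet.example.org
#
# The chunk size and new-batches max range then default to 50, and both
# recheck intervals default to 60 seconds.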
config :indexer, Indexer.Fetcher.RootstockData.Supervisor,
disabled?:
ConfigHelper.chain_type() != "rsk" || ConfigHelper.parse_bool_env_var("INDEXER_DISABLE_ROOTSTOCK_DATA_FETCHER")

@@ -117,6 +117,15 @@ config :explorer, Explorer.Repo.PolygonZkevm,
# separating repos for different CHAIN_TYPE is implemented only for the sake of keeping DB schema update relevant to the current chain type
pool_size: 1
# Configure ZkSync database
config :explorer, Explorer.Repo.ZkSync,
database: database,
hostname: hostname,
url: System.get_env("DATABASE_URL"),
# actually this repo is not started, and its pool size remains unused.
# separating repos for different CHAIN_TYPE is implemented only for the sake of keeping DB schema update relevant to the current chain type
pool_size: 1
# Configure Rootstock database
config :explorer, Explorer.Repo.RSK,
database: database,
@@ -152,8 +161,6 @@ config :explorer, Explorer.Repo.Stability,
database: database,
hostname: hostname,
url: System.get_env("DATABASE_URL"),
# actually this repo is not started, and its pool size remains unused.
# separating repos for different CHAIN_TYPE is implemented only for the sake of keeping DB schema update relevant to the current chain type
pool_size: 1
variant = Variant.get()

@@ -87,6 +87,14 @@ config :explorer, Explorer.Repo.PolygonZkevm,
pool_size: 1,
ssl: ExplorerConfigHelper.ssl_enabled?()
# Configures ZkSync database
config :explorer, Explorer.Repo.ZkSync,
url: System.get_env("DATABASE_URL"),
# actually this repo is not started, and its pool size remains unused.
# separating repos for different CHAIN_TYPE is implemented only for the sake of keeping DB schema update relevant to the current chain type
pool_size: 1,
ssl: ExplorerConfigHelper.ssl_enabled?()
# Configures Rootstock database
config :explorer, Explorer.Repo.RSK,
url: System.get_env("DATABASE_URL"),
@@ -116,8 +124,6 @@ config :explorer, Explorer.Repo.Filecoin,
# Configures Stability database
config :explorer, Explorer.Repo.Stability,
url: System.get_env("DATABASE_URL"),
# actually this repo is not started, and its pool size remains unused.
# separating repos for different CHAIN_TYPE is implemented only for the sake of keeping DB schema update relevant to the current chain type
pool_size: 1,
ssl: ExplorerConfigHelper.ssl_enabled?()

@@ -9,28 +9,113 @@
"apps/block_scout_web/assets/js/lib/ace/src-min/*.js"
],
"words": [
"AION",
"AIRTABLE",
"ARGMAX",
"Aiubo",
"Arbitrum",
"Asfpp",
"Asfpp",
"Autodetection",
"Autonity",
"Blockchair",
"CALLCODE",
"CBOR",
"Cldr",
"Consolas",
"Cyclomatic",
"DATETIME",
"DELEGATECALL",
"Decompiler",
"DefiLlama",
"DefiLlama",
"Denormalization",
"Denormalized",
"ECTO",
"EDCSA",
"Ebhwp",
"Encryptor",
"Erigon",
"Ethash",
"Faileddi",
"Filesize",
"Floki",
"Fuov",
"Hazkne",
"Hodl",
"Iframe",
"Iframes",
"Incrementer",
"Instrumenter",
"Karnaugh",
"Keepalive",
"LUKSO",
"Limegreen",
"MARKETCAP",
"MDWW",
"Mainnets",
"Mendonça",
"Menlo",
"Merkle",
"Mixfile",
"NOTOK",
"Nerg",
"Nerg",
"Nethermind",
"Neue",
"Njhr",
"Nodealus",
"NovesFi",
"Numbe",
"Nunito",
"PGDATABASE",
"PGHOST",
"PGPASSWORD",
"PGPORT",
"PGUSER",
"POSDAO",
"Posix",
"Postrge",
"Qebz",
"Qmbgk",
"REINDEX",
"RPC's",
"RPCs",
"SENDGRID",
"SJONRPC",
"SOLIDITYSCAN",
"SOLIDITYSCAN",
"STATICCALL",
"Secon",
"Segoe",
"Sokol",
"Synthereum",
"Sérgio",
"Tcnwg",
"Testinit",
"Testit",
"Testname",
"Txns",
"UUPS",
"Unitarion",
"Unitorius",
"Unitorus",
"Utqn",
"Wanchain",
"aave",
"absname",
"acbs",
"accs",
"actb",
"addedfile",
"AION",
"AIRTABLE",
"Aiubo",
"alloc",
"amzootyukbugmx",
"apikey",
"Arbitrum",
"ARGMAX",
"arounds",
"asda",
"Asfpp",
"atoken",
"autodetectfalse",
"Autodetection",
"autodetecttrue",
"Autonity",
"autoplay",
"backoff",
"badhash",
@@ -46,7 +131,6 @@
"bigserial",
"binwrite",
"bizbuz",
"Blockchair",
"blockheight",
"blockless",
"blockno",
@@ -62,15 +146,14 @@
"buildx",
"bytea",
"bytecodes",
"byteshash",
"byts",
"bzzr",
"cacerts",
"callcode",
"CALLCODE",
"calltracer",
"capturelog",
"cattributes",
"CBOR",
"cellspacing",
"certifi",
"cfasync",
@@ -78,11 +161,11 @@
"chainlink",
"chakra",
"chartjs",
"checkproxyverification",
"checksummed",
"checkverifystatus",
"childspec",
"citext",
"Cldr",
"clearfix",
"clickover",
"codeformat",
@@ -100,8 +183,8 @@
"compilerversion",
"concache",
"cond",
"Consolas",
"contractaddress",
"contractaddresses",
"contractname",
"cooldown",
"cooltesthost",
@@ -109,30 +192,23 @@
"ctbs",
"ctid",
"cumalative",
"Cyclomatic",
"cypherpunk",
"czilladx",
"datapoint",
"datepicker",
"DATETIME",
"deae",
"decamelize",
"decompiled",
"decompiler",
"Decompiler",
"dedup",
"DefiLlama",
"defmock",
"defsupervisor",
"dejob",
"dejobio",
"delegatecall",
"DELEGATECALL",
"delegators",
"demonitor",
"denormalization",
"Denormalization",
"Denormalized",
"descr",
"describedby",
"differenceby",
@@ -140,22 +216,17 @@
"dropzone",
"dxgd",
"dyntsrohg",
"Ebhwp",
"econnrefused",
"ECTO",
"EDCSA",
"edhygl",
"efkuga",
"Encryptor",
"endregion",
"enetunreach",
"enoent",
"epns",
"Erigon",
"errora",
"errorb",
"erts",
"Ethash",
"erts",
"etherchain",
"ethprice",
"ethsupply",
@@ -163,6 +234,7 @@
"etimedout",
"eveem",
"evenodd",
"evmversion",
"exitor",
"explorable",
"exponention",
@@ -170,18 +242,16 @@
"extname",
"extremums",
"exvcr",
"Faileddi",
"falala",
"FEVM",
"Filesize",
"Filecoin",
"fkey",
"Floki",
"fkey",
"fontawesome",
"fortawesome",
"fsym",
"fullwidth",
"Fuov",
"fvdskvjglav",
"fwrite",
"fwupv",
@@ -190,6 +260,7 @@
"getblockcountdown",
"getblocknobytime",
"getblockreward",
"getcontractcreation",
"getlogs",
"getminedblocks",
"getsourcecode",
@@ -208,7 +279,6 @@
"gtag",
"happygokitty",
"haspopup",
"Hazkne",
"histoday",
"hljs",
"Hodl",
@@ -217,15 +287,11 @@
"hyperledger",
"ifdef",
"ifeq",
"Iframe",
"iframes",
"Iframes",
"ilike",
"illustr",
"inapp",
"Incrementer",
"insertable",
"Instrumenter",
"intersectionby",
"ints",
"invalidend",
@@ -241,19 +307,19 @@
"johnnny",
"jsons",
"juon",
"Karnaugh",
"keccak",
"Keepalive",
"keyout",
"kittencream",
"labeledby",
"labelledby",
"lastmod",
"lastmod",
"lastname",
"lastword",
"lformat",
"libraryaddress",
"libraryname",
"libsecp",
"Limegreen",
"linecap",
"linejoin",
"listaccounts",
@@ -261,30 +327,23 @@
"lkve",
"llhauc",
"loggable",
"LUKSO",
"luxon",
"mabi",
"Mainnets",
"malihu",
"mallowance",
"MARKETCAP",
"maxlength",
"mcap",
"mconst",
"mdef",
"MDWW",
"meer",
"Mendonça",
"Menlo",
"meer",
"mergeable",
"Merkle",
"metatags",
"microsecs",
"millis",
"mintings",
"mistmatches",
"miterlimit",
"Mixfile",
"mmem",
"mname",
"mnot",
@@ -306,17 +365,12 @@
"mydep",
"nanomorph",
"nbsp",
"Nerg",
"Nethermind",
"Neue",
"newkey",
"nftproduct",
"ngettext",
"nillifies",
"Njhr",
"nlmyzui",
"nocheck",
"Nodealus",
"nohighlight",
"nolink",
"nonconsensus",
@@ -325,12 +379,9 @@
"noreferrer",
"noreply",
"noves",
"NovesFi",
"nowarn",
"nowrap",
"ntoa",
"Numbe",
"Nunito",
"nxdomain",
"omni",
"onclick",
@@ -346,11 +397,6 @@
"pendingtxlist",
"perc",
"persistable",
"PGDATABASE",
"PGHOST",
"PGPASSWORD",
"PGPORT",
"PGUSER",
"phash",
"pikaday",
"pkey",
@@ -363,9 +409,6 @@
"pocc",
"polyline",
"poolboy",
"POSDAO",
"Posix",
"Postrge",
"prederive",
"prederived",
"progressbar",
@@ -373,15 +416,15 @@
"psql",
"purrstige",
"qdai",
"Qebz",
"qitmeer",
"Qmbgk",
"qitmeer",
"qrcode",
"queriable",
"questiona",
"questionb",
"qwertyufhgkhiop",
"qwertyuioiuytrewertyuioiuytrertyuio",
"qwertyuioiuytrewertyuioiuytrertyuio",
"racecar",
"raisedbrow",
"rangeright",
@@ -414,9 +457,8 @@
"RPCs",
"safelow",
"savechives",
"Secon",
"secp",
"Segoe",
"secp",
"seindexed",
"selfdestruct",
"selfdestructed",
@@ -428,13 +470,10 @@
"shibarium",
"shortdoc",
"shortify",
"SJONRPC",
"smallint",
"smth",
"snapshotted",
"snapshotting",
"Sokol",
"SOLIDITYSCAN",
"soljson",
"someout",
"sourcecode",
@@ -445,8 +484,8 @@
"stakers",
"stateroot",
"staticcall",
"STATICCALL",
"strftime",
"strhash",
"stringly",
"stylelint",
"stylesheet",
@@ -464,26 +503,23 @@
"supernet",
"swal",
"sweetalert",
"Synthereum",
"tabindex",
"tablist",
"tabpanel",
"tarekraafat",
"tbody",
"tbrf",
"Tcnwg",
"tems",
"Testinit",
"Testit",
"Testname",
"testpassword",
"testtest",
"testuser",
"thead",
"thicccbrowz",
"throttleable",
"timestmaps",
"tokenbalance",
"tokenlist",
"tokennfttx",
"tokensupply",
"tokentx",
"topbar",
@@ -496,8 +532,8 @@
"tsquery",
"tsvector",
"tsym",
"txid",
"txlistinternal",
"Txns",
"txpool",
"txreceipt",
"ueberauth",
@@ -506,9 +542,6 @@
"unclosable",
"unfetched",
"unfinalized",
"Unitarion",
"Unitorius",
"Unitorus",
"unknownc",
"unknowne",
"unmarshal",
@@ -523,8 +556,7 @@
"upserts",
"urijs",
"urlset",
"Utqn",
"UUPS",
"urlset",
"valign",
"valuemax",
"valuemin",
@@ -536,7 +568,6 @@
"volumeto",
"vyper",
"walletconnect",
"Wanchain",
"warninga",
"warningb",
"watchlist",

@@ -188,6 +188,12 @@ INDEXER_DISABLE_INTERNAL_TRANSACTIONS_FETCHER=false
# INDEXER_POLYGON_ZKEVM_L1_BRIDGE_NATIVE_DECIMALS=
# INDEXER_POLYGON_ZKEVM_L2_BRIDGE_START_BLOCK=
# INDEXER_POLYGON_ZKEVM_L2_BRIDGE_CONTRACT=
# INDEXER_ZKSYNC_BATCHES_ENABLED=
# INDEXER_ZKSYNC_BATCHES_CHUNK_SIZE=
# INDEXER_ZKSYNC_NEW_BATCHES_MAX_RANGE=
# INDEXER_ZKSYNC_NEW_BATCHES_RECHECK_INTERVAL=
# INDEXER_ZKSYNC_L1_RPC=
# INDEXER_ZKSYNC_BATCHES_STATUS_RECHECK_INTERVAL=
# INDEXER_REALTIME_FETCHER_MAX_GAP=
# INDEXER_FETCHER_INIT_QUERY_LIMIT=
# INDEXER_TOKEN_BALANCES_FETCHER_INIT_QUERY_LIMIT=
