zksync chain type support (#9631)

* zkSync customizations
* Insert placeholders instead of deriving current token balances
* ZkSync Batches status tracking (#9080)
* initial version of batch tracking
* missed file added
* attempt to add DB migration
* Finalized L1 txs tracking
* keep batches in DB
* Batches statuses tracker introduction
* RPC endpoints to get batches data
* extended views for blocks and transactions
* Refactoring of fetchers
* Fetch historical blocks
* handle_info calls simplified
* Ability to recover missed blocks
* zksync info in a separate sub-map
* added doc comments, part 1
* finalized doc comments
* actual batches count instead of the last imported batch
* fix formatting
* credo fixes
* Address dialyzer warnings
* Fix spelling
* remaining issues with spelling and dialyzer
* Attempt to address BlockScout Web Tests issue
* review comments addressed, part 1
* review comments addressed, part 2
* all_options collection for the import module reworked to get rid of dialyzer findings
* removed unnecessary functionality
* proper import
* Credo fixes
* Add CHAIN_TYPE=zksync to image generation workflow
* Proper handling of empty transactions list in eth_getBlockByNumber
* Merge master
* Address merge issues
* Fix format
* Refactoring of chain-type-specific code for block and transaction views
* Consistent name for functions
* add exceptions for Credo.Check.Design.AliasUsage
* Fix rebasing conflicts
* Fix rebase conflicts
* fix issue with stability fees in tx view
* make Stability-related tests depend on chain type at compile time
* move zksync related migration
* Changelog updated
* removal of duplicated migration
* List r, s, v as optional attributes for transaction

---------

Co-authored-by: Viktor Baranov <baranov.viktor.27@gmail.com>
Co-authored-by: Qwerty5Uiop <alex000010@bk.ru>

pull/9640/head
parent 400b45b145
commit 51d82f1dbf
@@ -0,0 +1,120 @@
defmodule BlockScoutWeb.API.V2.ZkSyncController do
  use BlockScoutWeb, :controller

  import BlockScoutWeb.Chain,
    only: [
      next_page_params: 4,
      paging_options: 1,
      split_list_by_page: 1
    ]

  alias Explorer.Chain.ZkSync.{Reader, TransactionBatch}

  action_fallback(BlockScoutWeb.API.V2.FallbackController)

  @batch_necessity_by_association %{
    :commit_transaction => :optional,
    :prove_transaction => :optional,
    :execute_transaction => :optional,
    :l2_transactions => :optional
  }

  @batches_necessity_by_association %{
    :commit_transaction => :optional,
    :prove_transaction => :optional,
    :execute_transaction => :optional
  }

  @doc """
    Function to handle GET requests to `/api/v2/zksync/batches/:batch_number` endpoint.
  """
  @spec batch(Plug.Conn.t(), map()) :: Plug.Conn.t()
  def batch(conn, %{"batch_number" => batch_number} = _params) do
    case Reader.batch(
           batch_number,
           necessity_by_association: @batch_necessity_by_association,
           api?: true
         ) do
      {:ok, batch} ->
        conn
        |> put_status(200)
        |> render(:zksync_batch, %{batch: batch})

      {:error, :not_found} = res ->
        res
    end
  end

  @doc """
    Function to handle GET requests to `/api/v2/zksync/batches` endpoint.
  """
  @spec batches(Plug.Conn.t(), map()) :: Plug.Conn.t()
  def batches(conn, params) do
    {batches, next_page} =
      params
      |> paging_options()
      |> Keyword.put(:necessity_by_association, @batches_necessity_by_association)
      |> Keyword.put(:api?, true)
      |> Reader.batches()
      |> split_list_by_page()

    next_page_params =
      next_page_params(
        next_page,
        batches,
        params,
        fn %TransactionBatch{number: number} -> %{"number" => number} end
      )

    conn
    |> put_status(200)
    |> render(:zksync_batches, %{
      batches: batches,
      next_page_params: next_page_params
    })
  end

  @doc """
    Function to handle GET requests to `/api/v2/zksync/batches/count` endpoint.
  """
  @spec batches_count(Plug.Conn.t(), map()) :: Plug.Conn.t()
  def batches_count(conn, _params) do
    conn
    |> put_status(200)
    |> render(:zksync_batches_count, %{count: Reader.batches_count(api?: true)})
  end

  @doc """
    Function to handle GET requests to `/api/v2/main-page/zksync/batches/confirmed` endpoint.
  """
  @spec batches_confirmed(Plug.Conn.t(), map()) :: Plug.Conn.t()
  def batches_confirmed(conn, _params) do
    batches =
      []
      |> Keyword.put(:necessity_by_association, @batches_necessity_by_association)
      |> Keyword.put(:api?, true)
      |> Keyword.put(:confirmed?, true)
      |> Reader.batches()

    conn
    |> put_status(200)
    |> render(:zksync_batches, %{batches: batches})
  end

  @doc """
    Function to handle GET requests to `/api/v2/main-page/zksync/batches/latest-number` endpoint.
  """
  @spec batch_latest_number(Plug.Conn.t(), map()) :: Plug.Conn.t()
  def batch_latest_number(conn, _params) do
    conn
    |> put_status(200)
    |> render(:zksync_batch_latest_number, %{number: batch_latest_number()})
  end

  defp batch_latest_number do
    case Reader.batch(:latest, api?: true) do
      {:ok, batch} -> batch.number
      {:error, :not_found} -> 0
    end
  end
end
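Side note (not part of the diff): the `batches/2` action above builds a plain keyword list of options before handing it to `Reader.batches/1`. A minimal, self-contained sketch of that pattern, runnable in iex with the standard library only (the `:paging_options` value here is a stand-in, not the real `Explorer.PagingOptions` struct):

```elixir
options =
  [paging_options: %{page_size: 50}]
  |> Keyword.put(:necessity_by_association, %{commit_transaction: :optional})
  |> Keyword.put(:api?, true)

# Keyword.put/3 prepends, so the most recently added keys come first.
Keyword.keys(options)
# => [:api?, :necessity_by_association, :paging_options]
```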
@@ -0,0 +1,41 @@
defmodule BlockScoutWeb.API.V2.EthereumView do
  alias Explorer.Chain.{Block, Transaction}

  def extend_transaction_json_response(out_json, %Transaction{} = transaction) do
    case Map.get(transaction, :beacon_blob_transaction) do
      nil ->
        out_json

      %Ecto.Association.NotLoaded{} ->
        out_json

      item ->
        out_json
        |> Map.put("max_fee_per_blob_gas", item.max_fee_per_blob_gas)
        |> Map.put("blob_versioned_hashes", item.blob_versioned_hashes)
        |> Map.put("blob_gas_used", item.blob_gas_used)
        |> Map.put("blob_gas_price", item.blob_gas_price)
        |> Map.put("burnt_blob_fee", Decimal.mult(item.blob_gas_used, item.blob_gas_price))
    end
  end

  def extend_block_json_response(out_json, %Block{} = block, single_block?) do
    blob_gas_used = Map.get(block, :blob_gas_used)
    excess_blob_gas = Map.get(block, :excess_blob_gas)

    if single_block? do
      blob_gas_price = Block.transaction_blob_gas_price(block.transactions)
      burnt_blob_transaction_fees = Decimal.mult(blob_gas_used || 0, blob_gas_price || 0)

      out_json
      |> Map.put("blob_gas_used", blob_gas_used)
      |> Map.put("excess_blob_gas", excess_blob_gas)
      |> Map.put("blob_gas_price", blob_gas_price)
      |> Map.put("burnt_blob_fees", burnt_blob_transaction_fees)
    else
      out_json
      |> Map.put("blob_gas_used", blob_gas_used)
      |> Map.put("excess_blob_gas", excess_blob_gas)
    end
  end
end
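Side note (not part of the diff): the burnt blob fee above is a plain multiplication of `blob_gas_used` by `blob_gas_price`. A small sketch, assuming the `decimal` package (already an Ecto/BlockScout dependency) is available; the numbers are illustrative:

```elixir
blob_gas_used = Decimal.new(131_072)
blob_gas_price = Decimal.new(42)

Decimal.mult(blob_gas_used, blob_gas_price)
# => a Decimal equal to 5_505_024
```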
@@ -0,0 +1,19 @@
defmodule BlockScoutWeb.API.V2.RootstockView do
  alias Explorer.Chain.Block

  def extend_block_json_response(out_json, %Block{} = block) do
    out_json
    |> add_optional_transaction_field(block, :minimum_gas_price)
    |> add_optional_transaction_field(block, :bitcoin_merged_mining_header)
    |> add_optional_transaction_field(block, :bitcoin_merged_mining_coinbase_transaction)
    |> add_optional_transaction_field(block, :bitcoin_merged_mining_merkle_proof)
    |> add_optional_transaction_field(block, :hash_for_merged_mining)
  end

  defp add_optional_transaction_field(out_json, block, field) do
    case Map.get(block, field) do
      nil -> out_json
      value -> Map.put(out_json, Atom.to_string(field), value)
    end
  end
end
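Side note (not part of the diff): `add_optional_transaction_field/3` only copies a field into the response when it is present. The same behaviour as a self-contained anonymous function with plain maps (the field value is made up for illustration):

```elixir
add_optional = fn out, source, field ->
  case Map.get(source, field) do
    nil -> out
    value -> Map.put(out, Atom.to_string(field), value)
  end
end

add_optional.(%{}, %{minimum_gas_price: 59_240_000}, :minimum_gas_price)
# => %{"minimum_gas_price" => 59240000}

add_optional.(%{}, %{minimum_gas_price: nil}, :minimum_gas_price)
# => %{}
```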
@@ -0,0 +1,126 @@
defmodule BlockScoutWeb.API.V2.StabilityView do
  alias BlockScoutWeb.API.V2.{Helper, TokenView}
  alias Explorer.Chain.{Hash, Log, Token, Transaction}

  @api_true [api?: true]
  @transaction_fee_event_signature "0x99e7b0ba56da2819c37c047f0511fd2bf6c9b4e27b4a979a19d6da0f74be8155"
  @transaction_fee_event_abi [
    %{
      "anonymous" => false,
      "inputs" => [
        %{
          "indexed" => false,
          "internalType" => "address",
          "name" => "token",
          "type" => "address"
        },
        %{
          "indexed" => false,
          "internalType" => "uint256",
          "name" => "totalFee",
          "type" => "uint256"
        },
        %{
          "indexed" => false,
          "internalType" => "address",
          "name" => "validator",
          "type" => "address"
        },
        %{
          "indexed" => false,
          "internalType" => "uint256",
          "name" => "validatorFee",
          "type" => "uint256"
        },
        %{
          "indexed" => false,
          "internalType" => "address",
          "name" => "dapp",
          "type" => "address"
        },
        %{
          "indexed" => false,
          "internalType" => "uint256",
          "name" => "dappFee",
          "type" => "uint256"
        }
      ],
      "name" => "TransactionFee",
      "type" => "event"
    }
  ]

  def extend_transaction_json_response(out_json, %Transaction{} = transaction) do
    case transaction.transaction_fee_log do
      [
        {"token", "address", false, token_address_hash},
        {"totalFee", "uint256", false, total_fee},
        {"validator", "address", false, validator_address_hash},
        {"validatorFee", "uint256", false, validator_fee},
        {"dapp", "address", false, dapp_address_hash},
        {"dappFee", "uint256", false, dapp_fee}
      ] ->
        stability_fee = %{
          "token" =>
            TokenView.render("token.json", %{
              token: transaction.transaction_fee_token,
              contract_address_hash: bytes_to_address_hash(token_address_hash)
            }),
          "validator_address" =>
            Helper.address_with_info(nil, nil, bytes_to_address_hash(validator_address_hash), false),
          "dapp_address" => Helper.address_with_info(nil, nil, bytes_to_address_hash(dapp_address_hash), false),
          "total_fee" => to_string(total_fee),
          "dapp_fee" => to_string(dapp_fee),
          "validator_fee" => to_string(validator_fee)
        }

        out_json
        |> Map.put("stability_fee", stability_fee)

      _ ->
        out_json
    end
  end

  def transform_transactions(transactions) do
    do_extend_with_stability_fees_info(transactions)
  end

  defp do_extend_with_stability_fees_info(transactions) when is_list(transactions) do
    {transactions, _tokens_acc} =
      Enum.map_reduce(transactions, %{}, fn transaction, tokens_acc ->
        case Log.fetch_log_by_tx_hash_and_first_topic(transaction.hash, @transaction_fee_event_signature, @api_true) do
          fee_log when not is_nil(fee_log) ->
            {:ok, _selector, mapping} = Log.find_and_decode(@transaction_fee_event_abi, fee_log, transaction.hash)

            [{"token", "address", false, token_address_hash}, _, _, _, _, _] = mapping

            {token, new_tokens_acc} = check_tokens_acc(bytes_to_address_hash(token_address_hash), tokens_acc)

            {%Transaction{transaction | transaction_fee_log: mapping, transaction_fee_token: token}, new_tokens_acc}

          _ ->
            {transaction, tokens_acc}
        end
      end)

    transactions
  end

  defp do_extend_with_stability_fees_info(transaction) do
    [transaction] = do_extend_with_stability_fees_info([transaction])
    transaction
  end

  defp check_tokens_acc(token_address_hash, tokens_acc) do
    if Map.has_key?(tokens_acc, token_address_hash) do
      {tokens_acc[token_address_hash], tokens_acc}
    else
      token = Token.get_by_contract_address_hash(token_address_hash, @api_true)

      {token, Map.put(tokens_acc, token_address_hash, token)}
    end
  end

  defp bytes_to_address_hash(bytes), do: %Hash{byte_count: 20, bytes: bytes}
end
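Side note (not part of the diff): `do_extend_with_stability_fees_info/1` threads a token cache through `Enum.map_reduce/3` so each fee token is looked up only once per request. A self-contained sketch of that caching shape, where `lookup/1` stands in for `Token.get_by_contract_address_hash/2`:

```elixir
lookup = fn hash ->
  # pretend this hits the database
  %{contract: hash, symbol: "TKN"}
end

hashes = ["0xaa", "0xbb", "0xaa"]

{resolved, cache} =
  Enum.map_reduce(hashes, %{}, fn hash, acc ->
    case acc do
      # already seen: reuse the cached token
      %{^hash => token} ->
        {token, acc}

      _ ->
        token = lookup.(hash)
        {token, Map.put(acc, hash, token)}
    end
  end)

{length(resolved), map_size(cache)}
# => {3, 2}  (three results, only two distinct lookups)
```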
@@ -0,0 +1,130 @@
defmodule BlockScoutWeb.API.V2.SuaveView do
  alias BlockScoutWeb.API.V2.Helper, as: APIHelper
  alias BlockScoutWeb.API.V2.TransactionView

  alias Explorer.Helper, as: ExplorerHelper

  alias Ecto.Association.NotLoaded
  alias Explorer.Chain.{Hash, Transaction}

  @suave_bid_event "0x83481d5b04dea534715acad673a8177a46fc93882760f36bdc16ccac439d504e"

  def extend_transaction_json_response(%Transaction{} = transaction, out_json, single_tx?, conn, watchlist_names) do
    if is_nil(Map.get(transaction, :execution_node_hash)) do
      out_json
    else
      wrapped_to_address = Map.get(transaction, :wrapped_to_address)
      wrapped_to_address_hash = Map.get(transaction, :wrapped_to_address_hash)
      wrapped_input = Map.get(transaction, :wrapped_input)
      wrapped_hash = Map.get(transaction, :wrapped_hash)
      execution_node = Map.get(transaction, :execution_node)
      execution_node_hash = Map.get(transaction, :execution_node_hash)
      wrapped_type = Map.get(transaction, :wrapped_type)
      wrapped_nonce = Map.get(transaction, :wrapped_nonce)
      wrapped_gas = Map.get(transaction, :wrapped_gas)
      wrapped_gas_price = Map.get(transaction, :wrapped_gas_price)
      wrapped_max_priority_fee_per_gas = Map.get(transaction, :wrapped_max_priority_fee_per_gas)
      wrapped_max_fee_per_gas = Map.get(transaction, :wrapped_max_fee_per_gas)
      wrapped_value = Map.get(transaction, :wrapped_value)

      {[wrapped_decoded_input], _, _} =
        TransactionView.decode_transactions(
          [
            %Transaction{
              to_address: wrapped_to_address,
              input: wrapped_input,
              hash: wrapped_hash
            }
          ],
          false
        )

      out_json
      |> Map.put("allowed_peekers", suave_parse_allowed_peekers(transaction.logs))
      |> Map.put(
        "execution_node",
        APIHelper.address_with_info(
          conn,
          execution_node,
          execution_node_hash,
          single_tx?,
          watchlist_names
        )
      )
      |> Map.put("wrapped", %{
        "type" => wrapped_type,
        "nonce" => wrapped_nonce,
        "to" =>
          APIHelper.address_with_info(
            conn,
            wrapped_to_address,
            wrapped_to_address_hash,
            single_tx?,
            watchlist_names
          ),
        "gas_limit" => wrapped_gas,
        "gas_price" => wrapped_gas_price,
        "fee" =>
          TransactionView.format_fee(
            Transaction.fee(
              %Transaction{gas: wrapped_gas, gas_price: wrapped_gas_price, gas_used: nil},
              :wei
            )
          ),
        "max_priority_fee_per_gas" => wrapped_max_priority_fee_per_gas,
        "max_fee_per_gas" => wrapped_max_fee_per_gas,
        "value" => wrapped_value,
        "hash" => wrapped_hash,
        "method" =>
          TransactionView.method_name(
            %Transaction{to_address: wrapped_to_address, input: wrapped_input},
            wrapped_decoded_input
          ),
        "decoded_input" => TransactionView.decoded_input(wrapped_decoded_input),
        "raw_input" => wrapped_input
      })
    end
  end

  # @spec suave_parse_allowed_peekers(Ecto.Schema.has_many(Log.t())) :: [String.t()]
  defp suave_parse_allowed_peekers(%NotLoaded{}), do: []

  defp suave_parse_allowed_peekers(logs) do
    suave_bid_contracts =
      Application.get_all_env(:explorer)[Transaction][:suave_bid_contracts]
      |> String.split(",")
      |> Enum.map(fn sbc -> String.downcase(String.trim(sbc)) end)

    bid_event =
      Enum.find(logs, fn log ->
        sanitize_log_first_topic(log.first_topic) == @suave_bid_event &&
          Enum.member?(suave_bid_contracts, String.downcase(Hash.to_string(log.address_hash)))
      end)

    if is_nil(bid_event) do
      []
    else
      [_bid_id, _decryption_condition, allowed_peekers] =
        ExplorerHelper.decode_data(bid_event.data, [{:bytes, 16}, {:uint, 64}, {:array, :address}])

      Enum.map(allowed_peekers, fn peeker ->
        "0x" <> Base.encode16(peeker, case: :lower)
      end)
    end
  end

  defp sanitize_log_first_topic(first_topic) do
    if is_nil(first_topic) do
      ""
    else
      sanitized =
        if is_binary(first_topic) do
          first_topic
        else
          Hash.to_string(first_topic)
        end

      String.downcase(sanitized)
    end
  end
end
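Side note (not part of the diff): the decoded peeker addresses come back as raw 20-byte binaries and are rendered as 0x-prefixed lowercase hex, as in `suave_parse_allowed_peekers/1` above. A standalone illustration with a made-up address:

```elixir
# 4 marker bytes followed by 16 zero bytes, 20 bytes total
peeker = <<0xDE, 0xAD, 0xBE, 0xEF>> <> :binary.copy(<<0>>, 16)

"0x" <> Base.encode16(peeker, case: :lower)
# => "0xdeadbeef00000000000000000000000000000000"
```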
@@ -0,0 +1,235 @@
defmodule BlockScoutWeb.API.V2.ZkSyncView do
  use BlockScoutWeb, :view

  alias Explorer.Chain.{Block, Transaction}
  alias Explorer.Chain.ZkSync.TransactionBatch

  @doc """
    Function to render GET requests to `/api/v2/zksync/batches/:batch_number` endpoint.
  """
  @spec render(binary(), map()) :: map() | non_neg_integer()
  def render("zksync_batch.json", %{batch: batch}) do
    l2_transactions =
      if Map.has_key?(batch, :l2_transactions) do
        Enum.map(batch.l2_transactions, fn tx -> tx.hash end)
      end

    %{
      "number" => batch.number,
      "timestamp" => batch.timestamp,
      "root_hash" => batch.root_hash,
      "l1_tx_count" => batch.l1_tx_count,
      "l2_tx_count" => batch.l2_tx_count,
      "l1_gas_price" => batch.l1_gas_price,
      "l2_fair_gas_price" => batch.l2_fair_gas_price,
      "start_block" => batch.start_block,
      "end_block" => batch.end_block,
      "transactions" => l2_transactions
    }
    |> add_l1_txs_info_and_status(batch)
  end

  @doc """
    Function to render GET requests to `/api/v2/zksync/batches` endpoint.
  """
  def render("zksync_batches.json", %{
        batches: batches,
        next_page_params: next_page_params
      }) do
    %{
      items: render_zksync_batches(batches),
      next_page_params: next_page_params
    }
  end

  @doc """
    Function to render GET requests to `/api/v2/main-page/zksync/batches/confirmed` endpoint.
  """
  def render("zksync_batches.json", %{batches: batches}) do
    %{items: render_zksync_batches(batches)}
  end

  @doc """
    Function to render GET requests to `/api/v2/zksync/batches/count` endpoint.
  """
  def render("zksync_batches_count.json", %{count: count}) do
    count
  end

  @doc """
    Function to render GET requests to `/api/v2/main-page/zksync/batches/latest-number` endpoint.
  """
  def render("zksync_batch_latest_number.json", %{number: number}) do
    number
  end

  defp render_zksync_batches(batches) do
    Enum.map(batches, fn batch ->
      %{
        "number" => batch.number,
        "timestamp" => batch.timestamp,
        "tx_count" => batch.l1_tx_count + batch.l2_tx_count
      }
      |> add_l1_txs_info_and_status(batch)
    end)
  end

  @doc """
    Extends the json output with a sub-map containing information related to
    zksync: the batch number and the associated L1 transactions with their timestamps.

    ## Parameters
    - `out_json`: a map defining the output json which will be extended
    - `transaction`: the transaction structure containing zksync related data

    ## Returns
    A map extended with data related to the zksync rollup
  """
  @spec extend_transaction_json_response(map(), %{
          :__struct__ => Explorer.Chain.Transaction,
          :zksync_batch => any(),
          :zksync_commit_transaction => any(),
          :zksync_execute_transaction => any(),
          :zksync_prove_transaction => any(),
          optional(any()) => any()
        }) :: map()
  def extend_transaction_json_response(out_json, %Transaction{} = transaction) do
    do_add_zksync_info(out_json, transaction)
  end

  @doc """
    Extends the json output with a sub-map containing information related to
    zksync: the batch number and the associated L1 transactions with their timestamps.

    ## Parameters
    - `out_json`: a map defining the output json which will be extended
    - `block`: the block structure containing zksync related data

    ## Returns
    A map extended with data related to the zksync rollup
  """
  @spec extend_block_json_response(map(), %{
          :__struct__ => Explorer.Chain.Block,
          :zksync_batch => any(),
          :zksync_commit_transaction => any(),
          :zksync_execute_transaction => any(),
          :zksync_prove_transaction => any(),
          optional(any()) => any()
        }) :: map()
  def extend_block_json_response(out_json, %Block{} = block) do
    do_add_zksync_info(out_json, block)
  end

  defp do_add_zksync_info(out_json, zksync_entity) do
    res =
      %{}
      |> do_add_l1_txs_info_and_status(%{
        batch_number: get_batch_number(zksync_entity),
        commit_transaction: zksync_entity.zksync_commit_transaction,
        prove_transaction: zksync_entity.zksync_prove_transaction,
        execute_transaction: zksync_entity.zksync_execute_transaction
      })
      |> Map.put("batch_number", get_batch_number(zksync_entity))

    Map.put(out_json, "zksync", res)
  end

  defp get_batch_number(zksync_entity) do
    case Map.get(zksync_entity, :zksync_batch) do
      nil -> nil
      %Ecto.Association.NotLoaded{} -> nil
      value -> value.number
    end
  end

  defp add_l1_txs_info_and_status(out_json, %TransactionBatch{} = batch) do
    do_add_l1_txs_info_and_status(out_json, batch)
  end

  defp do_add_l1_txs_info_and_status(out_json, zksync_item) do
    l1_txs = get_associated_l1_txs(zksync_item)

    out_json
    |> Map.merge(%{
      "status" => batch_status(zksync_item),
      "commit_transaction_hash" => get_2map_data(l1_txs, :commit_transaction, :hash),
      "commit_transaction_timestamp" => get_2map_data(l1_txs, :commit_transaction, :ts),
      "prove_transaction_hash" => get_2map_data(l1_txs, :prove_transaction, :hash),
      "prove_transaction_timestamp" => get_2map_data(l1_txs, :prove_transaction, :ts),
      "execute_transaction_hash" => get_2map_data(l1_txs, :execute_transaction, :hash),
      "execute_transaction_timestamp" => get_2map_data(l1_txs, :execute_transaction, :ts)
    })
  end

  # Extracts the transaction hash and timestamp of the L1 transactions associated with
  # a zksync rollup entity: a batch, transaction or block.
  #
  # ## Parameters
  # - `zksync_item`: A batch, transaction, or block.
  #
  # ## Returns
  # A map containing nested maps describing the corresponding L1 transactions
  defp get_associated_l1_txs(zksync_item) do
    [:commit_transaction, :prove_transaction, :execute_transaction]
    |> Enum.reduce(%{}, fn key, l1_txs ->
      case Map.get(zksync_item, key) do
        nil -> Map.put(l1_txs, key, nil)
        %Ecto.Association.NotLoaded{} -> Map.put(l1_txs, key, nil)
        value -> Map.put(l1_txs, key, %{hash: value.hash, ts: value.timestamp})
      end
    end)
  end

  # Inspects the L1 transactions of the batch to determine the batch status.
  #
  # ## Parameters
  # - `zksync_item`: A batch, transaction, or block.
  #
  # ## Returns
  # A string with one of the predefined statuses
  defp batch_status(zksync_item) do
    cond do
      specified?(zksync_item.execute_transaction) -> "Executed on L1"
      specified?(zksync_item.prove_transaction) -> "Validated on L1"
      specified?(zksync_item.commit_transaction) -> "Sent to L1"
      # The batch entity itself has no batch_number
      not Map.has_key?(zksync_item, :batch_number) -> "Sealed on L2"
      not is_nil(zksync_item.batch_number) -> "Sealed on L2"
      true -> "Processed on L2"
    end
  end

  # Checks if an item associated with a DB entity has an actual value
  #
  # ## Parameters
  # - `associated_item`: an item associated with a DB entity
  #
  # ## Returns
  # - `false`: if the item is nil or not loaded
  # - `true`: if the item has an actual value
  defp specified?(associated_item) do
    case associated_item do
      nil -> false
      %Ecto.Association.NotLoaded{} -> false
      _ -> true
    end
  end

  # Gets the value of an element nested in a map using two keys.
  #
  # Clarification: Returns `map[key1][key2]`
  #
  # ## Parameters
  # - `map`: The high-level map.
  # - `key1`: The key of the element in `map`.
  # - `key2`: The key of the element in the map accessible by `map[key1]`.
  #
  # ## Returns
  # The value of the element, or `nil` if the map accessible by `key1` does not exist.
  defp get_2map_data(map, key1, key2) do
    case Map.get(map, key1) do
      nil -> nil
      inner_map -> Map.get(inner_map, key2)
    end
  end
end
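Side note (not part of the diff): a simplified, self-contained sketch of the status ladder in `batch_status/1`, using plain maps instead of Ecto structs (the two "Sealed on L2" branches keyed on `:batch_number` are omitted here):

```elixir
status = fn item ->
  cond do
    item[:execute_transaction] -> "Executed on L1"
    item[:prove_transaction] -> "Validated on L1"
    item[:commit_transaction] -> "Sent to L1"
    true -> "Processed on L2"
  end
end

status.(%{commit_transaction: %{hash: "0x01"}})
# => "Sent to L1"

status.(%{})
# => "Processed on L2"
```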
@@ -0,0 +1,79 @@
defmodule Explorer.Chain.Import.Runner.ZkSync.BatchBlocks do
  @moduledoc """
    Bulk imports `t:Explorer.Chain.ZkSync.BatchBlock.t/0`.
  """

  require Ecto.Query

  alias Ecto.{Changeset, Multi, Repo}
  alias Explorer.Chain.Import
  alias Explorer.Chain.ZkSync.BatchBlock
  alias Explorer.Prometheus.Instrumenter

  @behaviour Import.Runner

  # milliseconds
  @timeout 60_000

  @type imported :: [BatchBlock.t()]

  @impl Import.Runner
  def ecto_schema_module, do: BatchBlock

  @impl Import.Runner
  def option_key, do: :zksync_batch_blocks

  @impl Import.Runner
  @spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()}
  def imported_table_row do
    %{
      value_type: "[#{ecto_schema_module()}.t()]",
      value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
    }
  end

  @impl Import.Runner
  @spec run(Multi.t(), list(), map()) :: Multi.t()
  def run(multi, changes_list, %{timestamps: timestamps} = options) do
    insert_options =
      options
      |> Map.get(option_key(), %{})
      |> Map.take(~w(on_conflict timeout)a)
      |> Map.put_new(:timeout, @timeout)
      |> Map.put(:timestamps, timestamps)

    Multi.run(multi, :insert_zksync_batch_blocks, fn repo, _ ->
      Instrumenter.block_import_stage_runner(
        fn -> insert(repo, changes_list, insert_options) end,
        :block_referencing,
        :zksync_batch_blocks,
        :zksync_batch_blocks
      )
    end)
  end

  @impl Import.Runner
  def timeout, do: @timeout

  @spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
          {:ok, [BatchBlock.t()]}
          | {:error, [Changeset.t()]}
  def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = _options) when is_list(changes_list) do
    # Enforce ZkSync.BatchBlock ShareLocks order (see docs: sharelock.md)
    ordered_changes_list = Enum.sort_by(changes_list, & &1.hash)

    {:ok, inserted} =
      Import.insert_changes_list(
        repo,
        ordered_changes_list,
        for: BatchBlock,
        returning: true,
        timeout: timeout,
        timestamps: timestamps,
        conflict_target: :hash,
        on_conflict: :nothing
      )

    {:ok, inserted}
  end
end
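Side note (not part of the diff): `insert/3` above sorts the change list by the conflict-target column before the bulk insert, so that concurrent imports take row locks in a consistent order (the ShareLocks convention referenced in the comment). With toy data:

```elixir
changes = [%{hash: "0x0c"}, %{hash: "0x0a"}, %{hash: "0x0b"}]

Enum.sort_by(changes, & &1.hash)
# => [%{hash: "0x0a"}, %{hash: "0x0b"}, %{hash: "0x0c"}]
```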
@@ -0,0 +1,79 @@
defmodule Explorer.Chain.Import.Runner.ZkSync.BatchTransactions do
  @moduledoc """
    Bulk imports `t:Explorer.Chain.ZkSync.BatchTransaction.t/0`.
  """

  require Ecto.Query

  alias Ecto.{Changeset, Multi, Repo}
  alias Explorer.Chain.Import
  alias Explorer.Chain.ZkSync.BatchTransaction
  alias Explorer.Prometheus.Instrumenter

  @behaviour Import.Runner

  # milliseconds
  @timeout 60_000

  @type imported :: [BatchTransaction.t()]

  @impl Import.Runner
  def ecto_schema_module, do: BatchTransaction

  @impl Import.Runner
  def option_key, do: :zksync_batch_transactions

  @impl Import.Runner
  @spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()}
  def imported_table_row do
    %{
      value_type: "[#{ecto_schema_module()}.t()]",
      value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
    }
  end

  @impl Import.Runner
  @spec run(Multi.t(), list(), map()) :: Multi.t()
  def run(multi, changes_list, %{timestamps: timestamps} = options) do
    insert_options =
      options
      |> Map.get(option_key(), %{})
      |> Map.take(~w(on_conflict timeout)a)
      |> Map.put_new(:timeout, @timeout)
      |> Map.put(:timestamps, timestamps)

    Multi.run(multi, :insert_zksync_batch_transactions, fn repo, _ ->
      Instrumenter.block_import_stage_runner(
        fn -> insert(repo, changes_list, insert_options) end,
        :block_referencing,
        :zksync_batch_transactions,
        :zksync_batch_transactions
      )
    end)
  end

  @impl Import.Runner
  def timeout, do: @timeout

  @spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
          {:ok, [BatchTransaction.t()]}
          | {:error, [Changeset.t()]}
  def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = _options) when is_list(changes_list) do
    # Enforce ZkSync.BatchTransaction ShareLocks order (see docs: sharelock.md)
    ordered_changes_list = Enum.sort_by(changes_list, & &1.hash)

    {:ok, inserted} =
      Import.insert_changes_list(
        repo,
        ordered_changes_list,
        for: BatchTransaction,
        returning: true,
        timeout: timeout,
        timestamps: timestamps,
        conflict_target: :hash,
        on_conflict: :nothing
      )

    {:ok, inserted}
  end
end
@@ -0,0 +1,103 @@
defmodule Explorer.Chain.Import.Runner.ZkSync.LifecycleTransactions do
  @moduledoc """
    Bulk imports `t:Explorer.Chain.ZkSync.LifecycleTransaction.t/0`.
  """

  require Ecto.Query

  alias Ecto.{Changeset, Multi, Repo}
  alias Explorer.Chain.Import
  alias Explorer.Chain.ZkSync.LifecycleTransaction
  alias Explorer.Prometheus.Instrumenter

  import Ecto.Query, only: [from: 2]

  @behaviour Import.Runner

  # milliseconds
  @timeout 60_000

  @type imported :: [LifecycleTransaction.t()]

  @impl Import.Runner
  def ecto_schema_module, do: LifecycleTransaction

  @impl Import.Runner
  def option_key, do: :zksync_lifecycle_transactions

  @impl Import.Runner
  @spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()}
  def imported_table_row do
    %{
      value_type: "[#{ecto_schema_module()}.t()]",
      value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
    }
  end

  @impl Import.Runner
  @spec run(Multi.t(), list(), map()) :: Multi.t()
  def run(multi, changes_list, %{timestamps: timestamps} = options) do
    insert_options =
      options
      |> Map.get(option_key(), %{})
      |> Map.take(~w(on_conflict timeout)a)
      |> Map.put_new(:timeout, @timeout)
      |> Map.put(:timestamps, timestamps)

    Multi.run(multi, :insert_zksync_lifecycle_transactions, fn repo, _ ->
      Instrumenter.block_import_stage_runner(
        fn -> insert(repo, changes_list, insert_options) end,
        :block_referencing,
        :zksync_lifecycle_transactions,
        :zksync_lifecycle_transactions
      )
    end)
  end

  @impl Import.Runner
  def timeout, do: @timeout

  @spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
          {:ok, [LifecycleTransaction.t()]}
          | {:error, [Changeset.t()]}
  def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do
    on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0)

    # Enforce ZkSync.LifecycleTransaction ShareLocks order (see docs: sharelock.md)
    ordered_changes_list = Enum.sort_by(changes_list, & &1.id)

    {:ok, inserted} =
      Import.insert_changes_list(
        repo,
        ordered_changes_list,
        for: LifecycleTransaction,
        returning: true,
        timeout: timeout,
        timestamps: timestamps,
        conflict_target: :hash,
        on_conflict: on_conflict
      )

    {:ok, inserted}
  end

  defp default_on_conflict do
    from(
      tx in LifecycleTransaction,
      update: [
        set: [
          # don't update `id` as it is a primary key
          # don't update `hash` as it is a unique index and used for the conflict target
          timestamp: fragment("EXCLUDED.timestamp"),
          inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", tx.inserted_at),
          updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", tx.updated_at)
        ]
      ],
      where:
        fragment(
          "(EXCLUDED.timestamp) IS DISTINCT FROM (?)",
          tx.timestamp
        )
    )
  end
end
@@ -0,0 +1,122 @@
defmodule Explorer.Chain.Import.Runner.ZkSync.TransactionBatches do
  @moduledoc """
    Bulk imports `t:Explorer.Chain.ZkSync.TransactionBatch.t/0`.
  """

  require Ecto.Query

  alias Ecto.{Changeset, Multi, Repo}
  alias Explorer.Chain.Import
  alias Explorer.Chain.ZkSync.TransactionBatch
  alias Explorer.Prometheus.Instrumenter

  import Ecto.Query, only: [from: 2]

  @behaviour Import.Runner

  # milliseconds
  @timeout 60_000

  @type imported :: [TransactionBatch.t()]

  @impl Import.Runner
  def ecto_schema_module, do: TransactionBatch

  @impl Import.Runner
  def option_key, do: :zksync_transaction_batches

  @impl Import.Runner
  @spec imported_table_row() :: %{:value_description => binary(), :value_type => binary()}
  def imported_table_row do
    %{
      value_type: "[#{ecto_schema_module()}.t()]",
      value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
    }
  end

  @impl Import.Runner
  @spec run(Multi.t(), list(), map()) :: Multi.t()
  def run(multi, changes_list, %{timestamps: timestamps} = options) do
    insert_options =
      options
      |> Map.get(option_key(), %{})
      |> Map.take(~w(on_conflict timeout)a)
      |> Map.put_new(:timeout, @timeout)
      |> Map.put(:timestamps, timestamps)

    Multi.run(multi, :insert_zksync_transaction_batches, fn repo, _ ->
      Instrumenter.block_import_stage_runner(
        fn -> insert(repo, changes_list, insert_options) end,
        :block_referencing,
        :zksync_transaction_batches,
        :zksync_transaction_batches
      )
    end)
  end

  @impl Import.Runner
  def timeout, do: @timeout

  @spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
          {:ok, [TransactionBatch.t()]}
          | {:error, [Changeset.t()]}
  def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do
    on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0)

    # Enforce ZkSync.TransactionBatch ShareLocks order (see docs: sharelock.md)
    ordered_changes_list = Enum.sort_by(changes_list, & &1.number)

    {:ok, inserted} =
      Import.insert_changes_list(
        repo,
        ordered_changes_list,
        for: TransactionBatch,
        returning: true,
        timeout: timeout,
        timestamps: timestamps,
        conflict_target: :number,
        on_conflict: on_conflict
      )

    {:ok, inserted}
  end

  defp default_on_conflict do
    from(
      tb in TransactionBatch,
      update: [
        set: [
          # don't update `number` as it is a primary key and used for the conflict target
          timestamp: fragment("EXCLUDED.timestamp"),
          l1_tx_count: fragment("EXCLUDED.l1_tx_count"),
          l2_tx_count: fragment("EXCLUDED.l2_tx_count"),
          root_hash: fragment("EXCLUDED.root_hash"),
          l1_gas_price: fragment("EXCLUDED.l1_gas_price"),
          l2_fair_gas_price: fragment("EXCLUDED.l2_fair_gas_price"),
          start_block: fragment("EXCLUDED.start_block"),
          end_block: fragment("EXCLUDED.end_block"),
          commit_id: fragment("EXCLUDED.commit_id"),
          prove_id: fragment("EXCLUDED.prove_id"),
          execute_id: fragment("EXCLUDED.execute_id"),
          inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", tb.inserted_at),
          updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", tb.updated_at)
        ]
      ],
      where:
        fragment(
          "(EXCLUDED.timestamp, EXCLUDED.l1_tx_count, EXCLUDED.l2_tx_count, EXCLUDED.root_hash, EXCLUDED.l1_gas_price, EXCLUDED.l2_fair_gas_price, EXCLUDED.start_block, EXCLUDED.end_block, EXCLUDED.commit_id, EXCLUDED.prove_id, EXCLUDED.execute_id) IS DISTINCT FROM (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
          tb.timestamp,
          tb.l1_tx_count,
          tb.l2_tx_count,
          tb.root_hash,
          tb.l1_gas_price,
          tb.l2_fair_gas_price,
          tb.start_block,
          tb.end_block,
          tb.commit_id,
          tb.prove_id,
          tb.execute_id
        )
    )
  end
end
@@ -0,0 +1,30 @@
defmodule Explorer.Chain.Import.Stage.AddressReferencing do
  @moduledoc """
  Imports any tables that reference `t:Explorer.Chain.Address.t/0` and that were imported by
  `Explorer.Chain.Import.Stage.Addresses`.
  """

  alias Explorer.Chain.Import.{Runner, Stage}

  @behaviour Stage

  @impl Stage
  def runners,
    do: [
      Runner.Address.CoinBalances,
      Runner.Blocks,
      Runner.Address.CoinBalancesDaily
    ]

  @impl Stage
  def all_runners,
    do: runners()

  @impl Stage
  def multis(runner_to_changes_list, options) do
    {final_multi, final_remaining_runner_to_changes_list} =
      Stage.single_multi(runners(), runner_to_changes_list, options)

    {[final_multi], final_remaining_runner_to_changes_list}
  end
end
@@ -0,0 +1,26 @@
defmodule Explorer.Chain.Import.Stage.Addresses do
  @moduledoc """
  Imports addresses before anything else that references them because an unused address is still valid and recoverable
  if the other stage(s) don't commit.
  """

  alias Explorer.Chain.Import.{Runner, Stage}

  @behaviour Stage

  @runner Runner.Addresses

  @impl Stage
  def runners, do: [@runner]

  @impl Stage
  def all_runners,
    do: runners()

  @chunk_size 50

  @impl Stage
  def multis(runner_to_changes_list, options) do
    Stage.chunk_every(runner_to_changes_list, @runner, @chunk_size, options)
  end
end
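Side note (not part of the diff): the Addresses stage splits its change list into chunks of `@chunk_size` (50) via `Stage.chunk_every/4`, so each chunk can be imported separately. The underlying chunking idea, shown with `Enum.chunk_every/2` and toy data:

```elixir
1..7
|> Enum.to_list()
|> Enum.chunk_every(3)
# => [[1, 2, 3], [4, 5, 6], [7]]
```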
@@ -0,0 +1,37 @@
defmodule Explorer.Chain.ZkSync.BatchBlock do
  @moduledoc "Models a list of blocks related to a batch for ZkSync."

  use Explorer.Schema

  alias Explorer.Chain.{Block, Hash}
  alias Explorer.Chain.ZkSync.TransactionBatch

  @required_attrs ~w(batch_number hash)a

  @type t :: %__MODULE__{
          batch_number: non_neg_integer(),
          batch: %Ecto.Association.NotLoaded{} | TransactionBatch.t() | nil,
          hash: Hash.t(),
          block: %Ecto.Association.NotLoaded{} | Block.t() | nil
        }

  @primary_key false
  schema "zksync_batch_l2_blocks" do
    belongs_to(:batch, TransactionBatch, foreign_key: :batch_number, references: :number, type: :integer)
    belongs_to(:block, Block, foreign_key: :hash, primary_key: true, references: :hash, type: Hash.Full)

    timestamps()
  end

  @doc """
    Validates that the `attrs` are valid.
  """
  @spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t()
  def changeset(%__MODULE__{} = items, attrs \\ %{}) do
    items
    |> cast(attrs, @required_attrs)
    |> validate_required(@required_attrs)
    |> foreign_key_constraint(:batch_number)
    |> unique_constraint(:hash)
  end
end
@@ -0,0 +1,37 @@
defmodule Explorer.Chain.ZkSync.BatchTransaction do
  @moduledoc "Models a list of transactions related to a batch for ZkSync."

  use Explorer.Schema

  alias Explorer.Chain.{Hash, Transaction}
  alias Explorer.Chain.ZkSync.TransactionBatch

  @required_attrs ~w(batch_number hash)a

  @type t :: %__MODULE__{
          batch_number: non_neg_integer(),
          batch: %Ecto.Association.NotLoaded{} | TransactionBatch.t() | nil,
          hash: Hash.t(),
          l2_transaction: %Ecto.Association.NotLoaded{} | Transaction.t() | nil
        }

  @primary_key false
  schema "zksync_batch_l2_transactions" do
    belongs_to(:batch, TransactionBatch, foreign_key: :batch_number, references: :number, type: :integer)
    belongs_to(:l2_transaction, Transaction, foreign_key: :hash, primary_key: true, references: :hash, type: Hash.Full)

    timestamps()
  end

  @doc """
    Validates that the `attrs` are valid.
  """
  @spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t()
  def changeset(%__MODULE__{} = transactions, attrs \\ %{}) do
    transactions
    |> cast(attrs, @required_attrs)
    |> validate_required(@required_attrs)
    |> foreign_key_constraint(:batch_number)
    |> unique_constraint(:hash)
  end
end
@@ -0,0 +1,38 @@
defmodule Explorer.Chain.ZkSync.LifecycleTransaction do
  @moduledoc "Models an L1 lifecycle transaction for ZkSync."

  use Explorer.Schema

  alias Explorer.Chain.Hash
  alias Explorer.Chain.ZkSync.TransactionBatch

  @required_attrs ~w(id hash timestamp)a

  @type t :: %__MODULE__{
          hash: Hash.t(),
          timestamp: DateTime.t()
        }

  @primary_key {:id, :integer, autogenerate: false}
  schema "zksync_lifecycle_l1_transactions" do
    field(:hash, Hash.Full)
    field(:timestamp, :utc_datetime_usec)

    has_many(:committed_batches, TransactionBatch, foreign_key: :commit_id)
    has_many(:proven_batches, TransactionBatch, foreign_key: :prove_id)
    has_many(:executed_batches, TransactionBatch, foreign_key: :execute_id)

    timestamps()
  end

  @doc """
    Validates that the `attrs` are valid.
  """
  @spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t()
  def changeset(%__MODULE__{} = txn, attrs \\ %{}) do
    txn
    |> cast(attrs, @required_attrs)
    |> validate_required(@required_attrs)
    |> unique_constraint(:id)
  end
end
@@ -0,0 +1,339 @@
defmodule Explorer.Chain.ZkSync.Reader do
  @moduledoc "Contains read functions for zksync modules."

  import Ecto.Query,
    only: [
      from: 2,
      limit: 2,
      order_by: 2,
      where: 2,
      where: 3
    ]

  import Explorer.Chain, only: [select_repo: 1]

  alias Explorer.Chain.ZkSync.{
    BatchTransaction,
    LifecycleTransaction,
    TransactionBatch
  }

  alias Explorer.{Chain, PagingOptions, Repo}

  @doc """
    Returns the total number of batches imported into the `zksync_transaction_batches` table.

    ## Parameters
    - `options`: passed to `Chain.select_repo()`

    ## Returns
    The total number of batches
  """
  @spec batches_count(keyword()) :: any()
  def batches_count(options) do
    TransactionBatch
    |> select_repo(options).aggregate(:count, timeout: :infinity)
  end

  @doc """
    Retrieves a batch from the `zksync_transaction_batches` table by its number, or the latest batch if `:latest` is used.

    ## Parameters
    - `number`: either the batch number or `:latest` to get the latest batch available in the DB
    - `options`: passed to `Chain.select_repo()`

    ## Returns
    - `{:ok, Explorer.Chain.ZkSync.TransactionBatch}` if the batch is found
    - `{:error, :not_found}` if there is no batch with such a number
  """
  @spec batch(:latest | binary() | integer(), keyword()) ::
          {:error, :not_found} | {:ok, Explorer.Chain.ZkSync.TransactionBatch}
  def batch(number, options)

  def batch(:latest, options) when is_list(options) do
    TransactionBatch
    |> order_by(desc: :number)
    |> limit(1)
    |> select_repo(options).one()
    |> case do
      nil -> {:error, :not_found}
      batch -> {:ok, batch}
    end
  end

  def batch(number, options)
      when (is_integer(number) or is_binary(number)) and
             is_list(options) do
    necessity_by_association = Keyword.get(options, :necessity_by_association, %{})

    TransactionBatch
    |> where(number: ^number)
    |> Chain.join_associations(necessity_by_association)
    |> select_repo(options).one()
    |> case do
      nil -> {:error, :not_found}
      batch -> {:ok, batch}
    end
  end
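Side note (not part of the diff): both `batch/2` clauses normalise the repo result into a tagged tuple, which is what lets the controller's `case` fall through to the API fallback on `:not_found`. The bare pattern, as a self-contained sketch:

```elixir
wrap = fn
  nil -> {:error, :not_found}
  value -> {:ok, value}
end

wrap.(nil)
# => {:error, :not_found}

wrap.(%{number: 42})
# => {:ok, %{number: 42}}
```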
||||||
|
|
||||||
|
@doc """ |
||||||
|
Receives a list of batches from the `zksync_transaction_batches` table within the range of batch numbers |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `start_number`: The start of the batch numbers range. |
||||||
|
- `end_number`: The end of the batch numbers range. |
||||||
|
- `options`: Options passed to `Chain.select_repo()`. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A list of `Explorer.Chain.ZkSync.TransactionBatch` if at least one batch exists within the range. |
||||||
|
- An empty list (`[]`) if no batches within the range are found in the database. |
||||||
|
""" |
||||||
|
@spec batches(integer(), integer(), keyword()) :: [Explorer.Chain.ZkSync.TransactionBatch] |
||||||
|
def batches(start_number, end_number, options) |
||||||
|
when is_integer(start_number) and |
||||||
|
is_integer(end_number) and |
||||||
|
is_list(options) do |
||||||
|
necessity_by_association = Keyword.get(options, :necessity_by_association, %{}) |
||||||
|
|
||||||
|
base_query = from(tb in TransactionBatch, order_by: [desc: tb.number]) |
||||||
|
|
||||||
|
base_query |
||||||
|
|> where([tb], tb.number >= ^start_number and tb.number <= ^end_number) |
||||||
|
|> Chain.join_associations(necessity_by_association) |
||||||
|
|> select_repo(options).all() |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Receives a list of batches from the `zksync_transaction_batches` table with the numbers defined in the input list. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `numbers`: The list of batch numbers to retrieve from the database. |
||||||
|
- `options`: Options passed to `Chain.select_repo()`. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A list of `Explorer.Chain.ZkSync.TransactionBatch` if at least one batch matches the numbers from the list. The output list could be less than the input list. |
||||||
|
- An empty list (`[]`) if no batches with numbers from the list are found. |
||||||
|
""" |
||||||
|
@spec batches(maybe_improper_list(integer(), []), keyword()) :: [Explorer.Chain.ZkSync.TransactionBatch] |
||||||
|
def batches(numbers, options) |
||||||
|
when is_list(numbers) and |
||||||
|
is_list(options) do |
||||||
|
necessity_by_association = Keyword.get(options, :necessity_by_association, %{}) |
||||||
|
|
||||||
|
base_query = from(tb in TransactionBatch, order_by: [desc: tb.number]) |
||||||
|
|
||||||
|
base_query |
||||||
|
|> where([tb], tb.number in ^numbers) |
||||||
|
|> Chain.join_associations(necessity_by_association) |
||||||
|
|> select_repo(options).all() |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Receives a list of batches from the `zksync_transaction_batches` table. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `options`: Options passed to `Chain.select_repo()`. (Optional) |
||||||
|
|
||||||
|
## Returns |
||||||
|
- If the option `confirmed?` is set, returns the ten latest committed batches (`Explorer.Chain.ZkSync.TransactionBatch`). |
||||||
|
- Returns a list of `Explorer.Chain.ZkSync.TransactionBatch` based on the paging options if `confirmed?` is not set. |
||||||
|
""" |
||||||
|
@spec batches(keyword()) :: [Explorer.Chain.ZkSync.TransactionBatch] |
||||||
|
@spec batches() :: [Explorer.Chain.ZkSync.TransactionBatch] |
||||||
|
def batches(options \\ []) when is_list(options) do |
||||||
|
necessity_by_association = Keyword.get(options, :necessity_by_association, %{}) |
||||||
|
|
||||||
|
base_query = |
||||||
|
from(tb in TransactionBatch, |
||||||
|
order_by: [desc: tb.number] |
||||||
|
) |
||||||
|
|
||||||
|
query = |
||||||
|
if Keyword.get(options, :confirmed?, false) do |
||||||
|
base_query |
||||||
|
|> Chain.join_associations(necessity_by_association) |
||||||
|
|> where([tb], not is_nil(tb.commit_id) and tb.commit_id > 0) |
||||||
|
|> limit(10) |
||||||
|
else |
||||||
|
paging_options = Keyword.get(options, :paging_options, Chain.default_paging_options()) |
||||||
|
|
||||||
|
base_query |
||||||
|
|> Chain.join_associations(necessity_by_association) |
||||||
|
|> page_batches(paging_options) |
||||||
|
|> limit(^paging_options.page_size) |
||||||
|
end |
||||||
|
|
||||||
|
select_repo(options).all(query) |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Receives a list of transactions from the `zksync_batch_l2_transactions` table included in a specific batch. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `batch_number`: The number of batch which transactions were included to L1 as part of. |
||||||
|
- `options`: Options passed to `Chain.select_repo()`. (Optional) |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A list of `Explorer.Chain.ZkSync.BatchTransaction` belonging to the specified batch. |
||||||
|
""" |
||||||
|
@spec batch_transactions(non_neg_integer()) :: [Explorer.Chain.ZkSync.BatchTransaction] |
||||||
|
@spec batch_transactions(non_neg_integer(), keyword()) :: [Explorer.Chain.ZkSync.BatchTransaction] |
||||||
|
def batch_transactions(batch_number, options \\ []) |
||||||
|
when is_integer(batch_number) or |
||||||
|
is_binary(batch_number) do |
||||||
|
query = from(batch in BatchTransaction, where: batch.batch_number == ^batch_number) |
||||||
|
|
||||||
|
select_repo(options).all(query) |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Gets the number of the earliest batch in the `zksync_transaction_batches` table where the commitment transaction is not set. |
||||||
|
Batch #0 is filtered out, as it does not have a linked commitment transaction. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- The number of the batch if it exists, otherwise `nil`. `nil` means that either no batches have been imported yet, all imported batches are already marked as committed, or Batch #0 is the only available batch.
||||||
|
""" |
||||||
|
@spec earliest_sealed_batch_number() :: non_neg_integer() | nil |
||||||
|
def earliest_sealed_batch_number do |
||||||
|
query = |
||||||
|
from(tb in TransactionBatch, |
||||||
|
select: tb.number, |
||||||
|
where: is_nil(tb.commit_id) and tb.number > 0, |
||||||
|
order_by: [asc: tb.number], |
||||||
|
limit: 1 |
||||||
|
) |
||||||
|
|
||||||
|
query |
||||||
|
|> Repo.one() |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Gets the number of the earliest batch in the `zksync_transaction_batches` table where the proving transaction is not set. |
||||||
|
Batch #0 is filtered out, as it does not have a linked proving transaction. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- The number of the batch if it exists, otherwise `nil`. `nil` means that either no batches have been imported yet, all imported batches are already marked as proven, or Batch #0 is the only available batch.
||||||
|
""" |
||||||
|
@spec earliest_unproven_batch_number() :: non_neg_integer() | nil |
||||||
|
def earliest_unproven_batch_number do |
||||||
|
query = |
||||||
|
from(tb in TransactionBatch, |
||||||
|
select: tb.number, |
||||||
|
where: is_nil(tb.prove_id) and tb.number > 0, |
||||||
|
order_by: [asc: tb.number], |
||||||
|
limit: 1 |
||||||
|
) |
||||||
|
|
||||||
|
query |
||||||
|
|> Repo.one() |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Gets the number of the earliest batch in the `zksync_transaction_batches` table where the executing transaction is not set. |
||||||
|
Batch #0 is filtered out, as it does not have a linked executing transaction. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- The number of the batch if it exists, otherwise `nil`. `nil` means that either no batches have been imported yet, all imported batches are already marked as executed, or Batch #0 is the only available batch.
||||||
|
""" |
||||||
|
@spec earliest_unexecuted_batch_number() :: non_neg_integer() | nil |
||||||
|
def earliest_unexecuted_batch_number do |
||||||
|
query = |
||||||
|
from(tb in TransactionBatch, |
||||||
|
select: tb.number, |
||||||
|
where: is_nil(tb.execute_id) and tb.number > 0, |
||||||
|
order_by: [asc: tb.number], |
||||||
|
limit: 1 |
||||||
|
) |
||||||
|
|
||||||
|
query |
||||||
|
|> Repo.one() |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Gets the number of the oldest batch from the `zksync_transaction_batches` table. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- The number of the batch if it exists, otherwise `nil`. `nil` means that no batches have been imported yet.
||||||
|
""" |
||||||
|
@spec oldest_available_batch_number() :: non_neg_integer() | nil |
||||||
|
def oldest_available_batch_number do |
||||||
|
query = |
||||||
|
from(tb in TransactionBatch, |
||||||
|
select: tb.number, |
||||||
|
order_by: [asc: tb.number], |
||||||
|
limit: 1 |
||||||
|
) |
||||||
|
|
||||||
|
query |
||||||
|
|> Repo.one() |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Gets the number of the youngest (the most recent) imported batch from the `zksync_transaction_batches` table. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- The number of the batch if it exists, otherwise `nil`. `nil` means that no batches have been imported yet.
||||||
|
""" |
||||||
|
@spec latest_available_batch_number() :: non_neg_integer() | nil |
||||||
|
def latest_available_batch_number do |
||||||
|
query = |
||||||
|
from(tb in TransactionBatch, |
||||||
|
select: tb.number, |
||||||
|
order_by: [desc: tb.number], |
||||||
|
limit: 1 |
||||||
|
) |
||||||
|
|
||||||
|
query |
||||||
|
|> Repo.one() |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Reads a list of L1 transactions by their hashes from the `zksync_lifecycle_l1_transactions` table. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `l1_tx_hashes`: A list of hashes to retrieve L1 transactions for. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A list of `Explorer.Chain.ZkSync.LifecycleTransaction` corresponding to the hashes from the input list. The output list may be smaller than the input list. |
||||||
|
""" |
||||||
|
@spec lifecycle_transactions(maybe_improper_list(binary(), [])) :: [Explorer.Chain.ZkSync.LifecycleTransaction] |
||||||
|
def lifecycle_transactions(l1_tx_hashes) do |
||||||
|
query = |
||||||
|
from( |
||||||
|
lt in LifecycleTransaction, |
||||||
|
select: {lt.hash, lt.id}, |
||||||
|
where: lt.hash in ^l1_tx_hashes |
||||||
|
) |
||||||
|
|
||||||
|
Repo.all(query, timeout: :infinity) |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Determines the next index for the L1 transaction available in the `zksync_lifecycle_l1_transactions` table. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- The next available index. If there are no L1 transactions imported yet, it will return `1`. |
||||||
|
""" |
||||||
|
@spec next_id() :: non_neg_integer() |
||||||
|
def next_id do |
||||||
|
query = |
||||||
|
from(lt in LifecycleTransaction, |
||||||
|
select: lt.id, |
||||||
|
order_by: [desc: lt.id], |
||||||
|
limit: 1 |
||||||
|
) |
||||||
|
|
||||||
|
last_id = |
||||||
|
query |
||||||
|
|> Repo.one() |
||||||
|
|> Kernel.||(0) |
||||||
|
|
||||||
|
last_id + 1 |
||||||
|
end |
||||||
|
|
||||||
|
defp page_batches(query, %PagingOptions{key: nil}), do: query |
||||||
|
|
||||||
|
defp page_batches(query, %PagingOptions{key: {number}}) do |
||||||
|
from(tb in query, where: tb.number < ^number) |
||||||
|
end |
||||||
|
end |
@ -0,0 +1,83 @@ |
|||||||
|
defmodule Explorer.Chain.ZkSync.TransactionBatch do |
||||||
|
@moduledoc "Models a batch of transactions for ZkSync." |
||||||
|
|
||||||
|
use Explorer.Schema |
||||||
|
|
||||||
|
alias Explorer.Chain.{ |
||||||
|
Block, |
||||||
|
Hash, |
||||||
|
Wei |
||||||
|
} |
||||||
|
|
||||||
|
alias Explorer.Chain.ZkSync.{BatchTransaction, LifecycleTransaction} |
||||||
|
|
||||||
|
@optional_attrs ~w(commit_id prove_id execute_id)a |
||||||
|
|
||||||
|
@required_attrs ~w(number timestamp l1_tx_count l2_tx_count root_hash l1_gas_price l2_fair_gas_price start_block end_block)a |
||||||
|
|
||||||
|
@type t :: %__MODULE__{ |
||||||
|
number: non_neg_integer(), |
||||||
|
timestamp: DateTime.t(), |
||||||
|
l1_tx_count: non_neg_integer(), |
||||||
|
l2_tx_count: non_neg_integer(), |
||||||
|
root_hash: Hash.t(), |
||||||
|
l1_gas_price: Wei.t(), |
||||||
|
l2_fair_gas_price: Wei.t(), |
||||||
|
start_block: Block.block_number(), |
||||||
|
end_block: Block.block_number(), |
||||||
|
commit_id: non_neg_integer() | nil, |
||||||
|
commit_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil, |
||||||
|
prove_id: non_neg_integer() | nil, |
||||||
|
prove_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil, |
||||||
|
execute_id: non_neg_integer() | nil, |
||||||
|
execute_transaction: %Ecto.Association.NotLoaded{} | LifecycleTransaction.t() | nil |
||||||
|
} |
||||||
|
|
||||||
|
@primary_key {:number, :integer, autogenerate: false} |
||||||
|
schema "zksync_transaction_batches" do |
||||||
|
field(:timestamp, :utc_datetime_usec) |
||||||
|
field(:l1_tx_count, :integer) |
||||||
|
field(:l2_tx_count, :integer) |
||||||
|
field(:root_hash, Hash.Full) |
||||||
|
field(:l1_gas_price, Wei) |
||||||
|
field(:l2_fair_gas_price, Wei) |
||||||
|
field(:start_block, :integer) |
||||||
|
field(:end_block, :integer) |
||||||
|
|
||||||
|
belongs_to(:commit_transaction, LifecycleTransaction, |
||||||
|
foreign_key: :commit_id, |
||||||
|
references: :id, |
||||||
|
type: :integer |
||||||
|
) |
||||||
|
|
||||||
|
belongs_to(:prove_transaction, LifecycleTransaction, |
||||||
|
foreign_key: :prove_id, |
||||||
|
references: :id, |
||||||
|
type: :integer |
||||||
|
) |
||||||
|
|
||||||
|
belongs_to(:execute_transaction, LifecycleTransaction, |
||||||
|
foreign_key: :execute_id, |
||||||
|
references: :id, |
||||||
|
type: :integer |
||||||
|
) |
||||||
|
|
||||||
|
has_many(:l2_transactions, BatchTransaction, foreign_key: :batch_number) |
||||||
|
|
||||||
|
timestamps() |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Validates that the `attrs` are valid. |
||||||
|
""" |
||||||
|
@spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t() |
||||||
|
def changeset(%__MODULE__{} = batches, attrs \\ %{}) do |
||||||
|
batches |
||||||
|
|> cast(attrs, @required_attrs ++ @optional_attrs) |
||||||
|
|> validate_required(@required_attrs) |
||||||
|
|> foreign_key_constraint(:commit_id) |
||||||
|
|> foreign_key_constraint(:prove_id) |
||||||
|
|> foreign_key_constraint(:execute_id) |
||||||
|
|> unique_constraint(:number) |
||||||
|
end |
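# Illustrative changeset usage (hypothetical attribute values), relying on the casts and
# validations defined above:
#
#   TransactionBatch.changeset(%TransactionBatch{}, %{
#     number: 100,
#     timestamp: ~U[2024-03-01 00:00:00.000000Z],
#     l1_tx_count: 1,
#     l2_tx_count: 10,
#     root_hash: "0x" <> String.duplicate("ab", 32),
#     l1_gas_price: 1_000_000_000,
#     l2_fair_gas_price: 250_000_000,
#     start_block: 1_000,
#     end_block: 1_009
#   })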
||||||
|
end |
@ -0,0 +1,17 @@ |
|||||||
|
defmodule Explorer.Repo.ZkSync.Migrations.MakeTransactionRSVOptional do |
||||||
|
use Ecto.Migration |
||||||
|
|
||||||
|
def change do |
||||||
|
alter table(:transactions) do |
||||||
|
modify(:r, :numeric, precision: 100, null: true) |
||||||
|
end |
||||||
|
|
||||||
|
alter table(:transactions) do |
||||||
|
modify(:s, :numeric, precision: 100, null: true) |
||||||
|
end |
||||||
|
|
||||||
|
alter table(:transactions) do |
||||||
|
modify(:v, :numeric, precision: 100, null: true) |
||||||
|
end |
||||||
|
end |
||||||
|
end |
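# The three `modify/3` calls above roughly correspond to the following SQL (illustrative only):
#
#   ALTER TABLE transactions ALTER COLUMN r TYPE numeric(100, 0), ALTER COLUMN r DROP NOT NULL;
#   ALTER TABLE transactions ALTER COLUMN s TYPE numeric(100, 0), ALTER COLUMN s DROP NOT NULL;
#   ALTER TABLE transactions ALTER COLUMN v TYPE numeric(100, 0), ALTER COLUMN v DROP NOT NULL;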
@ -0,0 +1,82 @@ |
|||||||
|
defmodule Explorer.Repo.ZkSync.Migrations.CreateZkSyncTables do |
||||||
|
use Ecto.Migration |
||||||
|
|
||||||
|
def change do |
||||||
|
create table(:zksync_lifecycle_l1_transactions, primary_key: false) do |
||||||
|
add(:id, :integer, null: false, primary_key: true) |
||||||
|
add(:hash, :bytea, null: false) |
||||||
|
add(:timestamp, :"timestamp without time zone", null: false) |
||||||
|
timestamps(null: false, type: :utc_datetime_usec) |
||||||
|
end |
||||||
|
|
||||||
|
create(unique_index(:zksync_lifecycle_l1_transactions, :hash)) |
||||||
|
|
||||||
|
create table(:zksync_transaction_batches, primary_key: false) do |
||||||
|
add(:number, :integer, null: false, primary_key: true) |
||||||
|
add(:timestamp, :"timestamp without time zone", null: false) |
||||||
|
add(:l1_tx_count, :integer, null: false) |
||||||
|
add(:l2_tx_count, :integer, null: false) |
||||||
|
add(:root_hash, :bytea, null: false) |
||||||
|
add(:l1_gas_price, :numeric, precision: 100, null: false) |
||||||
|
add(:l2_fair_gas_price, :numeric, precision: 100, null: false) |
||||||
|
add(:start_block, :integer, null: false) |
||||||
|
add(:end_block, :integer, null: false) |
||||||
|
|
||||||
|
add( |
||||||
|
:commit_id, |
||||||
|
references(:zksync_lifecycle_l1_transactions, on_delete: :restrict, on_update: :update_all, type: :integer), |
||||||
|
null: true |
||||||
|
) |
||||||
|
|
||||||
|
add( |
||||||
|
:prove_id, |
||||||
|
references(:zksync_lifecycle_l1_transactions, on_delete: :restrict, on_update: :update_all, type: :integer), |
||||||
|
null: true |
||||||
|
) |
||||||
|
|
||||||
|
add( |
||||||
|
:execute_id, |
||||||
|
references(:zksync_lifecycle_l1_transactions, on_delete: :restrict, on_update: :update_all, type: :integer), |
||||||
|
null: true |
||||||
|
) |
||||||
|
|
||||||
|
timestamps(null: false, type: :utc_datetime_usec) |
||||||
|
end |
||||||
|
|
||||||
|
create table(:zksync_batch_l2_transactions, primary_key: false) do |
||||||
|
add( |
||||||
|
:batch_number, |
||||||
|
references(:zksync_transaction_batches, |
||||||
|
column: :number, |
||||||
|
on_delete: :delete_all, |
||||||
|
on_update: :update_all, |
||||||
|
type: :integer |
||||||
|
), |
||||||
|
null: false |
||||||
|
) |
||||||
|
|
||||||
|
add(:hash, :bytea, null: false, primary_key: true) |
||||||
|
timestamps(null: false, type: :utc_datetime_usec) |
||||||
|
end |
||||||
|
|
||||||
|
create(index(:zksync_batch_l2_transactions, :batch_number)) |
||||||
|
|
||||||
|
create table(:zksync_batch_l2_blocks, primary_key: false) do |
||||||
|
add( |
||||||
|
:batch_number, |
||||||
|
references(:zksync_transaction_batches, |
||||||
|
column: :number, |
||||||
|
on_delete: :delete_all, |
||||||
|
on_update: :update_all, |
||||||
|
type: :integer |
||||||
|
), |
||||||
|
null: false |
||||||
|
) |
||||||
|
|
||||||
|
add(:hash, :bytea, null: false, primary_key: true) |
||||||
|
timestamps(null: false, type: :utc_datetime_usec) |
||||||
|
end |
||||||
|
|
||||||
|
create(index(:zksync_batch_l2_blocks, :batch_number)) |
||||||
|
end |
||||||
|
end |
@ -0,0 +1,242 @@ |
|||||||
|
defmodule Indexer.Fetcher.ZkSync.BatchesStatusTracker do |
||||||
|
@moduledoc """ |
||||||
|
Updates batches statuses and imports historical batches to the `zksync_transaction_batches` table. |
||||||
|
|
||||||
|
Repetitiveness is supported by sending the following statuses every `recheck_interval` seconds: |
||||||
|
- `:check_committed`: Discover batches committed to L1 |
||||||
|
- `:check_proven`: Discover batches proven in L1 |
||||||
|
- `:check_executed`: Discover batches executed on L1 |
||||||
|
- `:recover_batches`: Recover missed batches found during the handling of the three previous messages |
||||||
|
- `:check_historical`: Check whether the chain of imported batches starts with Batch #0 and, if not, fetch missing historical batches
||||||
|
|
||||||
|
The initial message is `:check_committed`. If it is discovered that updating batches |
||||||
|
in the `zksync_transaction_batches` table is not possible because some are missing, |
||||||
|
`:recover_batches` is sent. The next messages are `:check_proven` and `:check_executed`. |
||||||
|
Both could result in sending `:recover_batches` as well. |
||||||
|
|
||||||
|
Whenever a status-check handler has to switch to `:recover_batches`, the original message is
re-sent after the recovery finishes so that the loop returns to its previous "progressing" state.
For example, if `:recover_batches` is triggered while handling `:check_committed`, `:check_committed`
is sent again once the batch recovery completes. The same applies to `:check_proven` and `:check_executed`.
||||||
|
|
||||||
|
The last message in the loop is `:check_historical`. |
||||||
|
|
||||||
|
|---------------------------------------------------------------------------| |
||||||
|
|-> check_committed -> check_proven -> check_executed -> check_historical ->| |
||||||
|
| ^ | ^ | ^ |
||||||
|
v | v | v | |
||||||
|
recover_batches recover_batches recover_batches |
||||||
|
|
||||||
|
If a batch status change is discovered during handling of `check_committed`, `check_proven`, |
||||||
|
or `check_executed` messages, the corresponding L1 transactions are imported and associated |
||||||
|
with the batches. Rollup transactions and blocks are not re-associated since it is assumed |
||||||
|
to be done by `Indexer.Fetcher.ZkSync.TransactionBatch` or during handling of |
||||||
|
the `recover_batches` message. |
||||||
|
|
||||||
|
The `recover_batches` handler downloads batch information from RPC and sets its actual L1 state |
||||||
|
by linking with L1 transactions. |
||||||
|
|
||||||
|
The `check_historical` message initiates the check if the tail of the batch chain is Batch 0. |
||||||
|
If the tail is missing, batches are downloaded from RPC in chunks of `batches_max_range` in every |
||||||
|
iteration. The batches are imported together with associated L1 transactions. |
||||||
|
""" |
||||||
|
|
||||||
|
use GenServer |
||||||
|
use Indexer.Fetcher |
||||||
|
|
||||||
|
require Logger |
||||||
|
|
||||||
|
# alias Explorer.Chain.Events.Publisher |
||||||
|
# TODO: publish event when new committed batches appear |
||||||
|
|
||||||
|
alias Indexer.Fetcher.ZkSync.Discovery.Workers |
||||||
|
alias Indexer.Fetcher.ZkSync.StatusTracking.{Committed, Executed, Proven} |
||||||
|
|
||||||
|
def child_spec(start_link_arguments) do |
||||||
|
spec = %{ |
||||||
|
id: __MODULE__, |
||||||
|
start: {__MODULE__, :start_link, start_link_arguments}, |
||||||
|
restart: :transient, |
||||||
|
type: :worker |
||||||
|
} |
||||||
|
|
||||||
|
Supervisor.child_spec(spec, []) |
||||||
|
end |
||||||
|
|
||||||
|
def start_link(args, gen_server_options \\ []) do |
||||||
|
GenServer.start_link(__MODULE__, args, Keyword.put_new(gen_server_options, :name, __MODULE__)) |
||||||
|
end |
||||||
|
|
||||||
|
@impl GenServer |
||||||
|
def init(args) do |
||||||
|
Logger.metadata(fetcher: :zksync_batches_tracker) |
||||||
|
|
||||||
|
config_tracker = Application.get_all_env(:indexer)[Indexer.Fetcher.ZkSync.BatchesStatusTracker] |
||||||
|
l1_rpc = config_tracker[:zksync_l1_rpc] |
||||||
|
recheck_interval = config_tracker[:recheck_interval] |
||||||
|
config_fetcher = Application.get_all_env(:indexer)[Indexer.Fetcher.ZkSync.TransactionBatch] |
||||||
|
chunk_size = config_fetcher[:chunk_size] |
||||||
|
batches_max_range = config_fetcher[:batches_max_range] |
||||||
|
|
||||||
|
Process.send(self(), :check_committed, []) |
||||||
|
|
||||||
|
{:ok, |
||||||
|
%{ |
||||||
|
config: %{ |
||||||
|
json_l2_rpc_named_arguments: args[:json_rpc_named_arguments], |
||||||
|
json_l1_rpc_named_arguments: [ |
||||||
|
transport: EthereumJSONRPC.HTTP, |
||||||
|
transport_options: [ |
||||||
|
http: EthereumJSONRPC.HTTP.HTTPoison, |
||||||
|
url: l1_rpc, |
||||||
|
http_options: [ |
||||||
|
recv_timeout: :timer.minutes(10), |
||||||
|
timeout: :timer.minutes(10), |
||||||
|
hackney: [pool: :ethereum_jsonrpc] |
||||||
|
] |
||||||
|
] |
||||||
|
], |
||||||
|
recheck_interval: recheck_interval, |
||||||
|
chunk_size: chunk_size, |
||||||
|
batches_max_range: batches_max_range |
||||||
|
}, |
||||||
|
data: %{} |
||||||
|
}} |
||||||
|
end |
||||||
|
|
||||||
|
@impl GenServer |
||||||
|
def handle_info({ref, _result}, state) do |
||||||
|
Process.demonitor(ref, [:flush]) |
||||||
|
{:noreply, state} |
||||||
|
end |
||||||
|
|
||||||
|
# Handles the `:check_historical` message to download historical batches from RPC if necessary and |
||||||
|
# import them to the `zksync_transaction_batches` table. The batches are imported together with L1 |
||||||
|
# transactions associations, rollup blocks and transactions. |
||||||
|
# Since it is the final handler in the loop, it schedules sending the `:check_committed` message |
||||||
|
# to initiate the next iteration. The sending of the message is delayed, taking into account |
||||||
|
# the time remaining after the previous handlers' execution. |
||||||
|
# |
||||||
|
# ## Parameters |
||||||
|
# - `:check_historical`: the message triggering the handler |
||||||
|
# - `state`: current state of the fetcher containing both the fetcher configuration |
||||||
|
# and data re-used by different handlers. |
||||||
|
# |
||||||
|
# ## Returns |
||||||
|
# - `{:noreply, new_state}` where `new_state` contains an empty `data` map
||||||
|
@impl GenServer |
||||||
|
def handle_info(:check_historical, state) |
||||||
|
when is_map(state) and is_map_key(state, :config) and is_map_key(state, :data) and |
||||||
|
is_map_key(state.config, :recheck_interval) and is_map_key(state.config, :batches_max_range) and |
||||||
|
is_map_key(state.config, :json_l2_rpc_named_arguments) and |
||||||
|
is_map_key(state.config, :chunk_size) do |
||||||
|
{handle_duration, _} = |
||||||
|
:timer.tc(&Workers.batches_catchup/1, [ |
||||||
|
%{ |
||||||
|
batches_max_range: state.config.batches_max_range, |
||||||
|
chunk_size: state.config.chunk_size, |
||||||
|
json_rpc_named_arguments: state.config.json_l2_rpc_named_arguments |
||||||
|
} |
||||||
|
]) |
||||||
|
|
||||||
|
Process.send_after( |
||||||
|
self(), |
||||||
|
:check_committed, |
||||||
|
max(:timer.seconds(state.config.recheck_interval) - div(update_duration(state.data, handle_duration), 1000), 0) |
||||||
|
) |
||||||
|
|
||||||
|
{:noreply, %{state | data: %{}}} |
||||||
|
end |
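# Sketch of the rescheduling arithmetic above (hypothetical numbers): with a `recheck_interval`
# of 60 seconds and handlers that took 14 seconds in total (`update_duration/2` accumulates
# microseconds returned by `:timer.tc/2`):
#
#   max(:timer.seconds(60) - div(14_000_000, 1000), 0)
#   #=> 46_000 milliseconds until the next :check_committed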
||||||
|
|
||||||
|
# Handles the `:recover_batches` message to download a set of batches from RPC and imports them |
||||||
|
# to the `zksync_transaction_batches` table. It is expected that the message is sent from handlers updating |
||||||
|
# batches statuses when they discover the absence of batches in the `zksync_transaction_batches` table. |
||||||
|
# The batches are imported together with L1 transactions associations, rollup blocks, and transactions. |
||||||
|
# |
||||||
|
# ## Parameters |
||||||
|
# - `:recover_batches`: the message triggering the handler |
||||||
|
# - `state`: current state of the fetcher containing both the fetcher configuration |
||||||
|
# and data related to the batches recovery: |
||||||
|
# - `state.data.batches`: list of the batches to recover |
||||||
|
# - `state.data.switched_from`: the message to send after the batch recovery |
||||||
|
# |
||||||
|
# ## Returns |
||||||
|
# - `{:noreply, new_state}` where `new_state` contains updated `duration` of the iteration |
||||||
|
@impl GenServer |
||||||
|
def handle_info(:recover_batches, state) |
||||||
|
when is_map(state) and is_map_key(state, :config) and is_map_key(state, :data) and |
||||||
|
is_map_key(state.config, :json_l2_rpc_named_arguments) and is_map_key(state.config, :chunk_size) and |
||||||
|
is_map_key(state.data, :batches) and is_map_key(state.data, :switched_from) do |
||||||
|
{handle_duration, _} = |
||||||
|
:timer.tc( |
||||||
|
&Workers.get_full_batches_info_and_import/2, |
||||||
|
[ |
||||||
|
state.data.batches, |
||||||
|
%{ |
||||||
|
chunk_size: state.config.chunk_size, |
||||||
|
json_rpc_named_arguments: state.config.json_l2_rpc_named_arguments |
||||||
|
} |
||||||
|
] |
||||||
|
) |
||||||
|
|
||||||
|
Process.send(self(), state.data.switched_from, []) |
||||||
|
|
||||||
|
{:noreply, %{state | data: %{duration: update_duration(state.data, handle_duration)}}} |
||||||
|
end |
||||||
|
|
||||||
|
# Handles `:check_committed`, `:check_proven`, and `:check_executed` messages to update the |
||||||
|
# statuses of batches by associating L1 transactions with them. For different messages, it invokes |
||||||
|
# different underlying functions due to different natures of discovering batches with changed status. |
||||||
|
# Another reason why statuses are tracked differently is the different pace of status changes:
# a batch is committed within minutes after sealing, proven within hours, and executed in about a day.
||||||
|
# Depending on the value returned from the underlying function, either a message (`:check_proven`, |
||||||
|
# `:check_executed`, or `:check_historical`) to switch to the next status checker is sent, or a list |
||||||
|
# of batches to recover is provided together with `:recover_batches`. |
||||||
|
# |
||||||
|
# ## Parameters |
||||||
|
# - `input`: one of `:check_committed`, `:check_proven`, and `:check_executed` |
||||||
|
# - `state`: the current state of the fetcher containing both the fetcher configuration |
||||||
|
# and data reused by different handlers. |
||||||
|
# |
||||||
|
# ## Returns |
||||||
|
# - `{:noreply, new_state}` where `new_state` contains the updated `duration` of the iteration, |
||||||
|
# could also contain the list of batches to recover and the message to return back to |
||||||
|
# the corresponding status update checker. |
||||||
|
@impl GenServer |
||||||
|
def handle_info(input, state) |
||||||
|
when input in [:check_committed, :check_proven, :check_executed] do |
||||||
|
{output, func} = |
||||||
|
case input do |
||||||
|
:check_committed -> {:check_proven, &Committed.look_for_batches_and_update/1} |
||||||
|
:check_proven -> {:check_executed, &Proven.look_for_batches_and_update/1} |
||||||
|
:check_executed -> {:check_historical, &Executed.look_for_batches_and_update/1} |
||||||
|
end |
||||||
|
|
||||||
|
{handle_duration, result} = :timer.tc(func, [state.config]) |
||||||
|
|
||||||
|
{switch_to, state_data} = |
||||||
|
case result do |
||||||
|
:ok -> |
||||||
|
{output, %{duration: update_duration(state.data, handle_duration)}} |
||||||
|
|
||||||
|
{:recovery_required, batches} -> |
||||||
|
{:recover_batches, |
||||||
|
%{ |
||||||
|
switched_from: input, |
||||||
|
batches: batches, |
||||||
|
duration: update_duration(state.data, handle_duration) |
||||||
|
}} |
||||||
|
end |
||||||
|
|
||||||
|
Process.send(self(), switch_to, []) |
||||||
|
{:noreply, %{state | data: state_data}} |
||||||
|
end |
||||||
|
|
||||||
|
defp update_duration(data, cur_duration) do |
||||||
|
if Map.has_key?(data, :duration) do |
||||||
|
data.duration + cur_duration |
||||||
|
else |
||||||
|
cur_duration |
||||||
|
end |
||||||
|
end |
||||||
|
end |
@ -0,0 +1,413 @@ |
|||||||
|
defmodule Indexer.Fetcher.ZkSync.Discovery.BatchesData do |
||||||
|
@moduledoc """ |
||||||
|
Provides the main functionality to extract data for batches and the rollup blocks,
rollup transactions, and L1 transactions associated with them.
||||||
|
""" |
||||||
|
|
||||||
|
alias EthereumJSONRPC.Block.ByNumber |
||||||
|
alias Indexer.Fetcher.ZkSync.Utils.Rpc |
||||||
|
|
||||||
|
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_info: 1, log_details_chunk_handling: 4] |
||||||
|
import EthereumJSONRPC, only: [quantity_to_integer: 1] |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Downloads batches, associates rollup blocks and transactions, and imports the results into the database. |
||||||
|
Data is retrieved from the RPC endpoint in chunks of `chunk_size`. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `batches`: Either a tuple of two integers, `start_batch_number` and `end_batch_number`, defining |
||||||
|
the range of batches to receive, or a list of batch numbers, `batches_list`. |
||||||
|
- `config`: Configuration containing `chunk_size` to limit the amount of data requested from the RPC endpoint, |
||||||
|
and `json_rpc_named_arguments` defining parameters for the RPC connection. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- `{batches_to_import, l2_blocks_to_import, l2_txs_to_import}` |
||||||
|
where |
||||||
|
- `batches_to_import` is a map of batches data |
||||||
|
- `l2_blocks_to_import` is a list of blocks associated with batches by batch numbers |
||||||
|
- `l2_txs_to_import` is a list of transactions associated with batches by batch numbers |
||||||
|
""" |
||||||
|
@spec extract_data_from_batches([integer()] | {integer(), integer()}, %{ |
||||||
|
:chunk_size => pos_integer(), |
||||||
|
:json_rpc_named_arguments => any(), |
||||||
|
optional(any()) => any() |
||||||
|
}) :: {map(), list(), list()} |
||||||
|
def extract_data_from_batches(batches, config) |
||||||
|
|
||||||
|
def extract_data_from_batches({start_batch_number, end_batch_number}, config) |
||||||
|
when is_integer(start_batch_number) and is_integer(end_batch_number) and |
||||||
|
is_map(config) do |
||||||
|
start_batch_number..end_batch_number |
||||||
|
|> Enum.to_list() |
||||||
|
|> do_extract_data_from_batches(config) |
||||||
|
end |
||||||
|
|
||||||
|
def extract_data_from_batches(batches_list, config) |
||||||
|
when is_list(batches_list) and |
||||||
|
is_map(config) do |
||||||
|
batches_list |
||||||
|
|> do_extract_data_from_batches(config) |
||||||
|
end |
||||||
|
|
||||||
|
defp do_extract_data_from_batches(batches_list, config) when is_list(batches_list) do |
||||||
|
initial_batches_to_import = collect_batches_details(batches_list, config) |
||||||
|
log_info("Collected details for #{length(Map.keys(initial_batches_to_import))} batches") |
||||||
|
|
||||||
|
batches_to_import = get_block_ranges(initial_batches_to_import, config) |
||||||
|
|
||||||
|
{l2_blocks_to_import, l2_txs_to_import} = get_l2_blocks_and_transactions(batches_to_import, config) |
||||||
|
log_info("Linked #{length(l2_blocks_to_import)} L2 blocks and #{length(l2_txs_to_import)} L2 transactions") |
||||||
|
|
||||||
|
{batches_to_import, l2_blocks_to_import, l2_txs_to_import} |
||||||
|
end |
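# Illustrative calls of the public clauses above (hypothetical config and batch numbers):
#
#   config = %{chunk_size: 50, json_rpc_named_arguments: json_rpc_named_arguments}
#   extract_data_from_batches({100, 105}, config)       # a continuous range of batches
#   extract_data_from_batches([100, 103, 105], config)  # an explicit list of batches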
||||||
|
|
||||||
|
@doc """ |
||||||
|
Collects all unique L1 transactions from the given list of batches, including transactions |
||||||
|
that change the status of a batch and their timestamps. |
||||||
|
|
||||||
|
**Note**: The maps describing L1 transactions in the response are not ready for importing into
the database since they do not contain the `:id` element.
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `batches`: A list of maps describing batches. Each map is expected to define the following |
||||||
|
elements: `commit_tx_hash`, `commit_timestamp`, `prove_tx_hash`, `prove_timestamp`, |
||||||
|
`executed_tx_hash`, `executed_timestamp`. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- `l1_txs`: A map where keys are L1 transaction hashes, and values are maps containing |
||||||
|
transaction hashes and timestamps. |
||||||
|
""" |
||||||
|
@spec collect_l1_transactions(list()) :: map() |
||||||
|
def collect_l1_transactions(batches) |
||||||
|
when is_list(batches) do |
||||||
|
l1_txs = |
||||||
|
batches |
||||||
|
|> Enum.reduce(%{}, fn batch, l1_txs -> |
||||||
|
[ |
||||||
|
%{hash: batch.commit_tx_hash, timestamp: batch.commit_timestamp}, |
||||||
|
%{hash: batch.prove_tx_hash, timestamp: batch.prove_timestamp}, |
||||||
|
%{hash: batch.executed_tx_hash, timestamp: batch.executed_timestamp} |
||||||
|
] |
||||||
|
|> Enum.reduce(l1_txs, fn l1_tx, acc -> |
||||||
|
# adds l1_tx to the accumulator unless its hash is the zero hash
||||||
|
add_l1_tx_to_list(acc, l1_tx) |
||||||
|
end) |
||||||
|
end) |
||||||
|
|
||||||
|
log_info("Collected #{length(Map.keys(l1_txs))} L1 hashes") |
||||||
|
|
||||||
|
l1_txs |
||||||
|
end |
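# Illustrative shape of the accumulated map (hypothetical hashes and timestamps); zero-hash
# entries are skipped and duplicate hashes collapse onto a single key:
#
#   %{
#     "0xaaaa..." => %{hash: "0xaaaa...", timestamp: ~U[2024-03-01 10:00:00Z]},
#     "0xbbbb..." => %{hash: "0xbbbb...", timestamp: ~U[2024-03-01 11:00:00Z]}
#   }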
||||||
|
|
||||||
|
defp add_l1_tx_to_list(l1_txs, l1_tx) do |
||||||
|
if l1_tx.hash != Rpc.get_binary_zero_hash() do |
||||||
|
Map.put(l1_txs, l1_tx.hash, l1_tx) |
||||||
|
else |
||||||
|
l1_txs |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
# Divides the list of batch numbers into chunks of size `chunk_size` to combine |
||||||
|
# `zks_getL1BatchDetails` calls in one chunk together. To simplify further handling, |
||||||
|
# each call is combined with the batch number in the JSON request identifier field. |
||||||
|
# This allows parsing and associating every response with a particular batch, producing |
||||||
|
# a list of maps describing the batches, ready for further handling. |
||||||
|
# |
||||||
|
# **Note**: The batches in the resulting map are not ready for importing into the DB. L1 transaction |
||||||
|
# indices as well as the rollup blocks range must be added, and then batch descriptions |
||||||
|
# must be pruned (see Indexer.Fetcher.ZkSync.Utils.Db.prune_json_batch/1). |
||||||
|
# |
||||||
|
# ## Parameters |
||||||
|
# - `batches_list`: A list of batch numbers. |
||||||
|
# - `config`: A map containing `chunk_size` specifying the number of `zks_getL1BatchDetails` in |
||||||
|
# one HTTP request, and `json_rpc_named_arguments` describing parameters for |
||||||
|
# RPC connection. |
||||||
|
# |
||||||
|
# ## Returns |
||||||
|
# - `batches_details`: A map where keys are batch numbers, and values are maps produced |
||||||
|
# after parsing responses of `zks_getL1BatchDetails` calls. |
||||||
|
defp collect_batches_details( |
||||||
|
batches_list, |
||||||
|
%{json_rpc_named_arguments: json_rpc_named_arguments, chunk_size: chunk_size} = _config |
||||||
|
) |
||||||
|
when is_list(batches_list) do |
||||||
|
batches_list_length = length(batches_list) |
||||||
|
|
||||||
|
{batches_details, _} = |
||||||
|
batches_list |
||||||
|
|> Enum.chunk_every(chunk_size) |
||||||
|
|> Enum.reduce({%{}, 0}, fn chunk, {details, a} -> |
||||||
|
log_details_chunk_handling("Collecting details", chunk, a * chunk_size, batches_list_length) |
||||||
|
|
||||||
|
requests = |
||||||
|
chunk |
||||||
|
|> Enum.map(fn batch_number -> |
||||||
|
EthereumJSONRPC.request(%{ |
||||||
|
id: batch_number, |
||||||
|
method: "zks_getL1BatchDetails", |
||||||
|
params: [batch_number] |
||||||
|
}) |
||||||
|
end) |
||||||
|
|
||||||
|
details = |
||||||
|
requests |
||||||
|
|> Rpc.fetch_batches_details(json_rpc_named_arguments) |
||||||
|
|> Enum.reduce( |
||||||
|
details, |
||||||
|
fn resp, details -> |
||||||
|
Map.put(details, resp.id, Rpc.transform_batch_details_to_map(resp.result)) |
||||||
|
end |
||||||
|
) |
||||||
|
|
||||||
|
{details, a + 1} |
||||||
|
end) |
||||||
|
|
||||||
|
batches_details |
||||||
|
end |
||||||
|
|
||||||
|
# Extends each batch description with the block numbers specifying the start and end of |
||||||
|
# a range of blocks included in the batch. The block ranges are obtained through the RPC call |
||||||
|
# `zks_getL1BatchBlockRange`. The calls are combined in chunks of `chunk_size`. To distinguish |
||||||
|
# each call in the chunk, they are combined with the batch number in the JSON request |
||||||
|
# identifier field. |
||||||
|
# |
||||||
|
# ## Parameters |
||||||
|
# - `batches`: A map of batch descriptions. |
||||||
|
# - `config`: A map containing `chunk_size`, specifying the number of `zks_getL1BatchBlockRange` |
||||||
|
# in one HTTP request, and `json_rpc_named_arguments` describing parameters for |
||||||
|
# RPC connection. |
||||||
|
# |
||||||
|
# ## Returns |
||||||
|
# - `updated_batches`: A map of batch descriptions where each description is updated with |
||||||
|
# a range (elements `:start_block` and `:end_block`) of rollup blocks included in the batch. |
||||||
|
defp get_block_ranges( |
||||||
|
batches, |
||||||
|
%{json_rpc_named_arguments: json_rpc_named_arguments, chunk_size: chunk_size} = _config |
||||||
|
) |
||||||
|
when is_map(batches) do |
||||||
|
keys = Map.keys(batches) |
||||||
|
batches_list_length = length(keys) |
||||||
|
|
||||||
|
{updated_batches, _} = |
||||||
|
keys |
||||||
|
|> Enum.chunk_every(chunk_size) |
||||||
|
|> Enum.reduce({batches, 0}, fn batches_chunk, {batches_with_block_ranges, a} -> |
||||||
|
log_details_chunk_handling("Collecting block ranges", batches_chunk, a * chunk_size, batches_list_length) |
||||||
|
|
||||||
|
{request_block_ranges_for_batches(batches_chunk, batches, batches_with_block_ranges, json_rpc_named_arguments), |
||||||
|
a + 1} |
||||||
|
end) |
||||||
|
|
||||||
|
updated_batches |
||||||
|
end |
||||||
|
|
||||||
|
# For a given list of rollup batch numbers, this function builds a list of requests |
||||||
|
# to `zks_getL1BatchBlockRange`, executes them, and extends the batches' descriptions with |
||||||
|
# ranges of rollup blocks associated with each batch. |
||||||
|
# |
||||||
|
# ## Parameters |
||||||
|
# - `batches_numbers`: A list with batch numbers. |
||||||
|
# - `batches_src`: A list containing original batches descriptions. |
||||||
|
# - `batches_dst`: A map with extended batch descriptions containing rollup block ranges. |
||||||
|
# - `json_rpc_named_arguments`: Describes parameters for RPC connection. |
||||||
|
# |
||||||
|
# ## Returns |
||||||
|
# - An updated version of `batches_dst` with new entities containing rollup block ranges. |
||||||
|
defp request_block_ranges_for_batches(batches_numbers, batches_src, batches_dst, json_rpc_named_arguments) do |
||||||
|
batches_numbers |
||||||
|
|> Enum.reduce([], fn batch_number, requests -> |
||||||
|
batch = Map.get(batches_src, batch_number) |
||||||
|
# Prepare the list of requests to get block ranges
||||||
|
case is_nil(batch.start_block) or is_nil(batch.end_block) do |
||||||
|
true -> |
||||||
|
[ |
||||||
|
EthereumJSONRPC.request(%{ |
||||||
|
id: batch_number, |
||||||
|
method: "zks_getL1BatchBlockRange", |
||||||
|
params: [batch_number] |
||||||
|
}) |
||||||
|
| requests |
||||||
|
] |
||||||
|
|
||||||
|
false -> |
||||||
|
requests |
||||||
|
end |
||||||
|
end) |
||||||
|
|> Rpc.fetch_blocks_ranges(json_rpc_named_arguments) |
||||||
|
|> Enum.reduce(batches_dst, fn resp, updated_batches -> |
||||||
|
Map.update!(updated_batches, resp.id, fn batch -> |
||||||
|
[start_block, end_block] = resp.result |
||||||
|
|
||||||
|
Map.merge(batch, %{ |
||||||
|
start_block: quantity_to_integer(start_block), |
||||||
|
end_block: quantity_to_integer(end_block) |
||||||
|
}) |
||||||
|
end) |
||||||
|
end) |
||||||
|
end |
||||||
|
|
||||||
|
# Unfolds the ranges of rollup blocks in each batch description, makes RPC `eth_getBlockByNumber` calls, |
||||||
|
# and builds two lists: a list of rollup blocks associated with each batch and a list of rollup transactions |
||||||
|
# associated with each batch. RPC calls are made in chunks of `chunk_size`. To distinguish |
||||||
|
# each call in the chunk, they are combined with the block number in the JSON request |
||||||
|
# identifier field. |
||||||
|
# |
||||||
|
# ## Parameters |
||||||
|
# - `batches`: A map of batch descriptions. Each description must contain `start_block` and |
||||||
|
# `end_block`, specifying the range of blocks associated with the batch. |
||||||
|
# - `config`: A map containing `chunk_size`, specifying the number of `eth_getBlockByNumber` |
||||||
|
# in one HTTP request, and `json_rpc_named_arguments` describing parameters for |
||||||
|
# RPC connection. |
||||||
|
# |
||||||
|
# ## Returns |
||||||
|
# - {l2_blocks_to_import, l2_txs_to_import}, where |
||||||
|
# - `l2_blocks_to_import` contains a list of all rollup blocks with their associations with |
||||||
|
# the provided batches. The association is a map with the block hash and the batch number. |
||||||
|
# - `l2_txs_to_import` contains a list of all rollup transactions with their associations |
||||||
|
# with the provided batches. The association is a map with the transaction hash and |
||||||
|
# the batch number. |
||||||
|
defp get_l2_blocks_and_transactions( |
||||||
|
batches, |
||||||
|
%{json_rpc_named_arguments: json_rpc_named_arguments, chunk_size: chunk_size} = _config |
||||||
|
) do |
||||||
|
# Extracts the rollup block range for every batch, unfolds it and |
||||||
|
# build chunks of `eth_getBlockByNumber` calls |
||||||
|
{blocks_to_batches, chunked_requests, cur_chunk, cur_chunk_size} = |
||||||
|
batches |
||||||
|
|> Map.keys() |
||||||
|
|> Enum.reduce({%{}, [], [], 0}, fn batch_number, cur_batch_acc -> |
||||||
|
batch = Map.get(batches, batch_number) |
||||||
|
|
||||||
|
batch.start_block..batch.end_block |
||||||
|
|> Enum.chunk_every(chunk_size) |
||||||
|
|> Enum.reduce(cur_batch_acc, fn blocks_range, cur_chunk_acc -> |
||||||
|
build_blocks_map_and_chunks_of_rpc_requests(batch_number, blocks_range, cur_chunk_acc, chunk_size) |
||||||
|
end) |
||||||
|
end) |
||||||
|
|
||||||
|
# After the last iteration of the reduce loop the requests from the final,
# incomplete chunk may not have been added to the chunks list yet,
# so they are appended here
||||||
|
finalized_chunked_requests = |
||||||
|
if cur_chunk_size > 0 do |
||||||
|
[cur_chunk | chunked_requests] |
||||||
|
else |
||||||
|
chunked_requests |
||||||
|
end |
||||||
|
|
||||||
|
# The chunked requests are sent to the RPC node and the responses are parsed to
# extract rollup block hashes and rollup transactions.
||||||
|
{blocks_associations, l2_txs_to_import} = |
||||||
|
finalized_chunked_requests |
||||||
|
|> Enum.reduce({blocks_to_batches, []}, fn requests, {blocks, l2_txs} -> |
||||||
|
requests |
||||||
|
|> Rpc.fetch_blocks_details(json_rpc_named_arguments) |
||||||
|
|> extract_block_hash_and_transactions_list(blocks, l2_txs) |
||||||
|
end) |
||||||
|
|
||||||
|
# Check that the number of transactions received for each batch is correct
||||||
|
batches |
||||||
|
|> Map.keys() |
||||||
|
|> Enum.each(fn batch_number -> |
||||||
|
batch = Map.get(batches, batch_number) |
||||||
|
txs_in_batch = batch.l1_tx_count + batch.l2_tx_count |
||||||
|
|
||||||
|
^txs_in_batch = |
||||||
|
Enum.count(l2_txs_to_import, fn tx -> |
||||||
|
tx.batch_number == batch_number |
||||||
|
end) |
||||||
|
end) |
||||||
|
|
||||||
|
{Map.values(blocks_associations), l2_txs_to_import} |
||||||
|
end |
||||||
|
|
||||||
|
# For a given list of rollup block numbers, this function extends: |
||||||
|
# - a map containing the linkage between rollup block numbers and batch numbers |
||||||
|
# - a list of chunks of `eth_getBlockByNumber` requests |
||||||
|
# - an uncompleted chunk of `eth_getBlockByNumber` requests |
||||||
|
# |
||||||
|
# ## Parameters |
||||||
|
# - `batch_number`: The number of the batch to which the list of rollup blocks is linked. |
||||||
|
# - `blocks_numbers`: A list of rollup block numbers. |
||||||
|
# - `cur_chunk_acc`: The current state of the accumulator containing: |
||||||
|
# - the current state of the map containing the linkage between rollup block numbers and batch numbers |
||||||
|
# - the current state of the list of chunks of `eth_getBlockByNumber` requests |
||||||
|
# - the current state of the uncompleted chunk of `eth_getBlockByNumber` requests |
||||||
|
# - the size of the uncompleted chunk |
||||||
|
# - `chunk_size`: The maximum size of the chunk of `eth_getBlockByNumber` requests |
||||||
|
# |
||||||
|
# ## Returns |
||||||
|
# - {blocks_to_batches, chunked_requests, cur_chunk, cur_chunk_size}, where: |
||||||
|
# - `blocks_to_batches`: An updated map with new blocks added. |
||||||
|
# - `chunked_requests`: An updated list of lists of `eth_getBlockByNumber` requests. |
||||||
|
# - `cur_chunk`: An uncompleted chunk of `eth_getBlockByNumber` requests or an empty list. |
||||||
|
# - `cur_chunk_size`: The size of the uncompleted chunk. |
||||||
|
defp build_blocks_map_and_chunks_of_rpc_requests(batch_number, blocks_numbers, cur_chunk_acc, chunk_size) do |
||||||
|
blocks_numbers |
||||||
|
|> Enum.reduce(cur_chunk_acc, fn block_number, {blocks_to_batches, chunked_requests, cur_chunk, cur_chunk_size} -> |
||||||
|
blocks_to_batches = Map.put(blocks_to_batches, block_number, %{batch_number: batch_number}) |
||||||
|
|
||||||
|
cur_chunk = [ |
||||||
|
ByNumber.request( |
||||||
|
%{ |
||||||
|
id: block_number, |
||||||
|
number: block_number |
||||||
|
}, |
||||||
|
false |
||||||
|
) |
||||||
|
| cur_chunk |
||||||
|
] |
||||||
|
|
||||||
|
if cur_chunk_size + 1 == chunk_size do |
||||||
|
{blocks_to_batches, [cur_chunk | chunked_requests], [], 0} |
||||||
|
else |
||||||
|
{blocks_to_batches, chunked_requests, cur_chunk, cur_chunk_size + 1} |
||||||
|
end |
||||||
|
end) |
||||||
|
end |
||||||
|
|
||||||
|
# Parses responses from `eth_getBlockByNumber` calls and extracts the block hash and the |
||||||
|
# transactions lists. The block hash and transaction hashes are used to build associations |
||||||
|
# with the corresponding batches by utilizing their numbers. |
||||||
|
# |
||||||
|
# This function is not part of the `Indexer.Fetcher.ZkSync.Utils.Rpc` module since the resulting |
||||||
|
# lists are too specific for further import to the database. |
||||||
|
# |
||||||
|
# ## Parameters |
||||||
|
# - `json_responses`: A list of responses to `eth_getBlockByNumber` calls. |
||||||
|
# - `l2_blocks`: A map of accumulated associations between rollup blocks and batches. |
||||||
|
# - `l2_txs`: A list of accumulated associations between rollup transactions and batches. |
||||||
|
# |
||||||
|
# ## Returns |
||||||
|
# - {l2_blocks, l2_txs}, where |
||||||
|
# - `l2_blocks`: Updated map of accumulated associations between rollup blocks and batches. |
||||||
|
# - `l2_txs`: Updated list of accumulated associations between rollup transactions and batches. |
||||||
|
defp extract_block_hash_and_transactions_list(json_responses, l2_blocks, l2_txs) do |
||||||
|
json_responses |
||||||
|
|> Enum.reduce({l2_blocks, l2_txs}, fn resp, {l2_blocks, l2_txs} -> |
||||||
|
{block, l2_blocks} = |
||||||
|
Map.get_and_update(l2_blocks, resp.id, fn block -> |
||||||
|
{block, Map.put(block, :hash, Map.get(resp.result, "hash"))} |
||||||
|
end) |
||||||
|
|
||||||
|
l2_txs = |
||||||
|
case Map.get(resp.result, "transactions") do |
||||||
|
nil -> |
||||||
|
l2_txs |
||||||
|
|
||||||
|
new_txs -> |
||||||
|
Enum.reduce(new_txs, l2_txs, fn l2_tx_hash, l2_txs -> |
||||||
|
[ |
||||||
|
%{ |
||||||
|
batch_number: block.batch_number, |
||||||
|
hash: l2_tx_hash |
||||||
|
} |
||||||
|
| l2_txs |
||||||
|
] |
||||||
|
end) |
||||||
|
end |
||||||
|
|
||||||
|
{l2_blocks, l2_txs} |
||||||
|
end) |
||||||
|
end |
||||||
|
end |
@ -0,0 +1,163 @@ |
|||||||
|
defmodule Indexer.Fetcher.ZkSync.Discovery.Workers do |
||||||
|
@moduledoc """ |
||||||
|
Provides functions to download a set of batches from RPC and import them to DB. |
||||||
|
""" |
||||||
|
|
||||||
|
alias Indexer.Fetcher.ZkSync.Utils.Db |
||||||
|
|
||||||
|
import Indexer.Fetcher.ZkSync.Discovery.BatchesData, |
||||||
|
only: [ |
||||||
|
collect_l1_transactions: 1, |
||||||
|
extract_data_from_batches: 2 |
||||||
|
] |
||||||
|
|
||||||
|
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_info: 1] |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Downloads minimal batch data (the batch itself plus the hashes of the associated rollup blocks and
transactions) from RPC and imports it into the DB. Data is retrieved from the RPC endpoint in chunks of `chunk_size`.
||||||
|
Associated L1 transactions are not imported, assuming that the batch import happens regularly
enough and the most recently downloaded batches do not have L1 associations yet anyway.
||||||
|
Later `Indexer.Fetcher.ZkSync.BatchesStatusTracker` will update any batch state changes and |
||||||
|
import required L1 transactions. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `start_batch_number`: The first batch in the range to download. |
||||||
|
- `end_batch_number`: The last batch in the range to download. |
||||||
|
- `config`: Configuration containing `chunk_size` to limit the amount of data requested from the RPC endpoint, |
||||||
|
and `json_rpc_named_arguments` defining parameters for the RPC connection. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- `:ok` |
||||||
|
""" |
||||||
|
@spec get_minimal_batches_info_and_import(non_neg_integer(), non_neg_integer(), %{ |
||||||
|
:chunk_size => integer(), |
||||||
|
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), |
||||||
|
optional(any()) => any() |
||||||
|
}) :: :ok |
||||||
|
def get_minimal_batches_info_and_import(start_batch_number, end_batch_number, config) |
||||||
|
when is_integer(start_batch_number) and |
||||||
|
is_integer(end_batch_number) and |
||||||
|
(is_map(config) and is_map_key(config, :json_rpc_named_arguments) and |
||||||
|
is_map_key(config, :chunk_size)) do |
||||||
|
{batches_to_import, l2_blocks_to_import, l2_txs_to_import} = |
||||||
|
extract_data_from_batches({start_batch_number, end_batch_number}, config) |
||||||
|
|
||||||
|
batches_list_to_import = |
||||||
|
batches_to_import |
||||||
|
|> Map.values() |
||||||
|
|> Enum.reduce([], fn batch, batches_list -> |
||||||
|
[Db.prune_json_batch(batch) | batches_list] |
||||||
|
end) |
||||||
|
|
||||||
|
Db.import_to_db( |
||||||
|
batches_list_to_import, |
||||||
|
[], |
||||||
|
l2_txs_to_import, |
||||||
|
l2_blocks_to_import |
||||||
|
) |
||||||
|
|
||||||
|
:ok |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Downloads batches, associates L1 transactions, rollup blocks and transactions with the given list of batch numbers, |
||||||
|
and imports the results into the database. Data is retrieved from the RPC endpoint in chunks of `chunk_size`. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `batches_numbers_list`: List of batch numbers to be retrieved. |
||||||
|
- `config`: Configuration containing `chunk_size` to limit the amount of data requested from the RPC endpoint, |
||||||
|
and `json_rpc_named_arguments` defining parameters for the RPC connection. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- `:ok` |
||||||
|
""" |
||||||
|
@spec get_full_batches_info_and_import([integer()], %{ |
||||||
|
:chunk_size => integer(), |
||||||
|
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), |
||||||
|
optional(any()) => any() |
||||||
|
}) :: :ok |
||||||
|
def get_full_batches_info_and_import(batches_numbers_list, config) |
||||||
|
when is_list(batches_numbers_list) and |
||||||
|
(is_map(config) and is_map_key(config, :json_rpc_named_arguments) and |
||||||
|
is_map_key(config, :chunk_size)) do |
||||||
|
# Collect batches and linked L2 blocks and transactions
||||||
|
{batches_to_import, l2_blocks_to_import, l2_txs_to_import} = extract_data_from_batches(batches_numbers_list, config) |
||||||
|
|
||||||
|
# Collect L1 transactions associated with batches |
||||||
|
l1_txs = |
||||||
|
batches_to_import |
||||||
|
|> Map.values() |
||||||
|
|> collect_l1_transactions() |
||||||
|
|> Db.get_indices_for_l1_transactions() |
||||||
|
|
||||||
|
# Update batches with L1 transaction indices and prune unnecessary fields
||||||
|
batches_list_to_import = |
||||||
|
batches_to_import |
||||||
|
|> Map.values() |
||||||
|
|> Enum.reduce([], fn batch, batches -> |
||||||
|
[ |
||||||
|
batch |
||||||
|
|> Map.put(:commit_id, get_l1_tx_id_by_hash(l1_txs, batch.commit_tx_hash)) |
||||||
|
|> Map.put(:prove_id, get_l1_tx_id_by_hash(l1_txs, batch.prove_tx_hash)) |
||||||
|
|> Map.put(:execute_id, get_l1_tx_id_by_hash(l1_txs, batch.executed_tx_hash)) |
||||||
|
|> Db.prune_json_batch() |
||||||
|
| batches |
||||||
|
] |
||||||
|
end) |
||||||
|
|
||||||
|
Db.import_to_db( |
||||||
|
batches_list_to_import, |
||||||
|
Map.values(l1_txs), |
||||||
|
l2_txs_to_import, |
||||||
|
l2_blocks_to_import |
||||||
|
) |
||||||
|
|
||||||
|
:ok |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Retrieves the minimal batch number from the database. If the minimum batch number is not zero,
downloads up to `batches_max_range` batches older than the retrieved one, along with associated
L1 transactions, rollup blocks, and transactions, and imports everything into the database.
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `config`: Configuration containing `chunk_size` to limit the amount of data requested from
the RPC endpoint, `json_rpc_named_arguments` defining parameters for the RPC connection,
and `batches_max_range` defining how many older batches are downloaded per iteration.
||||||
|
|
||||||
|
## Returns |
||||||
|
- `:ok` |
||||||
|
""" |
||||||
|
@spec batches_catchup(%{ |
||||||
|
:batches_max_range => integer(), |
||||||
|
:chunk_size => integer(), |
||||||
|
:json_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), |
||||||
|
optional(any()) => any() |
||||||
|
}) :: :ok |
||||||
|
def batches_catchup(config) |
||||||
|
when is_map(config) and is_map_key(config, :json_rpc_named_arguments) and |
||||||
|
is_map_key(config, :batches_max_range) and |
||||||
|
is_map_key(config, :chunk_size) do |
||||||
|
oldest_batch_number = Db.get_earliest_batch_number() |
||||||
|
|
||||||
|
if not is_nil(oldest_batch_number) && oldest_batch_number > 0 do |
||||||
|
log_info("The oldest batch number is not zero. Historical baches will be fetched.") |
||||||
|
start_batch_number = max(0, oldest_batch_number - config.batches_max_range) |
||||||
|
end_batch_number = oldest_batch_number - 1 |
||||||
|
|
||||||
|
start_batch_number..end_batch_number |
||||||
|
|> Enum.to_list() |
||||||
|
|> get_full_batches_info_and_import(config) |
||||||
|
end |
||||||
|
|
||||||
|
:ok |
||||||
|
end |
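# Example of the catch-up range computed above (hypothetical values): with the oldest imported
# batch #500 and `batches_max_range` of 50, one iteration fetches batches 450..499:
#
#   start_batch_number = max(0, 500 - 50) #=> 450
#   end_batch_number = 500 - 1            #=> 499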
||||||
|
|
||||||
|
defp get_l1_tx_id_by_hash(l1_txs, hash) do |
||||||
|
l1_txs |
||||||
|
|> Map.get(hash) |
||||||
|
|> Kernel.||(%{id: nil}) |
||||||
|
|> Map.get(:id) |
||||||
|
end |
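# Sketch of the `Kernel.||/2` fallback above (hypothetical map and hashes):
#
#   get_l1_tx_id_by_hash(%{"0xab" => %{id: 7, hash: "0xab"}}, "0xab") #=> 7
#   get_l1_tx_id_by_hash(%{}, "0xcd")                                 #=> nil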
||||||
|
end |
@ -0,0 +1,78 @@ |
|||||||
|
defmodule Indexer.Fetcher.ZkSync.StatusTracking.Committed do |
||||||
|
@moduledoc """ |
||||||
|
Functionality to discover committed batches |
||||||
|
""" |
||||||
|
|
||||||
|
alias Indexer.Fetcher.ZkSync.Utils.{Db, Rpc} |
||||||
|
|
||||||
|
import Indexer.Fetcher.ZkSync.StatusTracking.CommonUtils, |
||||||
|
only: [ |
||||||
|
check_if_batch_status_changed: 3, |
||||||
|
associate_and_import_or_prepare_for_recovery: 4 |
||||||
|
] |
||||||
|
|
||||||
|
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_info: 1] |
||||||
|
|
||||||
|
# keccak256("BlockCommit(uint256,bytes32,bytes32)") |
||||||
|
@block_commit_event "0x8f2916b2f2d78cc5890ead36c06c0f6d5d112c7e103589947e8e2f0d6eddb763" |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Checks if the oldest uncommitted batch in the database has the associated L1 commitment transaction |
||||||
|
by requesting new batch details from RPC. If so, analyzes the `BlockCommit` event emitted by |
||||||
|
the transaction to explore all the batches committed by it. For all discovered batches, it updates |
||||||
|
the database with new associations, importing information about L1 transactions. |
||||||
|
If it is found that some of the discovered batches are absent in the database, the function |
||||||
|
interrupts and returns the list of batch numbers that can be attempted to be recovered. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `config`: Configuration containing `json_l1_rpc_named_arguments` and |
||||||
|
`json_l2_rpc_named_arguments` defining parameters for the RPC connections. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- `:ok` if no new committed batches are found, or if all found batches and the corresponding L1 |
||||||
|
transactions are imported successfully. |
||||||
|
- `{:recovery_required, batches_to_recover}` if some of the committed batches are found to be
absent from the database; `batches_to_recover` contains the list of their numbers.
||||||
|
""" |
||||||
|
@spec look_for_batches_and_update(%{ |
||||||
|
:json_l1_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), |
||||||
|
:json_l2_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), |
||||||
|
optional(any()) => any() |
||||||
|
}) :: :ok | {:recovery_required, list()} |
||||||
|
def look_for_batches_and_update( |
||||||
|
%{ |
||||||
|
json_l1_rpc_named_arguments: json_l1_rpc_named_arguments, |
||||||
|
json_l2_rpc_named_arguments: json_l2_rpc_named_arguments |
||||||
|
} = _config |
||||||
|
) do |
||||||
|
case Db.get_earliest_sealed_batch_number() do |
||||||
|
nil -> |
||||||
|
:ok |
||||||
|
|
||||||
|
expected_batch_number -> |
||||||
|
log_info("Checking if the batch #{expected_batch_number} was committed") |
||||||
|
|
||||||
|
{next_action, tx_hash, l1_txs} = |
||||||
|
check_if_batch_status_changed(expected_batch_number, :commit_tx, json_l2_rpc_named_arguments) |
||||||
|
|
||||||
|
case next_action do |
||||||
|
:skip -> |
||||||
|
:ok |
||||||
|
|
||||||
|
:look_for_batches -> |
||||||
|
log_info("The batch #{expected_batch_number} looks like committed") |
||||||
|
commit_tx_receipt = Rpc.fetch_tx_receipt_by_hash(tx_hash, json_l1_rpc_named_arguments) |
||||||
|
batches_numbers_from_rpc = get_committed_batches_from_logs(commit_tx_receipt["logs"]) |
||||||
|
|
||||||
|
associate_and_import_or_prepare_for_recovery(batches_numbers_from_rpc, l1_txs, tx_hash, :commit_id) |
||||||
|
end |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
defp get_committed_batches_from_logs(logs) do |
||||||
|
committed_batches = Rpc.filter_logs_and_extract_topic_at(logs, @block_commit_event, 1) |
||||||
|
log_info("Discovered #{length(committed_batches)} committed batches in the commitment tx") |
||||||
|
|
||||||
|
committed_batches |
||||||
|
end |
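# Sketch of the extraction above (hypothetical log): a BlockCommit log is expected to carry
# the batch number as its first indexed topic, so for logs whose topics look like
#
#   [@block_commit_event, "0x...00c8", batch_hash, commitment]
#
# `Rpc.filter_logs_and_extract_topic_at(logs, @block_commit_event, 1)` is assumed to yield
# the decoded batch numbers, e.g. [200].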
||||||
|
end |
@ -0,0 +1,173 @@ |
|||||||
|
defmodule Indexer.Fetcher.ZkSync.StatusTracking.CommonUtils do |
||||||
|
@moduledoc """ |
||||||
|
Common functions for status changes trackers |
||||||
|
""" |
||||||
|
|
||||||
|
alias Explorer.Chain.ZkSync.Reader |
||||||
|
alias Indexer.Fetcher.ZkSync.Utils.{Db, Rpc} |
||||||
|
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_warning: 1] |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Fetches the details of the batch with the given number and checks if the representation of |
||||||
|
the same batch in the database refers to the same commitment, proving, or executing transaction |
||||||
|
depending on `tx_type`. If the transaction state changes, the new transaction is prepared for |
||||||
|
import to the database. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `batch_number`: the number of the batch to check L1 transaction state. |
||||||
|
- `tx_type`: a type of the transaction to check, one of :commit_tx, :execute_tx, or :prove_tx. |
||||||
|
- `json_l2_rpc_named_arguments`: parameters for the RPC connections. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- `{:look_for_batches, l1_tx_hash, l1_txs}` where |
||||||
|
- `l1_tx_hash` is the hash of the L1 transaction. |
||||||
|
- `l1_txs` is a map containing the transaction hash as a key, and values are maps |
||||||
|
with transaction hashes and transaction timestamps. |
||||||
|
- `{:skip, "", %{}}` means the batch is not found in the database or the state of the transaction |
||||||
|
in the batch representation is the same as the state of the transaction for the batch |
||||||
|
received from RPC. |
||||||
|
""" |
||||||
|
@spec check_if_batch_status_changed( |
||||||
|
binary() | non_neg_integer(), |
||||||
|
:commit_tx | :execute_tx | :prove_tx, |
||||||
|
EthereumJSONRPC.json_rpc_named_arguments() |
||||||
|
) :: {:look_for_batches, any(), any()} | {:skip, <<>>, %{}} |
||||||
|
def check_if_batch_status_changed(batch_number, tx_type, json_l2_rpc_named_arguments) |
||||||
|
when (is_binary(batch_number) or is_integer(batch_number)) and |
||||||
|
tx_type in [:commit_tx, :prove_tx, :execute_tx] and |
||||||
|
is_list(json_l2_rpc_named_arguments) do |
||||||
|
batch_from_rpc = Rpc.fetch_batch_details_by_batch_number(batch_number, json_l2_rpc_named_arguments) |
||||||
|
|
||||||
|
status_changed_or_error = |
||||||
|
case Reader.batch( |
||||||
|
batch_number, |
||||||
|
necessity_by_association: %{ |
||||||
|
get_association(tx_type) => :optional |
||||||
|
} |
||||||
|
) do |
||||||
|
{:ok, batch_from_db} -> transactions_of_batch_changed?(batch_from_db, batch_from_rpc, tx_type) |
||||||
|
{:error, :not_found} -> :error |
||||||
|
end |
||||||
|
|
||||||
|
l1_tx = get_l1_tx_from_batch(batch_from_rpc, tx_type) |
||||||
|
|
||||||
|
if l1_tx.hash != Rpc.get_binary_zero_hash() and status_changed_or_error in [true, :error] do |
||||||
|
l1_txs = Db.get_indices_for_l1_transactions(%{l1_tx.hash => l1_tx}) |
||||||
|
|
||||||
|
{:look_for_batches, l1_tx.hash, l1_txs} |
||||||
|
else |
||||||
|
{:skip, "", %{}} |
||||||
|
end |
||||||
|
end |
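
An illustrative consumer of the two return shapes, roughly mirroring how the tracker modules use this function; `import_changed_status/2` is a hypothetical helper, not part of this diff:

    case check_if_batch_status_changed(batch_number, :commit_tx, json_l2_rpc_named_arguments) do
      {:look_for_batches, l1_tx_hash, l1_txs} ->
        # the commitment transaction reported by RPC differs from the one stored locally;
        # l1_txs already carries DB indices assigned by Db.get_indices_for_l1_transactions/1
        import_changed_status(l1_tx_hash, l1_txs)

      {:skip, "", %{}} ->
        # the batch is unknown locally or its stored commit transaction already matches RPC
        :ok
    end
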
||||||
|
|
||||||
|
defp get_association(tx_type) do |
||||||
|
case tx_type do |
||||||
|
:commit_tx -> :commit_transaction |
||||||
|
:prove_tx -> :prove_transaction |
||||||
|
:execute_tx -> :execute_transaction |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
defp transactions_of_batch_changed?(batch_db, batch_json, tx_type) do |
||||||
|
tx_hash_json = |
||||||
|
case tx_type do |
||||||
|
:commit_tx -> batch_json.commit_tx_hash |
||||||
|
:prove_tx -> batch_json.prove_tx_hash |
||||||
|
:execute_tx -> batch_json.executed_tx_hash |
||||||
|
end |
||||||
|
|
||||||
|
tx_hash_db = |
||||||
|
case tx_type do |
||||||
|
:commit_tx -> batch_db.commit_transaction |
||||||
|
:prove_tx -> batch_db.prove_transaction |
||||||
|
:execute_tx -> batch_db.execute_transaction |
||||||
|
end |
||||||
|
|
||||||
|
tx_hash_db_bytes = |
||||||
|
if is_nil(tx_hash_db) do |
||||||
|
Rpc.get_binary_zero_hash() |
||||||
|
else |
||||||
|
tx_hash_db.hash.bytes |
||||||
|
end |
||||||
|
|
||||||
|
tx_hash_json != tx_hash_db_bytes |
||||||
|
end |
||||||
|
|
||||||
|
defp get_l1_tx_from_batch(batch_from_rpc, tx_type) do |
||||||
|
case tx_type do |
||||||
|
:commit_tx -> %{hash: batch_from_rpc.commit_tx_hash, timestamp: batch_from_rpc.commit_timestamp} |
||||||
|
:prove_tx -> %{hash: batch_from_rpc.prove_tx_hash, timestamp: batch_from_rpc.prove_timestamp} |
||||||
|
:execute_tx -> %{hash: batch_from_rpc.executed_tx_hash, timestamp: batch_from_rpc.executed_timestamp} |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Receives batches from the database, establishes an association between each batch and |
||||||
|
the corresponding L1 transactions, and imports batches and L1 transactions into the database. |
||||||
|
If the number of batches returned from the database does not match the number of requested |
batches, the initial list of batch numbers is returned so that it can be used for the missed |
batch recovery procedure. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `batches_numbers`: the list of batch numbers that must be updated. |
||||||
|
- `l1_txs`: a map containing transaction hashes as keys, and values are maps |
||||||
|
with transaction hashes and transaction timestamps of L1 transactions to import to the database. |
||||||
|
- `tx_hash`: the hash of the L1 transaction to build an association with. |
||||||
|
- `association_key`: the field in the batch description to build an association with L1 |
||||||
|
transactions. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- `:ok` if batches and the corresponding L1 transactions are imported successfully. |
||||||
|
- `{:recovery_required, batches_to_recover}` if some of the requested batches are missing from |
the database; `batches_to_recover` contains their numbers. |
||||||
|
""" |
||||||
|
@spec associate_and_import_or_prepare_for_recovery([integer()], map(), binary(), :commit_id | :execute_id | :prove_id) :: |
||||||
|
:ok | {:recovery_required, [integer()]} |
||||||
|
def associate_and_import_or_prepare_for_recovery(batches_numbers, l1_txs, tx_hash, association_key) |
||||||
|
when is_list(batches_numbers) and is_map(l1_txs) and is_binary(tx_hash) and |
||||||
|
association_key in [:commit_id, :prove_id, :execute_id] do |
||||||
|
case prepare_batches_to_import(batches_numbers, %{association_key => l1_txs[tx_hash][:id]}) do |
||||||
|
{:error, batches_to_recover} -> |
||||||
|
{:recovery_required, batches_to_recover} |
||||||
|
|
||||||
|
{:ok, batches_to_import} -> |
||||||
|
Db.import_to_db(batches_to_import, Map.values(l1_txs)) |
||||||
|
:ok |
||||||
|
end |
||||||
|
end |
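
A small sketch, with fabricated values, of the association map built above when the commit tracker calls this function (`association_key` = `:commit_id`):

    tx_hash = <<0::256>>  # placeholder 32-byte commitment transaction hash
    l1_txs = %{tx_hash => %{hash: tx_hash, timestamp: ~U[2024-01-01 00:00:00Z], id: 17}}

    # every batch prepared for import is merged with
    %{commit_id: l1_txs[tx_hash][:id]}
    # => %{commit_id: 17}
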
||||||
|
|
||||||
|
# Receives batches from the database and merges each batch's data with the data provided |
||||||
|
# in `map_to_update`. If the number of batches returned from the database does not match |
# the number of requested batches, the initial list of batch numbers is returned so that it |
# can be used for the missed batch recovery procedure. |
||||||
|
# |
||||||
|
# ## Parameters |
||||||
|
# - `batches`: the list of batch numbers that must be updated. |
||||||
|
# - `map_to_update`: a map containing new data that must be applied to all requested batches. |
||||||
|
# |
||||||
|
# ## Returns |
||||||
|
# - `{:ok, batches_to_import}` where `batches_to_import` is the list of batches ready to import |
||||||
|
# with updated data. |
||||||
|
# - `{:error, batches}` where `batches` contains the input list of batch numbers. |
||||||
|
defp prepare_batches_to_import(batches, map_to_update) do |
||||||
|
batches_from_db = Reader.batches(batches, []) |
||||||
|
|
||||||
|
if length(batches_from_db) == length(batches) do |
||||||
|
batches_to_import = |
||||||
|
batches_from_db |
||||||
|
|> Enum.reduce([], fn batch, batches -> |
||||||
|
[ |
||||||
|
batch |
||||||
|
|> Rpc.transform_transaction_batch_to_map() |
||||||
|
|> Map.merge(map_to_update) |
||||||
|
| batches |
||||||
|
] |
||||||
|
end) |
||||||
|
|
||||||
|
{:ok, batches_to_import} |
||||||
|
else |
||||||
|
log_warning("Lack of batches received from DB to update") |
||||||
|
{:error, batches} |
||||||
|
end |
||||||
|
end |
||||||
|
end |
@ -0,0 +1,78 @@ |
|||||||
|
defmodule Indexer.Fetcher.ZkSync.StatusTracking.Executed do |
||||||
|
@moduledoc """ |
||||||
|
Functionality to discover executed batches |
||||||
|
""" |
||||||
|
|
||||||
|
alias Indexer.Fetcher.ZkSync.Utils.{Db, Rpc} |
||||||
|
|
||||||
|
import Indexer.Fetcher.ZkSync.StatusTracking.CommonUtils, |
||||||
|
only: [ |
||||||
|
check_if_batch_status_changed: 3, |
||||||
|
associate_and_import_or_prepare_for_recovery: 4 |
||||||
|
] |
||||||
|
|
||||||
|
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_info: 1] |
||||||
|
|
||||||
|
# keccak256("BlockExecution(uint256,bytes32,bytes32)") |
||||||
|
@block_execution_event "0x2402307311a4d6604e4e7b4c8a15a7e1213edb39c16a31efa70afb06030d3165" |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Checks if the oldest unexecuted batch in the database has the associated L1 executing transaction |
||||||
|
by requesting new batch details from RPC. If so, analyzes the `BlockExecution` event emitted by |
||||||
|
the transaction to explore all the batches executed by it. For all discovered batches, it updates |
||||||
|
the database with new associations, importing information about L1 transactions. |
||||||
|
If some of the discovered batches are absent from the database, the function stops and |
returns the list of batch numbers that can be recovered. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `config`: Configuration containing `json_l1_rpc_named_arguments` and |
||||||
|
`json_l2_rpc_named_arguments` defining parameters for the RPC connections. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- `:ok` if no new executed batches are found, or if all found batches and the corresponding L1 |
||||||
|
transactions are imported successfully. |
||||||
|
- `{:recovery_required, batches_to_recover}` if some of the executed batches are missing from |
the database; `batches_to_recover` contains their numbers. |
||||||
|
""" |
||||||
|
@spec look_for_batches_and_update(%{ |
||||||
|
:json_l1_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), |
||||||
|
:json_l2_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), |
||||||
|
optional(any()) => any() |
||||||
|
}) :: :ok | {:recovery_required, list()} |
||||||
|
def look_for_batches_and_update( |
||||||
|
%{ |
||||||
|
json_l1_rpc_named_arguments: json_l1_rpc_named_arguments, |
||||||
|
json_l2_rpc_named_arguments: json_l2_rpc_named_arguments |
||||||
|
} = _config |
||||||
|
) do |
||||||
|
case Db.get_earliest_unexecuted_batch_number() do |
||||||
|
nil -> |
||||||
|
:ok |
||||||
|
|
||||||
|
expected_batch_number -> |
||||||
|
log_info("Checking if the batch #{expected_batch_number} was executed") |
||||||
|
|
||||||
|
{next_action, tx_hash, l1_txs} = |
||||||
|
check_if_batch_status_changed(expected_batch_number, :execute_tx, json_l2_rpc_named_arguments) |
||||||
|
|
||||||
|
case next_action do |
||||||
|
:skip -> |
||||||
|
:ok |
||||||
|
|
||||||
|
:look_for_batches -> |
||||||
|
log_info("The batch #{expected_batch_number} looks like executed") |
||||||
|
execute_tx_receipt = Rpc.fetch_tx_receipt_by_hash(tx_hash, json_l1_rpc_named_arguments) |
||||||
|
batches_numbers_from_rpc = get_executed_batches_from_logs(execute_tx_receipt["logs"]) |
||||||
|
|
||||||
|
associate_and_import_or_prepare_for_recovery(batches_numbers_from_rpc, l1_txs, tx_hash, :execute_id) |
||||||
|
end |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
defp get_executed_batches_from_logs(logs) do |
||||||
|
executed_batches = Rpc.filter_logs_and_extract_topic_at(logs, @block_execution_event, 1) |
||||||
|
log_info("Discovered #{length(executed_batches)} executed batches in the executing tx") |
||||||
|
|
||||||
|
executed_batches |
||||||
|
end |
||||||
|
end |
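
A sketch of what `get_executed_batches_from_logs/1` above does with a receipt, using fabricated log data; the executed batch number is carried in topic #1 of every `BlockExecution` log (here `0x...668a0` = 420000):

    block_execution_event = "0x2402307311a4d6604e4e7b4c8a15a7e1213edb39c16a31efa70afb06030d3165"

    logs = [
      %{
        "topics" => [
          block_execution_event,
          "0x00000000000000000000000000000000000000000000000000000000000668a0"
        ]
      },
      # unrelated event, dropped by the filter
      %{"topics" => ["0x1111111111111111111111111111111111111111111111111111111111111111"]}
    ]

    Rpc.filter_logs_and_extract_topic_at(logs, block_execution_event, 1)
    # => [420_000]
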
@ -0,0 +1,137 @@ |
|||||||
|
defmodule Indexer.Fetcher.ZkSync.StatusTracking.Proven do |
||||||
|
@moduledoc """ |
||||||
|
Functionality to discover proven batches |
||||||
|
""" |
||||||
|
|
||||||
|
alias ABI.{FunctionSelector, TypeDecoder} |
||||||
|
alias Indexer.Fetcher.ZkSync.Utils.{Db, Rpc} |
||||||
|
|
||||||
|
import Indexer.Fetcher.ZkSync.StatusTracking.CommonUtils, |
||||||
|
only: [ |
||||||
|
check_if_batch_status_changed: 3, |
||||||
|
associate_and_import_or_prepare_for_recovery: 4 |
||||||
|
] |
||||||
|
|
||||||
|
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_info: 1] |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Checks if the oldest unproven batch in the database has the associated L1 proving transaction |
||||||
|
by requesting new batch details from RPC. If so, analyzes the calldata of the transaction |
||||||
|
to explore all the batches proven by it. For all discovered batches, it updates |
||||||
|
the database with new associations, importing information about L1 transactions. |
||||||
|
If some of the discovered batches are absent from the database, the function stops and |
returns the list of batch numbers that can be recovered. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `config`: Configuration containing `json_l1_rpc_named_arguments` and |
||||||
|
`json_l2_rpc_named_arguments` defining parameters for the RPC connections. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- `:ok` if no new proven batches are found, or if all found batches and the corresponding L1 |
||||||
|
transactions are imported successfully. |
||||||
|
- `{:recovery_required, batches_to_recover}` if some of the proven batches are missing from |
the database; `batches_to_recover` contains their numbers. |
||||||
|
""" |
||||||
|
@spec look_for_batches_and_update(%{ |
||||||
|
:json_l1_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), |
||||||
|
:json_l2_rpc_named_arguments => EthereumJSONRPC.json_rpc_named_arguments(), |
||||||
|
optional(any()) => any() |
||||||
|
}) :: :ok | {:recovery_required, list()} |
||||||
|
def look_for_batches_and_update( |
||||||
|
%{ |
||||||
|
json_l1_rpc_named_arguments: json_l1_rpc_named_arguments, |
||||||
|
json_l2_rpc_named_arguments: json_l2_rpc_named_arguments |
||||||
|
} = _config |
||||||
|
) do |
||||||
|
case Db.get_earliest_unproven_batch_number() do |
||||||
|
nil -> |
||||||
|
:ok |
||||||
|
|
||||||
|
expected_batch_number -> |
||||||
|
log_info("Checking if the batch #{expected_batch_number} was proven") |
||||||
|
|
||||||
|
{next_action, tx_hash, l1_txs} = |
||||||
|
check_if_batch_status_changed(expected_batch_number, :prove_tx, json_l2_rpc_named_arguments) |
||||||
|
|
||||||
|
case next_action do |
||||||
|
:skip -> |
||||||
|
:ok |
||||||
|
|
||||||
|
:look_for_batches -> |
||||||
|
log_info("The batch #{expected_batch_number} looks like proven") |
||||||
|
prove_tx = Rpc.fetch_tx_by_hash(tx_hash, json_l1_rpc_named_arguments) |
||||||
|
batches_numbers_from_rpc = get_proven_batches_from_calldata(prove_tx["input"]) |
||||||
|
|
||||||
|
associate_and_import_or_prepare_for_recovery(batches_numbers_from_rpc, l1_txs, tx_hash, :prove_id) |
||||||
|
end |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
defp get_proven_batches_from_calldata(calldata) do |
||||||
|
"0x7f61885c" <> encoded_params = calldata |
||||||
|
|
||||||
|
# /// @param batchNumber Rollup batch number |
||||||
|
# /// @param batchHash Hash of L2 batch |
||||||
|
# /// @param indexRepeatedStorageChanges The serial number of the shortcut index that's used as a unique identifier for storage keys that were used twice or more |
||||||
|
# /// @param numberOfLayer1Txs Number of priority operations to be processed |
||||||
|
# /// @param priorityOperationsHash Hash of all priority operations from this batch |
||||||
|
# /// @param l2LogsTreeRoot Root hash of tree that contains L2 -> L1 messages from this batch |
||||||
|
# /// @param timestamp Rollup batch timestamp, have the same format as Ethereum batch constant |
||||||
|
# /// @param commitment Verified input for the zkSync circuit |
||||||
|
# struct StoredBatchInfo { |
||||||
|
# uint64 batchNumber; |
||||||
|
# bytes32 batchHash; |
||||||
|
# uint64 indexRepeatedStorageChanges; |
||||||
|
# uint256 numberOfLayer1Txs; |
||||||
|
# bytes32 priorityOperationsHash; |
||||||
|
# bytes32 l2LogsTreeRoot; |
||||||
|
# uint256 timestamp; |
||||||
|
# bytes32 commitment; |
||||||
|
# } |
||||||
|
# /// @notice Recursive proof input data (individual commitments are constructed onchain) |
||||||
|
# struct ProofInput { |
||||||
|
# uint256[] recursiveAggregationInput; |
||||||
|
# uint256[] serializedProof; |
||||||
|
# } |
||||||
|
# proveBatches(StoredBatchInfo calldata _prevBatch, StoredBatchInfo[] calldata _committedBatches, ProofInput calldata _proof) |
||||||
|
|
||||||
|
# IO.inspect(FunctionSelector.decode("proveBatches((uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),(uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32)[],(uint256[],uint256[]))")) |
||||||
|
[_prev_batch, proven_batches, _proof] = |
||||||
|
TypeDecoder.decode( |
||||||
|
Base.decode16!(encoded_params, case: :lower), |
||||||
|
%FunctionSelector{ |
||||||
|
function: "proveBatches", |
||||||
|
types: [ |
||||||
|
tuple: [ |
||||||
|
uint: 64, |
||||||
|
bytes: 32, |
||||||
|
uint: 64, |
||||||
|
uint: 256, |
||||||
|
bytes: 32, |
||||||
|
bytes: 32, |
||||||
|
uint: 256, |
||||||
|
bytes: 32 |
||||||
|
], |
||||||
|
array: |
||||||
|
{:tuple, |
||||||
|
[ |
||||||
|
uint: 64, |
||||||
|
bytes: 32, |
||||||
|
uint: 64, |
||||||
|
uint: 256, |
||||||
|
bytes: 32, |
||||||
|
bytes: 32, |
||||||
|
uint: 256, |
||||||
|
bytes: 32 |
||||||
|
]}, |
||||||
|
tuple: [array: {:uint, 256}, array: {:uint, 256}] |
||||||
|
] |
||||||
|
} |
||||||
|
) |
||||||
|
|
||||||
|
log_info("Discovered #{length(proven_batches)} proven batches in the prove tx") |
||||||
|
|
||||||
|
proven_batches |
||||||
|
|> Enum.map(fn batch_info -> elem(batch_info, 0) end) |
||||||
|
end |
||||||
|
end |
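
For clarity, the shape of the decoded data with fabricated values: each entry of `proven_batches` is a `StoredBatchInfo` tuple, and only its first field, the batch number, is kept:

    proven_batches = [
      {420_001, <<0::256>>, 0, 0, <<0::256>>, <<0::256>>, 1_700_000_000, <<0::256>>},
      {420_002, <<0::256>>, 0, 0, <<0::256>>, <<0::256>>, 1_700_000_012, <<0::256>>}
    ]

    Enum.map(proven_batches, fn batch_info -> elem(batch_info, 0) end)
    # => [420_001, 420_002]
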
@ -0,0 +1,149 @@ |
|||||||
|
defmodule Indexer.Fetcher.ZkSync.TransactionBatch do |
||||||
|
@moduledoc """ |
||||||
|
Discovers new batches and populates the `zksync_transaction_batches` table. |
||||||
|
|
||||||
|
The fetcher repeats its work by sending itself a `:continue` message every `recheck_interval` seconds. |
||||||
|
|
||||||
|
Each iteration compares the number of the last handled batch stored in the state with the |
||||||
|
latest batch available on the RPC node. If the rollup progresses, all batches between the |
||||||
|
last handled batch (exclusively) and the latest available batch (inclusively) are downloaded from RPC |
||||||
|
in chunks of `chunk_size` and imported into the `zksync_transaction_batches` table. If the latest |
||||||
|
available batch is too far from the last handled batch, only `batches_max_range` batches are downloaded. |
||||||
|
""" |
||||||
|
|
||||||
|
use GenServer |
||||||
|
use Indexer.Fetcher |
||||||
|
|
||||||
|
require Logger |
||||||
|
|
||||||
|
alias Explorer.Chain.ZkSync.Reader |
||||||
|
alias Indexer.Fetcher.ZkSync.Discovery.Workers |
||||||
|
alias Indexer.Fetcher.ZkSync.Utils.Rpc |
||||||
|
|
||||||
|
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_info: 1] |
||||||
|
|
||||||
|
def child_spec(start_link_arguments) do |
||||||
|
spec = %{ |
||||||
|
id: __MODULE__, |
||||||
|
start: {__MODULE__, :start_link, start_link_arguments}, |
||||||
|
restart: :transient, |
||||||
|
type: :worker |
||||||
|
} |
||||||
|
|
||||||
|
Supervisor.child_spec(spec, []) |
||||||
|
end |
||||||
|
|
||||||
|
def start_link(args, gen_server_options \\ []) do |
||||||
|
GenServer.start_link(__MODULE__, args, Keyword.put_new(gen_server_options, :name, __MODULE__)) |
||||||
|
end |
||||||
|
|
||||||
|
@impl GenServer |
||||||
|
def init(args) do |
||||||
|
Logger.metadata(fetcher: :zksync_transaction_batches) |
||||||
|
|
||||||
|
config = Application.get_all_env(:indexer)[Indexer.Fetcher.ZkSync.TransactionBatch] |
||||||
|
chunk_size = config[:chunk_size] |
||||||
|
recheck_interval = config[:recheck_interval] |
||||||
|
batches_max_range = config[:batches_max_range] |
||||||
|
|
||||||
|
Process.send(self(), :init, []) |
||||||
|
|
||||||
|
{:ok, |
||||||
|
%{ |
||||||
|
config: %{ |
||||||
|
chunk_size: chunk_size, |
||||||
|
batches_max_range: batches_max_range, |
||||||
|
json_rpc_named_arguments: args[:json_rpc_named_arguments], |
||||||
|
recheck_interval: recheck_interval |
||||||
|
}, |
||||||
|
data: %{latest_handled_batch_number: 0} |
||||||
|
}} |
||||||
|
end |
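
A hypothetical configuration block matching the keys read by `init/1` above; the actual values and environment-variable mapping are defined in the runtime config elsewhere in this PR:

    config :indexer, Indexer.Fetcher.ZkSync.TransactionBatch,
      chunk_size: 50,
      recheck_interval: 60,
      batches_max_range: 50
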
||||||
|
|
||||||
|
@impl GenServer |
||||||
|
def handle_info(:init, state) do |
||||||
|
latest_handled_batch_number = |
||||||
|
case Reader.latest_available_batch_number() do |
||||||
|
nil -> |
||||||
|
log_info("No batches found in DB. Will start with the latest batch available by RPC") |
||||||
|
# The value received from RPC is decremented in order to not waste |
||||||
|
# the first iteration of handling `:continue` message. |
||||||
|
Rpc.fetch_latest_sealed_batch_number(state.config.json_rpc_named_arguments) - 1 |
||||||
|
|
||||||
|
latest_handled_batch_number -> |
||||||
|
latest_handled_batch_number |
||||||
|
end |
||||||
|
|
||||||
|
Process.send_after(self(), :continue, 2000) |
||||||
|
|
||||||
|
log_info("All batches including #{latest_handled_batch_number} are considered as handled") |
||||||
|
|
||||||
|
{:noreply, %{state | data: %{latest_handled_batch_number: latest_handled_batch_number}}} |
||||||
|
end |
||||||
|
|
||||||
|
# Checks if the rollup progresses by comparing the recently stored batch |
||||||
|
# with the latest batch received from RPC. If progress is detected, it downloads |
||||||
|
# batches, builds their associations with rollup blocks and transactions, and |
||||||
|
# imports the received data to the database. If the latest batch received from RPC |
||||||
|
# is too far from the most recently stored batch, only `batches_max_range` batches |
||||||
|
# are downloaded. All RPC calls to get batch details and receive transactions |
||||||
|
# included in batches are made in chunks of `chunk_size`. |
||||||
|
# |
||||||
|
# After importing batch information, it schedules the next iteration by sending |
||||||
|
# the `:continue` message. The sending of the message is delayed, taking into account |
||||||
|
# the time remaining after downloading and importing processes. |
||||||
|
# |
||||||
|
# ## Parameters |
||||||
|
# - `:continue`: The message triggering the handler. |
||||||
|
# - `state`: The current state of the fetcher containing both the fetcher configuration |
||||||
|
# and the latest handled batch number. |
||||||
|
# |
||||||
|
# ## Returns |
||||||
|
# - `{:noreply, new_state}` where the latest handled batch number is updated with the largest |
||||||
|
# of the batch numbers imported in the current iteration. |
||||||
|
@impl GenServer |
||||||
|
def handle_info( |
||||||
|
:continue, |
||||||
|
%{ |
||||||
|
data: %{latest_handled_batch_number: latest_handled_batch_number}, |
||||||
|
config: %{ |
||||||
|
batches_max_range: batches_max_range, |
||||||
|
json_rpc_named_arguments: json_rpc_named_arguments, |
||||||
|
recheck_interval: recheck_interval, |
||||||
|
chunk_size: _ |
||||||
|
} |
||||||
|
} = state |
||||||
|
) do |
||||||
|
log_info("Checking for a new batch or batches") |
||||||
|
|
||||||
|
latest_sealed_batch_number = Rpc.fetch_latest_sealed_batch_number(json_rpc_named_arguments) |
||||||
|
|
||||||
|
{new_state, handle_duration} = |
||||||
|
if latest_handled_batch_number < latest_sealed_batch_number do |
||||||
|
start_batch_number = latest_handled_batch_number + 1 |
||||||
|
end_batch_number = min(latest_sealed_batch_number, latest_handled_batch_number + batches_max_range) |
||||||
|
|
||||||
|
log_info("Handling the batch range #{start_batch_number}..#{end_batch_number}") |
||||||
|
|
||||||
|
{handle_duration, _} = |
||||||
|
:timer.tc(&Workers.get_minimal_batches_info_and_import/3, [start_batch_number, end_batch_number, state.config]) |
||||||
|
|
||||||
|
{ |
||||||
|
%{state | data: %{latest_handled_batch_number: end_batch_number}}, |
||||||
|
div(handle_duration, 1000) |
||||||
|
} |
||||||
|
else |
||||||
|
{state, 0} |
||||||
|
end |
||||||
|
|
||||||
|
Process.send_after(self(), :continue, max(:timer.seconds(recheck_interval) - handle_duration, 0)) |
||||||
|
|
||||||
|
{:noreply, new_state} |
||||||
|
end |
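
A worked example of the range and rescheduling arithmetic above, with illustrative numbers (`handle_duration` is in milliseconds after the `div/2` call):

    latest_handled_batch_number = 100
    latest_sealed_batch_number = 500
    batches_max_range = 50

    start_batch_number = latest_handled_batch_number + 1                                                 # 101
    end_batch_number = min(latest_sealed_batch_number, latest_handled_batch_number + batches_max_range)  # 150

    recheck_interval = 60      # seconds
    handle_duration = 70_000   # the iteration took 70 s
    max(:timer.seconds(recheck_interval) - handle_duration, 0)
    # => 0, so the next `:continue` message is sent immediately
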
||||||
|
|
||||||
|
@impl GenServer |
||||||
|
def handle_info({ref, _result}, state) do |
||||||
|
Process.demonitor(ref, [:flush]) |
||||||
|
{:noreply, state} |
||||||
|
end |
||||||
|
end |
@ -0,0 +1,204 @@ |
|||||||
|
defmodule Indexer.Fetcher.ZkSync.Utils.Db do |
||||||
|
@moduledoc """ |
||||||
|
Common functions to simplify DB routines for Indexer.Fetcher.ZkSync fetchers |
||||||
|
""" |
||||||
|
|
||||||
|
alias Explorer.Chain |
||||||
|
alias Explorer.Chain.ZkSync.Reader |
||||||
|
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_warning: 1, log_info: 1] |
||||||
|
|
||||||
|
@json_batch_fields_absent_in_db_batch [ |
||||||
|
:commit_tx_hash, |
||||||
|
:commit_timestamp, |
||||||
|
:prove_tx_hash, |
||||||
|
:prove_timestamp, |
||||||
|
:executed_tx_hash, |
||||||
|
:executed_timestamp |
||||||
|
] |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Deletes elements in the batch description map to prepare the batch for importing to |
||||||
|
the database. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `batch_with_json_fields`: a map describing a batch that may still contain RPC-only |
elements left over from downloading batch details. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A map describing the batch compatible with the database import operation. |
||||||
|
""" |
||||||
|
@spec prune_json_batch(map()) :: map() |
||||||
|
def prune_json_batch(batch_with_json_fields) |
||||||
|
when is_map(batch_with_json_fields) do |
||||||
|
Map.drop(batch_with_json_fields, @json_batch_fields_absent_in_db_batch) |
||||||
|
end |
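
For illustration, with a fabricated batch map: only the fields listed in `@json_batch_fields_absent_in_db_batch` are dropped:

    batch = %{
      number: 420_000,
      root_hash: <<0::256>>,
      commit_tx_hash: <<1::256>>,
      commit_timestamp: ~U[2024-01-01 00:00:00Z]
    }

    prune_json_batch(batch)
    # => %{number: 420_000, root_hash: <<0::256>>}
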
||||||
|
|
||||||
|
@doc """ |
||||||
|
Gets the oldest imported batch number. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- none |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A batch number or `nil` if there are no batches in the database. |
||||||
|
""" |
||||||
|
@spec get_earliest_batch_number() :: nil | non_neg_integer() |
||||||
|
def get_earliest_batch_number do |
||||||
|
case Reader.oldest_available_batch_number() do |
||||||
|
nil -> |
||||||
|
log_warning("No batches found in DB") |
||||||
|
nil |
||||||
|
|
||||||
|
value -> |
||||||
|
value |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Gets the oldest imported batch number without an associated commitment L1 transaction. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- none |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A batch number or `nil` in cases where there are no batches in the database or |
||||||
|
all batches in the database are marked as committed. |
||||||
|
""" |
||||||
|
@spec get_earliest_sealed_batch_number() :: nil | non_neg_integer() |
||||||
|
def get_earliest_sealed_batch_number do |
||||||
|
case Reader.earliest_sealed_batch_number() do |
||||||
|
nil -> |
||||||
|
log_info("No uncommitted batches found in DB") |
||||||
|
nil |
||||||
|
|
||||||
|
value -> |
||||||
|
value |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Gets the oldest imported batch number without an associated proving L1 transaction. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- none |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A batch number or `nil` in cases where there are no batches in the database or |
||||||
|
all batches in the database are marked as proven. |
||||||
|
""" |
||||||
|
@spec get_earliest_unproven_batch_number() :: nil | non_neg_integer() |
||||||
|
def get_earliest_unproven_batch_number do |
||||||
|
case Reader.earliest_unproven_batch_number() do |
||||||
|
nil -> |
||||||
|
log_info("No unproven batches found in DB") |
||||||
|
nil |
||||||
|
|
||||||
|
value -> |
||||||
|
value |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Gets the oldest imported batch number without an associated executing L1 transaction. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- none |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A batch number or `nil` in cases where there are no batches in the database or |
||||||
|
all batches in the database are marked as executed. |
||||||
|
""" |
||||||
|
@spec get_earliest_unexecuted_batch_number() :: nil | non_neg_integer() |
||||||
|
def get_earliest_unexecuted_batch_number do |
||||||
|
case Reader.earliest_unexecuted_batch_number() do |
||||||
|
nil -> |
||||||
|
log_info("No unexecuted batches found in DB") |
||||||
|
nil |
||||||
|
|
||||||
|
value -> |
||||||
|
value |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Indexes L1 transactions provided in the input map. For transactions that |
||||||
|
are already in the database, existing indices are taken. For new transactions, |
||||||
|
the next available indices are assigned. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `new_l1_txs`: A map of L1 transaction descriptions. The keys of the map are |
||||||
|
transaction hashes. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- `l1_txs`: A map of L1 transaction descriptions. Each element is extended with |
||||||
|
the key `:id`, representing the index of the L1 transaction in the |
||||||
|
`zksync_lifecycle_l1_transactions` table. |
||||||
|
""" |
||||||
|
@spec get_indices_for_l1_transactions(map()) :: any() |
||||||
|
def get_indices_for_l1_transactions(new_l1_txs) |
||||||
|
when is_map(new_l1_txs) do |
||||||
|
# Get indices for l1 transactions previously handled |
||||||
|
l1_txs = |
||||||
|
new_l1_txs |
||||||
|
|> Map.keys() |
||||||
|
|> Reader.lifecycle_transactions() |
||||||
|
|> Enum.reduce(new_l1_txs, fn {hash, id}, txs -> |
||||||
|
{_, txs} = |
||||||
|
Map.get_and_update!(txs, hash.bytes, fn l1_tx -> |
||||||
|
{l1_tx, Map.put(l1_tx, :id, id)} |
||||||
|
end) |
||||||
|
|
||||||
|
txs |
||||||
|
end) |
||||||
|
|
||||||
|
# Get the next index for the first new transaction based |
||||||
|
# on the indices existing in DB |
||||||
|
l1_tx_next_id = Reader.next_id() |
||||||
|
|
||||||
|
# Assign new indices for the transactions which are not in |
||||||
|
# the l1 transactions table yet |
||||||
|
{updated_l1_txs, _} = |
||||||
|
l1_txs |
||||||
|
|> Map.keys() |
||||||
|
|> Enum.reduce( |
||||||
|
{l1_txs, l1_tx_next_id}, |
||||||
|
fn hash, {txs, next_id} -> |
||||||
|
tx = txs[hash] |
||||||
|
id = Map.get(tx, :id) |
||||||
|
|
||||||
|
if is_nil(id) do |
||||||
|
{Map.put(txs, hash, Map.put(tx, :id, next_id)), next_id + 1} |
||||||
|
else |
||||||
|
{txs, next_id} |
||||||
|
end |
||||||
|
end |
||||||
|
) |
||||||
|
|
||||||
|
updated_l1_txs |
||||||
|
end |
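
A worked example with fabricated hashes, assuming the first transaction is already stored in the database under index 3 and `Reader.next_id/0` returns 4:

    new_l1_txs = %{
      <<0xAA::256>> => %{hash: <<0xAA::256>>, timestamp: ~U[2024-01-01 00:00:00Z]},
      <<0xBB::256>> => %{hash: <<0xBB::256>>, timestamp: ~U[2024-01-01 00:05:00Z]}
    }

    get_indices_for_l1_transactions(new_l1_txs)
    # => %{
    #      <<0xAA::256>> => %{hash: <<0xAA::256>>, timestamp: ~U[2024-01-01 00:00:00Z], id: 3},
    #      <<0xBB::256>> => %{hash: <<0xBB::256>>, timestamp: ~U[2024-01-01 00:05:00Z], id: 4}
    #    }
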
||||||
|
|
||||||
|
@doc """ |
||||||
|
Imports provided lists of batches and their associations with L1 transactions, rollup blocks, |
||||||
|
and transactions to the database. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `batches`: A list of maps with batch descriptions. |
||||||
|
- `l1_txs`: A list of maps with L1 transaction descriptions. Optional. |
||||||
|
- `l2_txs`: A list of maps with rollup transaction associations. Optional. |
||||||
|
- `l2_blocks`: A list of maps with rollup block associations. Optional. |
||||||
|
|
||||||
|
## Returns |
||||||
|
n/a |
||||||
|
""" |
||||||
|
def import_to_db(batches, l1_txs \\ [], l2_txs \\ [], l2_blocks \\ []) |
||||||
|
when is_list(batches) and is_list(l1_txs) and is_list(l2_txs) and is_list(l2_blocks) do |
||||||
|
{:ok, _} = |
||||||
|
Chain.import(%{ |
||||||
|
zksync_lifecycle_transactions: %{params: l1_txs}, |
||||||
|
zksync_transaction_batches: %{params: batches}, |
||||||
|
zksync_batch_transactions: %{params: l2_txs}, |
||||||
|
zksync_batch_blocks: %{params: l2_blocks}, |
||||||
|
timeout: :infinity |
||||||
|
}) |
||||||
|
end |
||||||
|
end |
@ -0,0 +1,143 @@ |
|||||||
|
defmodule Indexer.Fetcher.ZkSync.Utils.Logging do |
||||||
|
@moduledoc """ |
||||||
|
Common logging functions for Indexer.Fetcher.ZkSync fetchers |
||||||
|
""" |
||||||
|
require Logger |
||||||
|
|
||||||
|
@doc """ |
||||||
|
A helper function to log a message with warning severity. Uses `Logger.warning` facility. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `msg`: a message to log |
||||||
|
|
||||||
|
## Returns |
||||||
|
`:ok` |
||||||
|
""" |
||||||
|
@spec log_warning(any()) :: :ok |
||||||
|
def log_warning(msg) do |
||||||
|
Logger.warning(msg) |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
A helper function to log a message with info severity. Uses `Logger.info` facility. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `msg`: a message to log |
||||||
|
|
||||||
|
## Returns |
||||||
|
`:ok` |
||||||
|
""" |
||||||
|
@spec log_info(any()) :: :ok |
||||||
|
def log_info(msg) do |
||||||
|
Logger.info(msg) |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
A helper function to log a message with error severity. Uses `Logger.error` facility. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `msg`: a message to log |
||||||
|
|
||||||
|
## Returns |
||||||
|
`:ok` |
||||||
|
""" |
||||||
|
@spec log_error(any()) :: :ok |
||||||
|
def log_error(msg) do |
||||||
|
Logger.error(msg) |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
A helper function to log progress when handling batches in chunks. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `prefix`: A prefix for the logging message. |
||||||
|
- `chunk`: A list of batch numbers in the current chunk. |
||||||
|
- `current_progress`: The total number of batches handled up to this moment. |
||||||
|
- `total`: The total number of batches across all chunks. |
||||||
|
|
||||||
|
## Returns |
||||||
|
`:ok` |
||||||
|
|
||||||
|
## Examples: |
||||||
|
- `log_details_chunk_handling("A message", [1, 2, 3], 0, 10)` produces |
||||||
|
`A message for batches 1..3. Progress 30%` |
||||||
|
- `log_details_chunk_handling("A message", [2], 1, 10)` produces |
||||||
|
`A message for batch 2. Progress 20%` |
||||||
|
- `log_details_chunk_handling("A message", [35], 0, 1)` produces |
||||||
|
`A message for batch 35.` |
||||||
|
- `log_details_chunk_handling("A message", [45, 50, 51, 52, 60], 1, 1)` produces |
||||||
|
`A message for batches 45, 50..52, 60.` |
||||||
|
""" |
||||||
|
@spec log_details_chunk_handling(binary(), list(), non_neg_integer(), non_neg_integer()) :: :ok |
||||||
|
def log_details_chunk_handling(prefix, chunk, current_progress, total) |
||||||
|
when is_binary(prefix) and is_list(chunk) and (is_integer(current_progress) and current_progress >= 0) and |
||||||
|
(is_integer(total) and total > 0) do |
||||||
|
chunk_length = length(chunk) |
||||||
|
|
||||||
|
progress = |
||||||
|
case chunk_length == total do |
||||||
|
true -> |
||||||
|
"" |
||||||
|
|
||||||
|
false -> |
||||||
|
percentage = |
||||||
|
(current_progress + chunk_length) |
||||||
|
|> Decimal.div(total) |
||||||
|
|> Decimal.mult(100) |
||||||
|
|> Decimal.round(2) |
||||||
|
|> Decimal.to_string() |
||||||
|
|
||||||
|
" Progress: #{percentage}%" |
||||||
|
end |
||||||
|
|
||||||
|
if chunk_length == 1 do |
||||||
|
log_info("#{prefix} for batch ##{Enum.at(chunk, 0)}.") |
||||||
|
else |
||||||
|
log_info("#{prefix} for batches #{Enum.join(shorten_numbers_list(chunk), ", ")}.#{progress}") |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
# Transforms a list of numbers into a list of strings where consecutive values |
# are combined into a range. |
||||||
|
# |
||||||
|
# ## Parameters |
||||||
|
# - `numbers_list`: a list of numbers to fold into ranges |
||||||
|
# |
||||||
|
# ## Returns |
||||||
|
# `shorten_list` - resulting list after folding |
||||||
|
# |
||||||
|
# ## Examples: |
||||||
|
# [1, 2, 3] => ["1..3"] |
||||||
|
# [1, 3] => ["1", "3"] |
||||||
|
# [1, 2] => ["1..2"] |
||||||
|
# [1, 3, 4, 5] => ["1", "3..5"] |
||||||
|
defp shorten_numbers_list(numbers_list) do |
||||||
|
{shorten_list, _, _} = |
||||||
|
numbers_list |
||||||
|
|> Enum.sort() |
||||||
|
|> Enum.reduce({[], nil, nil}, fn number, {shorten_list, prev_range_start, prev_number} -> |
||||||
|
shorten_numbers_list_impl(number, shorten_list, prev_range_start, prev_number) |
||||||
|
end) |
||||||
|
|> then(fn {shorten_list, prev_range_start, prev_number} -> |
||||||
|
shorten_numbers_list_impl(prev_number, shorten_list, prev_range_start, prev_number) |
||||||
|
end) |
||||||
|
|
||||||
|
Enum.reverse(shorten_list) |
||||||
|
end |
||||||
|
|
||||||
|
defp shorten_numbers_list_impl(number, shorten_list, prev_range_start, prev_number) do |
||||||
|
cond do |
||||||
|
is_nil(prev_number) -> |
||||||
|
{[], number, number} |
||||||
|
|
||||||
|
prev_number + 1 != number and prev_range_start == prev_number -> |
||||||
|
{["#{prev_range_start}" | shorten_list], number, number} |
||||||
|
|
||||||
|
prev_number + 1 != number -> |
||||||
|
{["#{prev_range_start}..#{prev_number}" | shorten_list], number, number} |
||||||
|
|
||||||
|
true -> |
||||||
|
{shorten_list, prev_range_start, number} |
||||||
|
end |
||||||
|
end |
||||||
|
end |
@ -0,0 +1,403 @@ |
|||||||
|
defmodule Indexer.Fetcher.ZkSync.Utils.Rpc do |
||||||
|
@moduledoc """ |
||||||
|
Common functions to handle RPC calls for Indexer.Fetcher.ZkSync fetchers |
||||||
|
""" |
||||||
|
|
||||||
|
import EthereumJSONRPC, only: [json_rpc: 2, quantity_to_integer: 1] |
||||||
|
import Indexer.Fetcher.ZkSync.Utils.Logging, only: [log_error: 1] |
||||||
|
|
||||||
|
@zero_hash "0000000000000000000000000000000000000000000000000000000000000000" |
||||||
|
@zero_hash_binary <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>> |
||||||
|
|
||||||
|
@rpc_resend_attempts 20 |
||||||
|
|
||||||
|
def get_zero_hash do |
||||||
|
@zero_hash |
||||||
|
end |
||||||
|
|
||||||
|
def get_binary_zero_hash do |
||||||
|
@zero_hash_binary |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Filters a list of transaction logs, keeping only the logs whose topic #0 equals `topic_0`, and |
builds a list of the values located at topic position `position` in those logs. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `logs`: The list of transaction logs to filter by a specific topic. |
- `topic_0`: The value of topic #0 in the required logs. |
- `position`: The index of the topic to extract from every matching log |
and append to the resulting list. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A list of values extracted from the required transaction logs. |
||||||
|
- An empty list if no logs with the specified topic are found. |
||||||
|
""" |
||||||
|
@spec filter_logs_and_extract_topic_at(maybe_improper_list(), binary(), integer()) :: list() |
||||||
|
def filter_logs_and_extract_topic_at(logs, topic_0, position) |
||||||
|
when is_list(logs) and |
||||||
|
is_binary(topic_0) and |
||||||
|
(is_integer(position) and position >= 0 and position <= 3) do |
||||||
|
logs |
||||||
|
|> Enum.reduce([], fn log_entity, result -> |
||||||
|
topics = log_entity["topics"] |
||||||
|
|
||||||
|
if Enum.at(topics, 0) == topic_0 do |
||||||
|
[quantity_to_integer(Enum.at(topics, position)) | result] |
||||||
|
else |
||||||
|
result |
||||||
|
end |
||||||
|
end) |
||||||
|
end |
||||||
|
|
||||||
|
defp from_ts_to_datetime(time_ts) do |
||||||
|
{_, unix_epoch_starts} = DateTime.from_unix(0) |
||||||
|
|
||||||
|
case is_nil(time_ts) or time_ts == 0 do |
||||||
|
true -> |
||||||
|
unix_epoch_starts |
||||||
|
|
||||||
|
false -> |
||||||
|
case DateTime.from_unix(time_ts) do |
||||||
|
{:ok, datetime} -> |
||||||
|
datetime |
||||||
|
|
||||||
|
{:error, _} -> |
||||||
|
unix_epoch_starts |
||||||
|
end |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
defp from_iso8601_to_datetime(time_string) do |
||||||
|
case is_nil(time_string) do |
||||||
|
true -> |
||||||
|
from_ts_to_datetime(0) |
||||||
|
|
||||||
|
false -> |
||||||
|
case DateTime.from_iso8601(time_string) do |
||||||
|
{:ok, datetime, _} -> |
||||||
|
datetime |
||||||
|
|
||||||
|
{:error, _} -> |
||||||
|
from_ts_to_datetime(0) |
||||||
|
end |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
defp json_txid_to_hash(hash) do |
||||||
|
case hash do |
||||||
|
"0x" <> tx_hash -> tx_hash |
||||||
|
nil -> @zero_hash |
||||||
|
end |
||||||
|
end |
||||||
|
|
||||||
|
defp strhash_to_byteshash(hash) do |
||||||
|
hash |
||||||
|
|> json_txid_to_hash() |
||||||
|
|> Base.decode16!(case: :mixed) |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Transforms a map with batch data received from the `zks_getL1BatchDetails` call |
||||||
|
into a map that can be used by Indexer.Fetcher.ZkSync fetchers for further handling. |
||||||
|
All hexadecimal hashes are converted to their decoded binary representation, and |
Unix and ISO8601 timestamps are converted to DateTime objects. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `json_response`: Raw data received from the JSON RPC call. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A map containing minimal information about the batch. `start_block` and `end_block` |
||||||
|
elements are set to `nil`. |
||||||
|
""" |
||||||
|
@spec transform_batch_details_to_map(map()) :: map() |
||||||
|
def transform_batch_details_to_map(json_response) |
||||||
|
when is_map(json_response) do |
||||||
|
%{ |
||||||
|
"number" => {:number, :ok}, |
||||||
|
"timestamp" => {:timestamp, :ts_to_datetime}, |
||||||
|
"l1TxCount" => {:l1_tx_count, :ok}, |
||||||
|
"l2TxCount" => {:l2_tx_count, :ok}, |
||||||
|
"rootHash" => {:root_hash, :str_to_byteshash}, |
||||||
|
"commitTxHash" => {:commit_tx_hash, :str_to_byteshash}, |
||||||
|
"committedAt" => {:commit_timestamp, :iso8601_to_datetime}, |
||||||
|
"proveTxHash" => {:prove_tx_hash, :str_to_byteshash}, |
||||||
|
"provenAt" => {:prove_timestamp, :iso8601_to_datetime}, |
||||||
|
"executeTxHash" => {:executed_tx_hash, :str_to_byteshash}, |
||||||
|
"executedAt" => {:executed_timestamp, :iso8601_to_datetime}, |
||||||
|
"l1GasPrice" => {:l1_gas_price, :ok}, |
||||||
|
"l2FairGasPrice" => {:l2_fair_gas_price, :ok} |
||||||
|
# :start_block added by request_block_ranges_by_rpc |
||||||
|
# :end_block added by request_block_ranges_by_rpc |
||||||
|
} |
||||||
|
|> Enum.reduce(%{start_block: nil, end_block: nil}, fn {key, {key_atom, transform_type}}, batch_details_map -> |
||||||
|
value_in_json_response = Map.get(json_response, key) |
||||||
|
|
||||||
|
Map.put( |
||||||
|
batch_details_map, |
||||||
|
key_atom, |
||||||
|
case transform_type do |
||||||
|
:iso8601_to_datetime -> from_iso8601_to_datetime(value_in_json_response) |
||||||
|
:ts_to_datetime -> from_ts_to_datetime(value_in_json_response) |
||||||
|
:str_to_txhash -> json_txid_to_hash(value_in_json_response) |
||||||
|
:str_to_byteshash -> strhash_to_byteshash(value_in_json_response) |
||||||
|
_ -> value_in_json_response |
||||||
|
end |
||||||
|
) |
||||||
|
end) |
||||||
|
end |
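
An illustrative input/output pair with a fabricated response; keys missing from the RPC reply fall back to the zero hash or the Unix epoch through the helpers above:

    json_response = %{
      "number" => 420_000,
      "timestamp" => 1_700_000_000,
      "rootHash" => "0x" <> String.duplicate("ab", 32),
      "committedAt" => "2024-01-01T00:00:00Z"
    }

    transform_batch_details_to_map(json_response)
    # => %{
    #      number: 420_000,
    #      timestamp: ~U[2023-11-14 22:13:20Z],
    #      root_hash: <<0xAB, 0xAB, ...>>,                 # 32 decoded bytes
    #      commit_timestamp: ~U[2024-01-01 00:00:00Z],
    #      commit_tx_hash: <<0, 0, ...>>,                  # zero hash, field absent in the reply
    #      start_block: nil,
    #      end_block: nil,
    #      ...
    #    }
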
||||||
|
|
||||||
|
@doc """ |
||||||
|
Transforms a map with batch data received from the database into a map that |
||||||
|
can be used by Indexer.Fetcher.ZkSync fetchers for further handling. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `batch`: A map containing a batch description received from the database. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A map containing a simplified representation of the batch, compatible with |
the database import operation. |
||||||
|
""" |
||||||
|
def transform_transaction_batch_to_map(batch) |
||||||
|
when is_map(batch) do |
||||||
|
%{ |
||||||
|
number: batch.number, |
||||||
|
timestamp: batch.timestamp, |
||||||
|
l1_tx_count: batch.l1_tx_count, |
||||||
|
l2_tx_count: batch.l2_tx_count, |
||||||
|
root_hash: batch.root_hash.bytes, |
||||||
|
l1_gas_price: batch.l1_gas_price, |
||||||
|
l2_fair_gas_price: batch.l2_fair_gas_price, |
||||||
|
start_block: batch.start_block, |
||||||
|
end_block: batch.end_block, |
||||||
|
commit_id: batch.commit_id, |
||||||
|
prove_id: batch.prove_id, |
||||||
|
execute_id: batch.execute_id |
||||||
|
} |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Retrieves batch details from the RPC endpoint using the `zks_getL1BatchDetails` call. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `batch_number`: The batch number or identifier. |
||||||
|
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A map containing minimal batch details. It includes `start_block` and `end_block` |
||||||
|
elements, both set to `nil`. |
||||||
|
""" |
||||||
|
@spec fetch_batch_details_by_batch_number(binary() | non_neg_integer(), EthereumJSONRPC.json_rpc_named_arguments()) :: |
||||||
|
map() |
||||||
|
def fetch_batch_details_by_batch_number(batch_number, json_rpc_named_arguments) |
||||||
|
when (is_integer(batch_number) or is_binary(batch_number)) and is_list(json_rpc_named_arguments) do |
||||||
|
req = |
||||||
|
EthereumJSONRPC.request(%{ |
||||||
|
id: batch_number, |
||||||
|
method: "zks_getL1BatchDetails", |
||||||
|
params: [batch_number] |
||||||
|
}) |
||||||
|
|
||||||
|
error_message = &"Cannot call zks_getL1BatchDetails. Error: #{inspect(&1)}" |
||||||
|
|
||||||
|
{:ok, resp} = repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, @rpc_resend_attempts) |
||||||
|
|
||||||
|
transform_batch_details_to_map(resp) |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Fetches transaction details from the RPC endpoint using the `eth_getTransactionByHash` call. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `raw_hash`: The hash of the Ethereum transaction. It can be provided as a decoded binary |
||||||
|
or hexadecimal string. |
||||||
|
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A map containing details of the transaction. |
||||||
|
""" |
||||||
|
@spec fetch_tx_by_hash(binary(), EthereumJSONRPC.json_rpc_named_arguments()) :: map() |
||||||
|
def fetch_tx_by_hash(raw_hash, json_rpc_named_arguments) |
||||||
|
when is_binary(raw_hash) and is_list(json_rpc_named_arguments) do |
||||||
|
hash = |
||||||
|
case raw_hash do |
||||||
|
"0x" <> _ -> raw_hash |
||||||
|
_ -> "0x" <> Base.encode16(raw_hash) |
||||||
|
end |
||||||
|
|
||||||
|
req = |
||||||
|
EthereumJSONRPC.request(%{ |
||||||
|
id: 0, |
||||||
|
method: "eth_getTransactionByHash", |
||||||
|
params: [hash] |
||||||
|
}) |
||||||
|
|
||||||
|
error_message = &"Cannot call eth_getTransactionByHash for hash #{hash}. Error: #{inspect(&1)}" |
||||||
|
|
||||||
|
{:ok, resp} = repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, @rpc_resend_attempts) |
||||||
|
|
||||||
|
resp |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Fetches the transaction receipt from the RPC endpoint using the `eth_getTransactionReceipt` call. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `raw_hash`: The hash of the Ethereum transaction. It can be provided as a decoded binary |
||||||
|
or hexadecimal string. |
||||||
|
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A map containing the receipt details of the transaction. |
||||||
|
""" |
||||||
|
@spec fetch_tx_receipt_by_hash(binary(), EthereumJSONRPC.json_rpc_named_arguments()) :: map() |
||||||
|
def fetch_tx_receipt_by_hash(raw_hash, json_rpc_named_arguments) |
||||||
|
when is_binary(raw_hash) and is_list(json_rpc_named_arguments) do |
||||||
|
hash = |
||||||
|
case raw_hash do |
||||||
|
"0x" <> _ -> raw_hash |
||||||
|
_ -> "0x" <> Base.encode16(raw_hash) |
||||||
|
end |
||||||
|
|
||||||
|
req = |
||||||
|
EthereumJSONRPC.request(%{ |
||||||
|
id: 0, |
||||||
|
method: "eth_getTransactionReceipt", |
||||||
|
params: [hash] |
||||||
|
}) |
||||||
|
|
||||||
|
error_message = &"Cannot call eth_getTransactionReceipt for hash #{hash}. Error: #{inspect(&1)}" |
||||||
|
|
||||||
|
{:ok, resp} = repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, @rpc_resend_attempts) |
||||||
|
|
||||||
|
resp |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Fetches the latest sealed batch number from the RPC endpoint using the `zks_L1BatchNumber` call. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A non-negative integer representing the latest sealed batch number. |
||||||
|
""" |
||||||
|
@spec fetch_latest_sealed_batch_number(EthereumJSONRPC.json_rpc_named_arguments()) :: nil | non_neg_integer() |
||||||
|
def fetch_latest_sealed_batch_number(json_rpc_named_arguments) |
||||||
|
when is_list(json_rpc_named_arguments) do |
||||||
|
req = EthereumJSONRPC.request(%{id: 0, method: "zks_L1BatchNumber", params: []}) |
||||||
|
|
||||||
|
error_message = &"Cannot call zks_L1BatchNumber. Error: #{inspect(&1)}" |
||||||
|
|
||||||
|
{:ok, resp} = repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, @rpc_resend_attempts) |
||||||
|
|
||||||
|
quantity_to_integer(resp) |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Fetches block details using multiple `eth_getBlockByNumber` RPC calls. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `requests_list`: A list of `EthereumJSONRPC.Transport.request()` representing multiple |
||||||
|
`eth_getBlockByNumber` RPC calls for different block numbers. |
||||||
|
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A list of responses containing details of the requested blocks. |
||||||
|
""" |
||||||
|
@spec fetch_blocks_details([EthereumJSONRPC.Transport.request()], EthereumJSONRPC.json_rpc_named_arguments()) :: |
||||||
|
list() |
||||||
|
def fetch_blocks_details(requests_list, json_rpc_named_arguments) |
||||||
|
|
||||||
|
def fetch_blocks_details([], _) do |
||||||
|
[] |
||||||
|
end |
||||||
|
|
||||||
|
def fetch_blocks_details(requests_list, json_rpc_named_arguments) |
||||||
|
when is_list(requests_list) and is_list(json_rpc_named_arguments) do |
||||||
|
error_message = &"Cannot call eth_getBlockByNumber. Error: #{inspect(&1)}" |
||||||
|
|
||||||
|
{:ok, responses} = |
||||||
|
repeated_call(&json_rpc/2, [requests_list, json_rpc_named_arguments], error_message, @rpc_resend_attempts) |
||||||
|
|
||||||
|
responses |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Fetches batches details using multiple `zks_getL1BatchDetails` RPC calls. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `requests_list`: A list of `EthereumJSONRPC.Transport.request()` representing multiple |
||||||
|
`zks_getL1BatchDetails` RPC calls for different block numbers. |
||||||
|
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A list of responses containing details of the requested batches. |
||||||
|
""" |
||||||
|
@spec fetch_batches_details([EthereumJSONRPC.Transport.request()], EthereumJSONRPC.json_rpc_named_arguments()) :: |
||||||
|
list() |
||||||
|
def fetch_batches_details(requests_list, json_rpc_named_arguments) |
||||||
|
|
||||||
|
def fetch_batches_details([], _) do |
||||||
|
[] |
||||||
|
end |
||||||
|
|
||||||
|
def fetch_batches_details(requests_list, json_rpc_named_arguments) |
||||||
|
when is_list(requests_list) and is_list(json_rpc_named_arguments) do |
||||||
|
error_message = &"Cannot call zks_getL1BatchDetails. Error: #{inspect(&1)}" |
||||||
|
|
||||||
|
{:ok, responses} = |
||||||
|
repeated_call(&json_rpc/2, [requests_list, json_rpc_named_arguments], error_message, @rpc_resend_attempts) |
||||||
|
|
||||||
|
responses |
||||||
|
end |
||||||
|
|
||||||
|
@doc """ |
||||||
|
Fetches block ranges included in the specified batches by using multiple |
||||||
|
`zks_getL1BatchBlockRange` RPC calls. |
||||||
|
|
||||||
|
## Parameters |
||||||
|
- `requests_list`: A list of `EthereumJSONRPC.Transport.request()` representing multiple |
||||||
|
`zks_getL1BatchBlockRange` RPC calls for different batch numbers. |
||||||
|
- `json_rpc_named_arguments`: Configuration parameters for the JSON RPC connection. |
||||||
|
|
||||||
|
## Returns |
||||||
|
- A list of responses containing block ranges associated with the requested batches. |
||||||
|
""" |
||||||
|
@spec fetch_blocks_ranges([EthereumJSONRPC.Transport.request()], EthereumJSONRPC.json_rpc_named_arguments()) :: |
||||||
|
list() |
||||||
|
def fetch_blocks_ranges(requests_list, json_rpc_named_arguments) |
||||||
|
|
||||||
|
def fetch_blocks_ranges([], _) do |
||||||
|
[] |
||||||
|
end |
||||||
|
|
||||||
|
def fetch_blocks_ranges(requests_list, json_rpc_named_arguments) |
||||||
|
when is_list(requests_list) and is_list(json_rpc_named_arguments) do |
||||||
|
error_message = &"Cannot call zks_getL1BatchBlockRange. Error: #{inspect(&1)}" |
||||||
|
|
||||||
|
{:ok, responses} = |
||||||
|
repeated_call(&json_rpc/2, [requests_list, json_rpc_named_arguments], error_message, @rpc_resend_attempts) |
||||||
|
|
||||||
|
responses |
||||||
|
end |
||||||
|
|
||||||
|
defp repeated_call(func, args, error_message, retries_left) do |
||||||
|
case apply(func, args) do |
||||||
|
{:ok, _} = res -> |
||||||
|
res |
||||||
|
|
||||||
|
{:error, message} = err -> |
||||||
|
retries_left = retries_left - 1 |
||||||
|
|
||||||
|
if retries_left <= 0 do |
||||||
|
log_error(error_message.(message)) |
||||||
|
err |
||||||
|
else |
||||||
|
log_error("#{error_message.(message)} Retrying...") |
||||||
|
:timer.sleep(3000) |
||||||
|
repeated_call(func, args, error_message, retries_left) |
||||||
|
end |
||||||
|
end |
||||||
|
end |
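
A sketch of the retry behaviour (the function is private, so this is illustrative only): a failing call is retried every 3 seconds until the attempt budget is exhausted, after which the error is logged and returned.

    flaky_call = fn _req, _args -> {:error, :timeout} end
    error_message = &"Cannot call example endpoint. Error: #{inspect(&1)}"

    repeated_call(flaky_call, [:req, :args], error_message, 3)
    # logs "Cannot call example endpoint. Error: :timeout Retrying..." twice,
    # sleeping ~3 s in between, then logs the error once more and returns {:error, :timeout}
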
||||||
|
end |