Merge pull request #9098 from blockscout/va-zkevm-bridge
Polygon zkEVM Bridge indexer and API v2 extension
commit 6ae6692d02
@@ -0,0 +1,101 @@
defmodule Explorer.Chain.Import.Runner.PolygonZkevm.BridgeL1Tokens do
  @moduledoc """
  Bulk imports `t:Explorer.Chain.PolygonZkevm.BridgeL1Token.t/0`.
  """

  require Ecto.Query

  import Ecto.Query, only: [from: 2]

  alias Ecto.{Changeset, Multi, Repo}
  alias Explorer.Chain.Import
  alias Explorer.Chain.PolygonZkevm.BridgeL1Token
  alias Explorer.Prometheus.Instrumenter

  @behaviour Import.Runner

  # milliseconds
  @timeout 60_000

  @type imported :: [BridgeL1Token.t()]

  @impl Import.Runner
  def ecto_schema_module, do: BridgeL1Token

  @impl Import.Runner
  def option_key, do: :polygon_zkevm_bridge_l1_tokens

  @impl Import.Runner
  def imported_table_row do
    %{
      value_type: "[#{ecto_schema_module()}.t()]",
      value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
    }
  end

  @impl Import.Runner
  def run(multi, changes_list, %{timestamps: timestamps} = options) do
    insert_options =
      options
      |> Map.get(option_key(), %{})
      |> Map.take(~w(on_conflict timeout)a)
      |> Map.put_new(:timeout, @timeout)
      |> Map.put(:timestamps, timestamps)

    Multi.run(multi, :insert_polygon_zkevm_bridge_l1_tokens, fn repo, _ ->
      Instrumenter.block_import_stage_runner(
        fn -> insert(repo, changes_list, insert_options) end,
        :block_referencing,
        :polygon_zkevm_bridge_l1_tokens,
        :polygon_zkevm_bridge_l1_tokens
      )
    end)
  end

  @impl Import.Runner
  def timeout, do: @timeout

  @spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
          {:ok, [BridgeL1Token.t()]}
          | {:error, [Changeset.t()]}
  def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do
    on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0)

    # Enforce BridgeL1Token ShareLocks order (see docs: sharelock.md)
    ordered_changes_list = Enum.sort_by(changes_list, &{&1.address})

    {:ok, inserted} =
      Import.insert_changes_list(
        repo,
        ordered_changes_list,
        conflict_target: :address,
        on_conflict: on_conflict,
        for: BridgeL1Token,
        returning: true,
        timeout: timeout,
        timestamps: timestamps
      )

    {:ok, inserted}
  end

  defp default_on_conflict do
    from(
      t in BridgeL1Token,
      update: [
        set: [
          decimals: fragment("EXCLUDED.decimals"),
          symbol: fragment("EXCLUDED.symbol"),
          inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", t.inserted_at),
          updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", t.updated_at)
        ]
      ],
      where:
        fragment(
          "(EXCLUDED.decimals, EXCLUDED.symbol) IS DISTINCT FROM (?, ?)",
          t.decimals,
          t.symbol
        )
    )
  end
end
@@ -0,0 +1,115 @@
defmodule Explorer.Chain.Import.Runner.PolygonZkevm.BridgeOperations do
  @moduledoc """
  Bulk imports `t:Explorer.Chain.PolygonZkevm.Bridge.t/0`.
  """

  require Ecto.Query

  import Ecto.Query, only: [from: 2]

  alias Ecto.{Changeset, Multi, Repo}
  alias Explorer.Chain.Import
  alias Explorer.Chain.PolygonZkevm.Bridge, as: PolygonZkevmBridge
  alias Explorer.Prometheus.Instrumenter

  @behaviour Import.Runner

  # milliseconds
  @timeout 60_000

  @type imported :: [PolygonZkevmBridge.t()]

  @impl Import.Runner
  def ecto_schema_module, do: PolygonZkevmBridge

  @impl Import.Runner
  def option_key, do: :polygon_zkevm_bridge_operations

  @impl Import.Runner
  def imported_table_row do
    %{
      value_type: "[#{ecto_schema_module()}.t()]",
      value_description: "List of `t:#{ecto_schema_module()}.t/0`s"
    }
  end

  @impl Import.Runner
  def run(multi, changes_list, %{timestamps: timestamps} = options) do
    insert_options =
      options
      |> Map.get(option_key(), %{})
      |> Map.take(~w(on_conflict timeout)a)
      |> Map.put_new(:timeout, @timeout)
      |> Map.put(:timestamps, timestamps)

    Multi.run(multi, :insert_polygon_zkevm_bridge_operations, fn repo, _ ->
      Instrumenter.block_import_stage_runner(
        fn -> insert(repo, changes_list, insert_options) end,
        :block_referencing,
        :polygon_zkevm_bridge_operations,
        :polygon_zkevm_bridge_operations
      )
    end)
  end

  @impl Import.Runner
  def timeout, do: @timeout

  @spec insert(Repo.t(), [map()], %{required(:timeout) => timeout(), required(:timestamps) => Import.timestamps()}) ::
          {:ok, [PolygonZkevmBridge.t()]}
          | {:error, [Changeset.t()]}
  def insert(repo, changes_list, %{timeout: timeout, timestamps: timestamps} = options) when is_list(changes_list) do
    on_conflict = Map.get_lazy(options, :on_conflict, &default_on_conflict/0)

    # Enforce PolygonZkevmBridge ShareLocks order (see docs: sharelock.md)
    ordered_changes_list = Enum.sort_by(changes_list, &{&1.type, &1.index})

    {:ok, inserted} =
      Import.insert_changes_list(
        repo,
        ordered_changes_list,
        conflict_target: [:type, :index],
        on_conflict: on_conflict,
        for: PolygonZkevmBridge,
        returning: true,
        timeout: timeout,
        timestamps: timestamps
      )

    {:ok, inserted}
  end

  defp default_on_conflict do
    from(
      op in PolygonZkevmBridge,
      update: [
        set: [
          # Don't update `type` as it is part of the composite primary key and used for the conflict target
          # Don't update `index` as it is part of the composite primary key and used for the conflict target
          l1_transaction_hash: fragment("COALESCE(EXCLUDED.l1_transaction_hash, ?)", op.l1_transaction_hash),
          l2_transaction_hash: fragment("COALESCE(EXCLUDED.l2_transaction_hash, ?)", op.l2_transaction_hash),
          l1_token_id: fragment("COALESCE(EXCLUDED.l1_token_id, ?)", op.l1_token_id),
          l1_token_address: fragment("COALESCE(EXCLUDED.l1_token_address, ?)", op.l1_token_address),
          l2_token_address: fragment("COALESCE(EXCLUDED.l2_token_address, ?)", op.l2_token_address),
          amount: fragment("EXCLUDED.amount"),
          block_number: fragment("COALESCE(EXCLUDED.block_number, ?)", op.block_number),
          block_timestamp: fragment("COALESCE(EXCLUDED.block_timestamp, ?)", op.block_timestamp),
          inserted_at: fragment("LEAST(?, EXCLUDED.inserted_at)", op.inserted_at),
          updated_at: fragment("GREATEST(?, EXCLUDED.updated_at)", op.updated_at)
        ]
      ],
      where:
        fragment(
          "(EXCLUDED.l1_transaction_hash, EXCLUDED.l2_transaction_hash, EXCLUDED.l1_token_id, EXCLUDED.l1_token_address, EXCLUDED.l2_token_address, EXCLUDED.amount, EXCLUDED.block_number, EXCLUDED.block_timestamp) IS DISTINCT FROM (?, ?, ?, ?, ?, ?, ?, ?)",
          op.l1_transaction_hash,
          op.l2_transaction_hash,
          op.l1_token_id,
          op.l1_token_address,
          op.l2_token_address,
          op.amount,
          op.block_number,
          op.block_timestamp
        )
    )
  end
end
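For orientation, both runners above are exercised through `Explorer.Chain.import/1` via the option keys they declare (`:polygon_zkevm_bridge_l1_tokens` and `:polygon_zkevm_bridge_operations`); this mirrors what `Indexer.Fetcher.PolygonZkevm.Bridge.import_operations/1` does later in this PR. A minimal sketch with made-up parameter values:

```elixir
{:ok, _imported} =
  Explorer.Chain.import(%{
    polygon_zkevm_bridge_l1_tokens: %{
      params: [%{address: "0x0000000000000000000000000000000000000001", symbol: "TKN", decimals: 18}]
    },
    polygon_zkevm_bridge_operations: %{
      params: [%{type: :deposit, index: 0, amount: Decimal.new("1000"), block_number: 1}]
    },
    timeout: :infinity
  })
```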
@@ -1,15 +1,15 @@
-defmodule Explorer.Chain.Zkevm.BatchTransaction do
+defmodule Explorer.Chain.PolygonZkevm.BatchTransaction do
   @moduledoc "Models a list of transactions related to a batch for zkEVM."

   use Explorer.Schema

   alias Explorer.Chain.{Hash, Transaction}
-  alias Explorer.Chain.Zkevm.TransactionBatch
+  alias Explorer.Chain.PolygonZkevm.TransactionBatch

   @required_attrs ~w(batch_number hash)a

   @primary_key false
-  typed_schema "zkevm_batch_l2_transactions" do
+  typed_schema "polygon_zkevm_batch_l2_transactions" do
     belongs_to(:batch, TransactionBatch, foreign_key: :batch_number, references: :number, type: :integer, null: false)

     belongs_to(:l2_transaction, Transaction,
@@ -0,0 +1,55 @@
defmodule Explorer.Chain.PolygonZkevm.Bridge do
  @moduledoc "Models a bridge operation for Polygon zkEVM."

  use Explorer.Schema

  alias Explorer.Chain.{Block, Hash, Token}
  alias Explorer.Chain.PolygonZkevm.BridgeL1Token

  @optional_attrs ~w(l1_transaction_hash l2_transaction_hash l1_token_id l2_token_address block_number block_timestamp)a

  @required_attrs ~w(type index amount)a

  @type t :: %__MODULE__{
          type: String.t(),
          index: non_neg_integer(),
          l1_transaction_hash: Hash.t() | nil,
          l2_transaction_hash: Hash.t() | nil,
          l1_token: %Ecto.Association.NotLoaded{} | BridgeL1Token.t() | nil,
          l1_token_id: non_neg_integer() | nil,
          l1_token_address: Hash.Address.t() | nil,
          l2_token: %Ecto.Association.NotLoaded{} | Token.t() | nil,
          l2_token_address: Hash.Address.t() | nil,
          amount: Decimal.t(),
          block_number: Block.block_number() | nil,
          block_timestamp: DateTime.t() | nil
        }

  @primary_key false
  schema "polygon_zkevm_bridge" do
    field(:type, Ecto.Enum, values: [:deposit, :withdrawal], primary_key: true)
    field(:index, :integer, primary_key: true)
    field(:l1_transaction_hash, Hash.Full)
    field(:l2_transaction_hash, Hash.Full)
    belongs_to(:l1_token, BridgeL1Token, foreign_key: :l1_token_id, references: :id, type: :integer)
    field(:l1_token_address, Hash.Address)
    belongs_to(:l2_token, Token, foreign_key: :l2_token_address, references: :contract_address_hash, type: Hash.Address)
    field(:amount, :decimal)
    field(:block_number, :integer)
    field(:block_timestamp, :utc_datetime_usec)

    timestamps()
  end

  @doc """
  Checks that the `attrs` are valid.
  """
  @spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t()
  def changeset(%__MODULE__{} = operations, attrs \\ %{}) do
    operations
    |> cast(attrs, @required_attrs ++ @optional_attrs)
    |> validate_required(@required_attrs)
    |> unique_constraint([:type, :index])
    |> foreign_key_constraint(:l1_token_id)
  end
end
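A brief illustration of the changeset defined above; the attribute values are made up, and only `type`, `index` and `amount` are required (`type` and `index` form the composite primary key):

```elixir
alias Explorer.Chain.PolygonZkevm.Bridge

changeset =
  Bridge.changeset(%Bridge{}, %{
    type: :deposit,
    index: 42,
    amount: Decimal.new("1000000000000000000"),
    block_number: 19_000_000
  })

changeset.valid?
# => true
```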
@@ -0,0 +1,37 @@
defmodule Explorer.Chain.PolygonZkevm.BridgeL1Token do
  @moduledoc "Models a bridge token on L1 for Polygon zkEVM."

  use Explorer.Schema

  alias Explorer.Chain.Hash

  @optional_attrs ~w(decimals symbol)a

  @required_attrs ~w(address)a

  @type t :: %__MODULE__{
          address: Hash.Address.t(),
          decimals: non_neg_integer() | nil,
          symbol: String.t() | nil
        }

  @primary_key {:id, :id, autogenerate: true}
  schema "polygon_zkevm_bridge_l1_tokens" do
    field(:address, Hash.Address)
    field(:decimals, :integer)
    field(:symbol, :string)

    timestamps()
  end

  @doc """
  Checks that the `attrs` are valid.
  """
  @spec changeset(Ecto.Schema.t(), map()) :: Ecto.Schema.t()
  def changeset(%__MODULE__{} = tokens, attrs \\ %{}) do
    tokens
    |> cast(attrs, @required_attrs ++ @optional_attrs)
    |> validate_required(@required_attrs)
    |> unique_constraint(:id)
  end
end
@@ -1,15 +1,15 @@
-defmodule Explorer.Chain.Zkevm.LifecycleTransaction do
+defmodule Explorer.Chain.PolygonZkevm.LifecycleTransaction do
   @moduledoc "Models an L1 lifecycle transaction for zkEVM."

   use Explorer.Schema

   alias Explorer.Chain.Hash
-  alias Explorer.Chain.Zkevm.TransactionBatch
+  alias Explorer.Chain.PolygonZkevm.TransactionBatch

   @required_attrs ~w(id hash is_verify)a

   @primary_key false
-  typed_schema "zkevm_lifecycle_l1_transactions" do
+  typed_schema "polygon_zkevm_lifecycle_l1_transactions" do
     field(:id, :integer, primary_key: true, null: false)
     field(:hash, Hash.Full, null: false)
     field(:is_verify, :boolean, null: false)
@@ -0,0 +1,321 @@
defmodule Explorer.Chain.PolygonZkevm.Reader do
  @moduledoc "Contains read functions for zkevm modules."

  import Ecto.Query,
    only: [
      from: 2,
      limit: 2,
      order_by: 2,
      where: 2,
      where: 3
    ]

  import Explorer.Chain, only: [select_repo: 1]

  alias Explorer.Chain.PolygonZkevm.{BatchTransaction, Bridge, BridgeL1Token, LifecycleTransaction, TransactionBatch}
  alias Explorer.{Chain, PagingOptions, Repo}
  alias Indexer.Helper

  @doc """
  Reads a batch by its number from database.
  If the number is :latest, gets the latest batch from `polygon_zkevm_transaction_batches` table.
  Returns {:error, :not_found} in case the batch is not found.
  """
  @spec batch(non_neg_integer() | :latest, list()) :: {:ok, map()} | {:error, :not_found}
  def batch(number, options \\ [])

  def batch(:latest, options) when is_list(options) do
    TransactionBatch
    |> order_by(desc: :number)
    |> limit(1)
    |> select_repo(options).one()
    |> case do
      nil -> {:error, :not_found}
      batch -> {:ok, batch}
    end
  end

  def batch(number, options) when is_list(options) do
    necessity_by_association = Keyword.get(options, :necessity_by_association, %{})

    TransactionBatch
    |> where(number: ^number)
    |> Chain.join_associations(necessity_by_association)
    |> select_repo(options).one()
    |> case do
      nil -> {:error, :not_found}
      batch -> {:ok, batch}
    end
  end

  @doc """
  Reads a list of batches from `polygon_zkevm_transaction_batches` table.
  """
  @spec batches(list()) :: list()
  def batches(options \\ []) do
    necessity_by_association = Keyword.get(options, :necessity_by_association, %{})

    base_query =
      from(tb in TransactionBatch,
        order_by: [desc: tb.number]
      )

    query =
      if Keyword.get(options, :confirmed?, false) do
        base_query
        |> Chain.join_associations(necessity_by_association)
        |> where([tb], not is_nil(tb.sequence_id) and tb.sequence_id > 0)
        |> limit(10)
      else
        paging_options = Keyword.get(options, :paging_options, Chain.default_paging_options())

        base_query
        |> Chain.join_associations(necessity_by_association)
        |> page_batches(paging_options)
        |> limit(^paging_options.page_size)
      end

    select_repo(options).all(query)
  end

  @doc """
  Reads a list of L2 transaction hashes from `polygon_zkevm_batch_l2_transactions` table.
  """
  @spec batch_transactions(non_neg_integer(), list()) :: list()
  def batch_transactions(batch_number, options \\ []) do
    query = from(bts in BatchTransaction, where: bts.batch_number == ^batch_number)

    select_repo(options).all(query)
  end

  @doc """
  Tries to read L1 token data (address, symbol, decimals) for the given addresses
  from the database. If the data for an address is not found in Explorer.Chain.PolygonZkevm.BridgeL1Token,
  the address is returned in the list inside the tuple (the second item of the tuple).
  The first item of the returned tuple contains `L1 token address -> L1 token data` map.
  """
  @spec get_token_data_from_db(list()) :: {map(), list()}
  def get_token_data_from_db(token_addresses) do
    # try to read token symbols and decimals from the database
    query =
      from(
        t in BridgeL1Token,
        where: t.address in ^token_addresses,
        select: {t.address, t.decimals, t.symbol}
      )

    token_data =
      query
      |> Repo.all()
      |> Enum.reduce(%{}, fn {address, decimals, symbol}, acc ->
        token_address = Helper.address_hash_to_string(address, true)
        Map.put(acc, token_address, %{symbol: symbol, decimals: decimals})
      end)

    token_addresses_for_rpc =
      token_addresses
      |> Enum.reject(fn address ->
        Map.has_key?(token_data, Helper.address_hash_to_string(address, true))
      end)

    {token_data, token_addresses_for_rpc}
  end

  @doc """
  Gets last known L1 item (deposit) from polygon_zkevm_bridge table.
  Returns block number and L1 transaction hash bound to that deposit.
  If not found, returns zero block number and nil as the transaction hash.
  """
  @spec last_l1_item() :: {non_neg_integer(), binary() | nil}
  def last_l1_item do
    query =
      from(b in Bridge,
        select: {b.block_number, b.l1_transaction_hash},
        where: b.type == :deposit and not is_nil(b.block_number),
        order_by: [desc: b.index],
        limit: 1
      )

    query
    |> Repo.one()
    |> Kernel.||({0, nil})
  end

  @doc """
  Gets last known L2 item (withdrawal) from polygon_zkevm_bridge table.
  Returns block number and L2 transaction hash bound to that withdrawal.
  If not found, returns zero block number and nil as the transaction hash.
  """
  @spec last_l2_item() :: {non_neg_integer(), binary() | nil}
  def last_l2_item do
    query =
      from(b in Bridge,
        select: {b.block_number, b.l2_transaction_hash},
        where: b.type == :withdrawal and not is_nil(b.block_number),
        order_by: [desc: b.index],
        limit: 1
      )

    query
    |> Repo.one()
    |> Kernel.||({0, nil})
  end

  @doc """
  Gets the number of the latest batch with defined verify_id from `polygon_zkevm_transaction_batches` table.
  Returns 0 if not found.
  """
  @spec last_verified_batch_number() :: non_neg_integer()
  def last_verified_batch_number do
    query =
      from(tb in TransactionBatch,
        select: tb.number,
        where: not is_nil(tb.verify_id),
        order_by: [desc: tb.number],
        limit: 1
      )

    query
    |> Repo.one()
    |> Kernel.||(0)
  end

  @doc """
  Reads a list of L1 transactions by their hashes from `polygon_zkevm_lifecycle_l1_transactions` table.
  """
  @spec lifecycle_transactions(list()) :: list()
  def lifecycle_transactions(l1_tx_hashes) do
    query =
      from(
        lt in LifecycleTransaction,
        select: {lt.hash, lt.id},
        where: lt.hash in ^l1_tx_hashes
      )

    Repo.all(query, timeout: :infinity)
  end

  @doc """
  Determines ID of the future lifecycle transaction by reading `polygon_zkevm_lifecycle_l1_transactions` table.
  """
  @spec next_id() :: non_neg_integer()
  def next_id do
    query =
      from(lt in LifecycleTransaction,
        select: lt.id,
        order_by: [desc: lt.id],
        limit: 1
      )

    last_id =
      query
      |> Repo.one()
      |> Kernel.||(0)

    last_id + 1
  end

  @doc """
  Builds `L1 token address -> L1 token id` map for the given token addresses.
  The info is taken from Explorer.Chain.PolygonZkevm.BridgeL1Token.
  If an address is not in the table, it won't be in the resulting map.
  """
  @spec token_addresses_to_ids_from_db(list()) :: map()
  def token_addresses_to_ids_from_db(addresses) do
    query = from(t in BridgeL1Token, select: {t.address, t.id}, where: t.address in ^addresses)

    query
    |> Repo.all(timeout: :infinity)
    |> Enum.reduce(%{}, fn {address, id}, acc ->
      Map.put(acc, Helper.address_hash_to_string(address), id)
    end)
  end

  @doc """
  Retrieves a list of Polygon zkEVM deposits (completed and unclaimed)
  sorted in descending order of the index.
  """
  @spec deposits(list()) :: list()
  def deposits(options \\ []) do
    paging_options = Keyword.get(options, :paging_options, Chain.default_paging_options())

    base_query =
      from(
        b in Bridge,
        left_join: t1 in assoc(b, :l1_token),
        left_join: t2 in assoc(b, :l2_token),
        where: b.type == :deposit and not is_nil(b.l1_transaction_hash),
        preload: [l1_token: t1, l2_token: t2],
        order_by: [desc: b.index]
      )

    base_query
    |> page_deposits_or_withdrawals(paging_options)
    |> limit(^paging_options.page_size)
    |> select_repo(options).all()
  end

  @doc """
  Returns a total number of Polygon zkEVM deposits (completed and unclaimed).
  """
  @spec deposits_count(list()) :: term() | nil
  def deposits_count(options \\ []) do
    query =
      from(
        b in Bridge,
        where: b.type == :deposit and not is_nil(b.l1_transaction_hash)
      )

    select_repo(options).aggregate(query, :count, timeout: :infinity)
  end

  @doc """
  Retrieves a list of Polygon zkEVM withdrawals (completed and unclaimed)
  sorted in descending order of the index.
  """
  @spec withdrawals(list()) :: list()
  def withdrawals(options \\ []) do
    paging_options = Keyword.get(options, :paging_options, Chain.default_paging_options())

    base_query =
      from(
        b in Bridge,
        left_join: t1 in assoc(b, :l1_token),
        left_join: t2 in assoc(b, :l2_token),
        where: b.type == :withdrawal and not is_nil(b.l2_transaction_hash),
        preload: [l1_token: t1, l2_token: t2],
        order_by: [desc: b.index]
      )

    base_query
    |> page_deposits_or_withdrawals(paging_options)
    |> limit(^paging_options.page_size)
    |> select_repo(options).all()
  end

  @doc """
  Returns a total number of Polygon zkEVM withdrawals (completed and unclaimed).
  """
  @spec withdrawals_count(list()) :: term() | nil
  def withdrawals_count(options \\ []) do
    query =
      from(
        b in Bridge,
        where: b.type == :withdrawal and not is_nil(b.l2_transaction_hash)
      )

    select_repo(options).aggregate(query, :count, timeout: :infinity)
  end

  defp page_batches(query, %PagingOptions{key: nil}), do: query

  defp page_batches(query, %PagingOptions{key: {number}}) do
    from(tb in query, where: tb.number < ^number)
  end

  defp page_deposits_or_withdrawals(query, %PagingOptions{key: nil}), do: query

  defp page_deposits_or_withdrawals(query, %PagingOptions{key: {index}}) do
    from(b in query, where: b.index < ^index)
  end
end
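A usage sketch for the read functions above. The `PagingOptions` fields referenced here (`:page_size` and a `{index}` key) are the only ones the paging helpers actually inspect; the page size is an arbitrary example value:

```elixir
alias Explorer.Chain.PolygonZkevm.Reader
alias Explorer.PagingOptions

# first page of deposits, newest (highest index) first
deposits = Reader.deposits(paging_options: %PagingOptions{page_size: 50, key: nil})

# next page, keyed by the index of the last row of the previous page
_next_page =
  Reader.deposits(paging_options: %PagingOptions{page_size: 50, key: {List.last(deposits).index}})

# {block_number, l1_transaction_hash} of the last indexed deposit, or {0, nil}
{_last_block, _last_tx_hash} = Reader.last_l1_item()
```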
@@ -1,17 +1,17 @@
-defmodule Explorer.Chain.Zkevm.TransactionBatch do
+defmodule Explorer.Chain.PolygonZkevm.TransactionBatch do
   @moduledoc "Models a batch of transactions for zkEVM."

   use Explorer.Schema

   alias Explorer.Chain.Hash
-  alias Explorer.Chain.Zkevm.{BatchTransaction, LifecycleTransaction}
+  alias Explorer.Chain.PolygonZkevm.{BatchTransaction, LifecycleTransaction}

   @optional_attrs ~w(sequence_id verify_id)a

   @required_attrs ~w(number timestamp l2_transactions_count global_exit_root acc_input_hash state_root)a

   @primary_key false
-  typed_schema "zkevm_transaction_batches" do
+  typed_schema "polygon_zkevm_transaction_batches" do
     field(:number, :integer, primary_key: true, null: false)
     field(:timestamp, :utc_datetime_usec)
     field(:l2_transactions_count, :integer)
@@ -1,149 +0,0 @@
defmodule Explorer.Chain.Zkevm.Reader do
  @moduledoc "Contains read functions for zkevm modules."

  import Ecto.Query,
    only: [
      from: 2,
      limit: 2,
      order_by: 2,
      where: 2,
      where: 3
    ]

  import Explorer.Chain, only: [select_repo: 1]

  alias Explorer.Chain.Zkevm.{BatchTransaction, LifecycleTransaction, TransactionBatch}
  alias Explorer.{Chain, PagingOptions, Repo}

  @doc """
  Reads a batch by its number from database.
  If the number is :latest, gets the latest batch from `zkevm_transaction_batches` table.
  Returns {:error, :not_found} in case the batch is not found.
  """
  @spec batch(non_neg_integer() | :latest, list()) :: {:ok, map()} | {:error, :not_found}
  def batch(number, options \\ [])

  def batch(:latest, options) when is_list(options) do
    TransactionBatch
    |> order_by(desc: :number)
    |> limit(1)
    |> select_repo(options).one()
    |> case do
      nil -> {:error, :not_found}
      batch -> {:ok, batch}
    end
  end

  def batch(number, options) when is_list(options) do
    necessity_by_association = Keyword.get(options, :necessity_by_association, %{})

    TransactionBatch
    |> where(number: ^number)
    |> Chain.join_associations(necessity_by_association)
    |> select_repo(options).one()
    |> case do
      nil -> {:error, :not_found}
      batch -> {:ok, batch}
    end
  end

  @doc """
  Reads a list of batches from `zkevm_transaction_batches` table.
  """
  @spec batches(list()) :: list()
  def batches(options \\ []) do
    necessity_by_association = Keyword.get(options, :necessity_by_association, %{})

    base_query =
      from(tb in TransactionBatch,
        order_by: [desc: tb.number]
      )

    query =
      if Keyword.get(options, :confirmed?, false) do
        base_query
        |> Chain.join_associations(necessity_by_association)
        |> where([tb], not is_nil(tb.sequence_id) and tb.sequence_id > 0)
        |> limit(10)
      else
        paging_options = Keyword.get(options, :paging_options, Chain.default_paging_options())

        base_query
        |> Chain.join_associations(necessity_by_association)
        |> page_batches(paging_options)
        |> limit(^paging_options.page_size)
      end

    select_repo(options).all(query)
  end

  @doc """
  Reads a list of L2 transaction hashes from `zkevm_batch_l2_transactions` table.
  """
  @spec batch_transactions(non_neg_integer(), list()) :: list()
  def batch_transactions(batch_number, options \\ []) do
    query = from(bts in BatchTransaction, where: bts.batch_number == ^batch_number)

    select_repo(options).all(query)
  end

  @doc """
  Gets the number of the latest batch with defined verify_id from `zkevm_transaction_batches` table.
  Returns 0 if not found.
  """
  @spec last_verified_batch_number() :: non_neg_integer()
  def last_verified_batch_number do
    query =
      from(tb in TransactionBatch,
        select: tb.number,
        where: not is_nil(tb.verify_id),
        order_by: [desc: tb.number],
        limit: 1
      )

    query
    |> Repo.one()
    |> Kernel.||(0)
  end

  @doc """
  Reads a list of L1 transactions by their hashes from `zkevm_lifecycle_l1_transactions` table.
  """
  @spec lifecycle_transactions(list()) :: list()
  def lifecycle_transactions(l1_tx_hashes) do
    query =
      from(
        lt in LifecycleTransaction,
        select: {lt.hash, lt.id},
        where: lt.hash in ^l1_tx_hashes
      )

    Repo.all(query, timeout: :infinity)
  end

  @doc """
  Determines ID of the future lifecycle transaction by reading `zkevm_lifecycle_l1_transactions` table.
  """
  @spec next_id() :: non_neg_integer()
  def next_id do
    query =
      from(lt in LifecycleTransaction,
        select: lt.id,
        order_by: [desc: lt.id],
        limit: 1
      )

    last_id =
      query
      |> Repo.one()
      |> Kernel.||(0)

    last_id + 1
  end

  defp page_batches(query, %PagingOptions{key: nil}), do: query

  defp page_batches(query, %PagingOptions{key: {number}}) do
    from(tb in query, where: tb.number < ^number)
  end
end
@@ -0,0 +1,46 @@
defmodule Explorer.Repo.PolygonZkevm.Migrations.AddBridgeTables do
  use Ecto.Migration

  def change do
    create table(:polygon_zkevm_bridge_l1_tokens, primary_key: false) do
      add(:id, :identity, primary_key: true, start_value: 0, increment: 1)
      add(:address, :bytea, null: false)
      add(:decimals, :smallint, null: true, default: nil)
      add(:symbol, :string, size: 16, null: true, default: nil)
      timestamps(null: false, type: :utc_datetime_usec)
    end

    create(unique_index(:polygon_zkevm_bridge_l1_tokens, :address))

    execute(
      "CREATE TYPE polygon_zkevm_bridge_op_type AS ENUM ('deposit', 'withdrawal')",
      "DROP TYPE polygon_zkevm_bridge_op_type"
    )

    create table(:polygon_zkevm_bridge, primary_key: false) do
      add(:type, :polygon_zkevm_bridge_op_type, null: false, primary_key: true)
      add(:index, :integer, null: false, primary_key: true)
      add(:l1_transaction_hash, :bytea, null: true)
      add(:l2_transaction_hash, :bytea, null: true)

      add(
        :l1_token_id,
        references(:polygon_zkevm_bridge_l1_tokens, on_delete: :restrict, on_update: :update_all, type: :identity),
        null: true
      )

      add(:l1_token_address, :bytea, null: true)
      add(:l2_token_address, :bytea, null: true)
      add(:amount, :numeric, precision: 100, null: false)
      add(:block_number, :bigint, null: true)
      add(:block_timestamp, :"timestamp without time zone", null: true)
      timestamps(null: false, type: :utc_datetime_usec)
    end

    create(index(:polygon_zkevm_bridge, :l1_token_address))

    rename(table(:zkevm_lifecycle_l1_transactions), to: table(:polygon_zkevm_lifecycle_l1_transactions))
    rename(table(:zkevm_transaction_batches), to: table(:polygon_zkevm_transaction_batches))
    rename(table(:zkevm_batch_l2_transactions), to: table(:polygon_zkevm_batch_l2_transactions))
  end
end
@@ -0,0 +1,413 @@
defmodule Indexer.Fetcher.PolygonZkevm.Bridge do
  @moduledoc """
  Contains common functions for Indexer.Fetcher.PolygonZkevm.Bridge* modules.
  """

  require Logger

  import EthereumJSONRPC,
    only: [
      integer_to_quantity: 1,
      json_rpc: 2,
      quantity_to_integer: 1,
      request: 1,
      timestamp_to_datetime: 1
    ]

  import Explorer.Chain.SmartContract, only: [burn_address_hash_string: 0]

  import Explorer.Helper, only: [decode_data: 2]

  alias EthereumJSONRPC.Logs
  alias Explorer.Chain
  alias Explorer.Chain.PolygonZkevm.Reader
  alias Explorer.SmartContract.Reader, as: SmartContractReader
  alias Indexer.Helper
  alias Indexer.Transform.Addresses

  # 32-byte signature of the event BridgeEvent(uint8 leafType, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, uint32 depositCount)
  @bridge_event "0x501781209a1f8899323b96b4ef08b168df93e0a90c673d1e4cce39366cb62f9b"
  @bridge_event_params [{:uint, 8}, {:uint, 32}, :address, {:uint, 32}, :address, {:uint, 256}, :bytes, {:uint, 32}]

  # 32-byte signature of the event ClaimEvent(uint32 index, uint32 originNetwork, address originAddress, address destinationAddress, uint256 amount)
  @claim_event "0x25308c93ceeed162da955b3f7ce3e3f93606579e40fb92029faa9efe27545983"
  @claim_event_params [{:uint, 32}, {:uint, 32}, :address, :address, {:uint, 256}]

  @symbol_method_selector "95d89b41"
  @decimals_method_selector "313ce567"

  @erc20_abi [
    %{
      "constant" => true,
      "inputs" => [],
      "name" => "symbol",
      "outputs" => [%{"name" => "", "type" => "string"}],
      "payable" => false,
      "stateMutability" => "view",
      "type" => "function"
    },
    %{
      "constant" => true,
      "inputs" => [],
      "name" => "decimals",
      "outputs" => [%{"name" => "", "type" => "uint8"}],
      "payable" => false,
      "stateMutability" => "view",
      "type" => "function"
    }
  ]

  @doc """
  Filters the given list of events keeping only `BridgeEvent` and `ClaimEvent` ones
  emitted by the bridge contract.
  """
  @spec filter_bridge_events(list(), binary()) :: list()
  def filter_bridge_events(events, bridge_contract) do
    Enum.filter(events, fn event ->
      Helper.address_hash_to_string(event.address_hash, true) == bridge_contract and
        Enum.member?([@bridge_event, @claim_event], Helper.log_topic_to_string(event.first_topic))
    end)
  end

  @doc """
  Fetches `BridgeEvent` and `ClaimEvent` events of the bridge contract from an RPC node
  for the given range of blocks.
  """
  @spec get_logs_all({non_neg_integer(), non_neg_integer()}, binary(), list()) :: list()
  def get_logs_all({chunk_start, chunk_end}, bridge_contract, json_rpc_named_arguments) do
    {:ok, result} =
      get_logs(
        chunk_start,
        chunk_end,
        bridge_contract,
        [[@bridge_event, @claim_event]],
        json_rpc_named_arguments
      )

    Logs.elixir_to_params(result)
  end

  defp get_logs(from_block, to_block, address, topics, json_rpc_named_arguments, retries \\ 100_000_000) do
    processed_from_block = if is_integer(from_block), do: integer_to_quantity(from_block), else: from_block
    processed_to_block = if is_integer(to_block), do: integer_to_quantity(to_block), else: to_block

    req =
      request(%{
        id: 0,
        method: "eth_getLogs",
        params: [
          %{
            :fromBlock => processed_from_block,
            :toBlock => processed_to_block,
            :address => address,
            :topics => topics
          }
        ]
      })

    error_message = &"Cannot fetch logs for the block range #{from_block}..#{to_block}. Error: #{inspect(&1)}"

    Helper.repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, retries)
  end

  @doc """
  Imports the given zkEVM bridge operations into database.
  Used by Indexer.Fetcher.PolygonZkevm.BridgeL1 and Indexer.Fetcher.PolygonZkevm.BridgeL2 fetchers.
  Doesn't return anything.
  """
  @spec import_operations(list()) :: no_return()
  def import_operations(operations) do
    addresses =
      Addresses.extract_addresses(%{
        polygon_zkevm_bridge_operations: operations
      })

    {:ok, _} =
      Chain.import(%{
        addresses: %{params: addresses, on_conflict: :nothing},
        polygon_zkevm_bridge_operations: %{params: operations},
        timeout: :infinity
      })
  end

  @doc """
  Converts the list of zkEVM bridge events to the list of operations
  preparing them for importing to the database.
  """
  @spec prepare_operations(list(), list() | nil, list(), map() | nil) :: list()
  def prepare_operations(events, json_rpc_named_arguments, json_rpc_named_arguments_l1, block_to_timestamp \\ nil) do
    {block_to_timestamp, token_address_to_id} =
      if is_nil(block_to_timestamp) do
        bridge_events = Enum.filter(events, fn event -> event.first_topic == @bridge_event end)

        l1_token_addresses =
          bridge_events
          |> Enum.reduce(%MapSet{}, fn event, acc ->
            case bridge_event_parse(event) do
              {{nil, _}, _, _} -> acc
              {{token_address, nil}, _, _} -> MapSet.put(acc, token_address)
            end
          end)
          |> MapSet.to_list()

        {
          blocks_to_timestamps(bridge_events, json_rpc_named_arguments),
          token_addresses_to_ids(l1_token_addresses, json_rpc_named_arguments_l1)
        }
      else
        # this is called in realtime
        {block_to_timestamp, %{}}
      end

    Enum.map(events, fn event ->
      {index, l1_token_id, l1_token_address, l2_token_address, amount, block_number, block_timestamp} =
        if event.first_topic == @bridge_event do
          {
            {l1_token_address, l2_token_address},
            amount,
            deposit_count
          } = bridge_event_parse(event)

          l1_token_id = Map.get(token_address_to_id, l1_token_address)
          block_number = quantity_to_integer(event.block_number)
          block_timestamp = Map.get(block_to_timestamp, block_number)

          # credo:disable-for-lines:2 Credo.Check.Refactor.Nesting
          l1_token_address =
            if is_nil(l1_token_id) do
              l1_token_address
            end

          {deposit_count, l1_token_id, l1_token_address, l2_token_address, amount, block_number, block_timestamp}
        else
          [index, _origin_network, _origin_address, _destination_address, amount] =
            decode_data(event.data, @claim_event_params)

          {index, nil, nil, nil, amount, nil, nil}
        end

      is_l1 = json_rpc_named_arguments == json_rpc_named_arguments_l1

      result = %{
        type: operation_type(event.first_topic, is_l1),
        index: index,
        amount: amount
      }

      transaction_hash_field =
        if is_l1 do
          :l1_transaction_hash
        else
          :l2_transaction_hash
        end

      result
      |> extend_result(transaction_hash_field, event.transaction_hash)
      |> extend_result(:l1_token_id, l1_token_id)
      |> extend_result(:l1_token_address, l1_token_address)
      |> extend_result(:l2_token_address, l2_token_address)
      |> extend_result(:block_number, block_number)
      |> extend_result(:block_timestamp, block_timestamp)
    end)
  end

  defp blocks_to_timestamps(events, json_rpc_named_arguments) do
    events
    |> Helper.get_blocks_by_events(json_rpc_named_arguments, 100_000_000)
    |> Enum.reduce(%{}, fn block, acc ->
      block_number = quantity_to_integer(Map.get(block, "number"))
      timestamp = timestamp_to_datetime(Map.get(block, "timestamp"))
      Map.put(acc, block_number, timestamp)
    end)
  end

  defp bridge_event_parse(event) do
    [
      leaf_type,
      origin_network,
      origin_address,
      _destination_network,
      _destination_address,
      amount,
      _metadata,
      deposit_count
    ] = decode_data(event.data, @bridge_event_params)

    {token_address_by_origin_address(origin_address, origin_network, leaf_type), amount, deposit_count}
  end

  defp operation_type(first_topic, is_l1) do
    if first_topic == @bridge_event do
      if is_l1, do: :deposit, else: :withdrawal
    else
      if is_l1, do: :withdrawal, else: :deposit
    end
  end

  @doc """
  Fetches L1 token data for the given token addresses,
  builds `L1 token address -> L1 token id` map for them,
  and writes the data to the database. Returns the resulting map.
  """
  @spec token_addresses_to_ids(list(), list()) :: map()
  def token_addresses_to_ids(l1_token_addresses, json_rpc_named_arguments) do
    token_data =
      l1_token_addresses
      |> get_token_data(json_rpc_named_arguments)

    tokens_existing =
      token_data
      |> Map.keys()
      |> Reader.token_addresses_to_ids_from_db()

    tokens_to_insert =
      token_data
      |> Enum.reject(fn {address, _} -> Map.has_key?(tokens_existing, address) end)
      |> Enum.map(fn {address, data} -> Map.put(data, :address, address) end)

    {:ok, inserts} =
      Chain.import(%{
        polygon_zkevm_bridge_l1_tokens: %{params: tokens_to_insert},
        timeout: :infinity
      })

    tokens_inserted = Map.get(inserts, :insert_polygon_zkevm_bridge_l1_tokens, [])

    # we need to query not inserted tokens from DB separately as they
    # could be inserted by another module at the same time (a race condition).
    # this is an unlikely case but we handle it here as well
    tokens_not_inserted =
      tokens_to_insert
      |> Enum.reject(fn token ->
        Enum.any?(tokens_inserted, fn inserted -> token.address == Helper.address_hash_to_string(inserted.address) end)
      end)
      |> Enum.map(& &1.address)

    tokens_inserted_outside = Reader.token_addresses_to_ids_from_db(tokens_not_inserted)

    tokens_inserted
    |> Enum.reduce(%{}, fn t, acc -> Map.put(acc, Helper.address_hash_to_string(t.address), t.id) end)
    |> Map.merge(tokens_existing)
    |> Map.merge(tokens_inserted_outside)
  end

  defp token_address_by_origin_address(origin_address, origin_network, leaf_type) do
    with true <- leaf_type != 1 and origin_network <= 1,
         token_address = "0x" <> Base.encode16(origin_address, case: :lower),
         true <- token_address != burn_address_hash_string() do
      if origin_network == 0 do
        # this is L1 address
        {token_address, nil}
      else
        # this is L2 address
        {nil, token_address}
      end
    else
      _ -> {nil, nil}
    end
  end

  defp get_token_data(token_addresses, json_rpc_named_arguments) do
    # first, we're trying to read token data from the DB.
    # if tokens are not in the DB, read them through RPC.
    token_addresses
    |> Reader.get_token_data_from_db()
    |> get_token_data_from_rpc(json_rpc_named_arguments)
  end

  defp get_token_data_from_rpc({token_data, token_addresses}, json_rpc_named_arguments) do
    {requests, responses} = get_token_data_request_symbol_decimals(token_addresses, json_rpc_named_arguments)

    requests
    |> Enum.zip(responses)
    |> Enum.reduce(token_data, fn {request, {status, response} = _resp}, token_data_acc ->
      if status == :ok do
        response = parse_response(response)

        address = Helper.address_hash_to_string(request.contract_address, true)

        new_data = get_new_data(token_data_acc[address] || %{}, request, response)

        Map.put(token_data_acc, address, new_data)
      else
        token_data_acc
      end
    end)
  end

  defp get_token_data_request_symbol_decimals(token_addresses, json_rpc_named_arguments) do
    requests =
      token_addresses
      |> Enum.map(fn address ->
        # we will call symbol() and decimals() public getters
        Enum.map([@symbol_method_selector, @decimals_method_selector], fn method_id ->
          %{
            contract_address: address,
            method_id: method_id,
            args: []
          }
        end)
      end)
      |> List.flatten()

    {responses, error_messages} = read_contracts_with_retries(requests, @erc20_abi, json_rpc_named_arguments, 3)

    if !Enum.empty?(error_messages) or Enum.count(requests) != Enum.count(responses) do
      Logger.warning(
        "Cannot read symbol and decimals of an ERC-20 token contract. Error messages: #{Enum.join(error_messages, ", ")}. Addresses: #{Enum.join(token_addresses, ", ")}"
      )
    end

    {requests, responses}
  end

  defp read_contracts_with_retries(requests, abi, json_rpc_named_arguments, retries_left) when retries_left > 0 do
    responses = SmartContractReader.query_contracts(requests, abi, json_rpc_named_arguments: json_rpc_named_arguments)

    error_messages =
      Enum.reduce(responses, [], fn {status, error_message}, acc ->
        acc ++
          if status == :error do
            [error_message]
          else
            []
          end
      end)

    if Enum.empty?(error_messages) do
      {responses, []}
    else
      retries_left = retries_left - 1

      if retries_left == 0 do
        {responses, Enum.uniq(error_messages)}
      else
        :timer.sleep(1000)
        read_contracts_with_retries(requests, abi, json_rpc_named_arguments, retries_left)
      end
    end
  end

  defp get_new_data(data, request, response) do
    if atomized_key(request.method_id) == :symbol do
      Map.put(data, :symbol, response)
    else
      Map.put(data, :decimals, response)
    end
  end

  defp extend_result(result, _key, value) when is_nil(value), do: result
  defp extend_result(result, key, value) when is_atom(key), do: Map.put(result, key, value)

  defp atomized_key("symbol"), do: :symbol
  defp atomized_key("decimals"), do: :decimals
  defp atomized_key(@symbol_method_selector), do: :symbol
  defp atomized_key(@decimals_method_selector), do: :decimals

  defp parse_response(response) do
    case response do
      [item] -> item
      items -> items
    end
  end
end
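The optional fourth argument of `prepare_operations/4` is meant for the realtime path, where block timestamps are already known and no extra RPC lookups should be made. A self-contained sketch of that call shape (empty event list and placeholder arguments, so it only demonstrates the wiring):

```elixir
alias Indexer.Fetcher.PolygonZkevm.Bridge, as: BridgeFetcher

# timestamps keyed by block number, as already known to the realtime caller
block_to_timestamp = %{19_000_000 => ~U[2024-01-01 00:00:00Z]}

# events would normally come from filter_bridge_events/2; [] keeps the sketch runnable
operations = BridgeFetcher.prepare_operations([], nil, nil, block_to_timestamp)

# the resulting operation maps are then handed to import_operations/1
[] = operations
```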
@ -0,0 +1,210 @@ |
|||||||
|
defmodule Indexer.Fetcher.PolygonZkevm.BridgeL1 do |
||||||
|
@moduledoc """ |
||||||
|
Fills polygon_zkevm_bridge DB table. |
||||||
|
""" |
||||||
|
|
||||||
|
use GenServer |
||||||
|
use Indexer.Fetcher |
||||||
|
|
||||||
|
require Logger |
||||||
|
|
||||||
|
import Ecto.Query |
||||||
|
import Explorer.Helper, only: [parse_integer: 1] |
||||||
|
|
||||||
|
import Indexer.Fetcher.PolygonZkevm.Bridge, |
||||||
|
only: [get_logs_all: 3, import_operations: 1, prepare_operations: 3] |
||||||
|
|
||||||
|
alias Explorer.Chain.PolygonZkevm.{Bridge, Reader} |
||||||
|
alias Explorer.Repo |
||||||
|
alias Indexer.Fetcher.RollupL1ReorgMonitor |
||||||
|
alias Indexer.Helper |
||||||
|
|
||||||
|
@eth_get_logs_range_size 1000 |
||||||
|
@fetcher_name :polygon_zkevm_bridge_l1 |
||||||
|
|
||||||
|
def child_spec(start_link_arguments) do |
||||||
|
spec = %{ |
||||||
|
id: __MODULE__, |
||||||
|
start: {__MODULE__, :start_link, start_link_arguments}, |
||||||
|
restart: :transient, |
||||||
|
type: :worker |
||||||
|
} |
||||||
|
|
||||||
|
Supervisor.child_spec(spec, []) |
||||||
|
end |
||||||
|
|
||||||
|
def start_link(args, gen_server_options \\ []) do |
||||||
|
GenServer.start_link(__MODULE__, args, Keyword.put_new(gen_server_options, :name, __MODULE__)) |
||||||
|
end |
||||||
|
|
||||||
|
@impl GenServer |
||||||
|
def init(_args) do |
||||||
|
{:ok, %{}, {:continue, :ok}} |
||||||
|
end |
||||||
|
|
||||||
|
  @impl GenServer
  def handle_continue(_, state) do
    Logger.metadata(fetcher: @fetcher_name)
    # a two-second pause is needed to avoid exceeding the Supervisor restart intensity when there are DB issues
    Process.send_after(self(), :init_with_delay, 2000)
    {:noreply, state}
  end

  @impl GenServer
  def handle_info(:init_with_delay, _state) do
    env = Application.get_all_env(:indexer)[__MODULE__]

    with {:start_block_undefined, false} <- {:start_block_undefined, is_nil(env[:start_block])},
         {:reorg_monitor_started, true} <- {:reorg_monitor_started, !is_nil(Process.whereis(RollupL1ReorgMonitor))},
         rpc = env[:rpc],
         {:rpc_undefined, false} <- {:rpc_undefined, is_nil(rpc)},
         {:bridge_contract_address_is_valid, true} <-
           {:bridge_contract_address_is_valid, Helper.address_correct?(env[:bridge_contract])},
         start_block = parse_integer(env[:start_block]),
         false <- is_nil(start_block),
         true <- start_block > 0,
         {last_l1_block_number, last_l1_transaction_hash} = Reader.last_l1_item(),
         json_rpc_named_arguments = Helper.json_rpc_named_arguments(rpc),
         {:ok, block_check_interval, safe_block} <- Helper.get_block_check_interval(json_rpc_named_arguments),
         {:start_block_valid, true, _, _} <-
           {:start_block_valid,
            (start_block <= last_l1_block_number || last_l1_block_number == 0) && start_block <= safe_block,
            last_l1_block_number, safe_block},
         {:ok, last_l1_tx} <- Helper.get_transaction_by_hash(last_l1_transaction_hash, json_rpc_named_arguments),
         {:l1_tx_not_found, false} <- {:l1_tx_not_found, !is_nil(last_l1_transaction_hash) && is_nil(last_l1_tx)} do
      Process.send(self(), :continue, [])

      {:noreply,
       %{
         block_check_interval: block_check_interval,
         bridge_contract: env[:bridge_contract],
         json_rpc_named_arguments: json_rpc_named_arguments,
         end_block: safe_block,
         start_block: max(start_block, last_l1_block_number)
       }}
    else
      {:start_block_undefined, true} ->
        # the process shouldn't start if the start block is not defined
        {:stop, :normal, %{}}

      {:reorg_monitor_started, false} ->
        Logger.error("Cannot start this process as Indexer.Fetcher.RollupL1ReorgMonitor is not started.")
        {:stop, :normal, %{}}

      {:rpc_undefined, true} ->
        Logger.error("L1 RPC URL is not defined.")
        {:stop, :normal, %{}}

      {:bridge_contract_address_is_valid, false} ->
        Logger.error("PolygonZkEVMBridge contract address is invalid or not defined.")
        {:stop, :normal, %{}}

      {:start_block_valid, false, last_l1_block_number, safe_block} ->
        Logger.error("Invalid L1 Start Block value. Please check the value and the polygon_zkevm_bridge table.")
        Logger.error("last_l1_block_number = #{inspect(last_l1_block_number)}")
        Logger.error("safe_block = #{inspect(safe_block)}")
        {:stop, :normal, %{}}

      {:error, error_data} ->
        Logger.error(
          "Cannot get the last L1 transaction from RPC by its hash, the latest block, or the block timestamp by its number due to an RPC error: #{inspect(error_data)}"
        )

        {:stop, :normal, %{}}

      {:l1_tx_not_found, true} ->
        Logger.error(
          "Cannot find the last L1 transaction from RPC by its hash. There was probably a reorg on the L1 chain. Please check the polygon_zkevm_bridge table."
        )

        {:stop, :normal, %{}}

      _ ->
        Logger.error("L1 Start Block is invalid or zero.")
        {:stop, :normal, %{}}
    end
  end

  @impl GenServer
  def handle_info(
        :continue,
        %{
          bridge_contract: bridge_contract,
          block_check_interval: block_check_interval,
          start_block: start_block,
          end_block: end_block,
          json_rpc_named_arguments: json_rpc_named_arguments
        } = state
      ) do
    time_before = Timex.now()

    last_written_block =
      start_block..end_block
      |> Enum.chunk_every(@eth_get_logs_range_size)
      |> Enum.reduce_while(start_block - 1, fn current_chunk, _ ->
        chunk_start = List.first(current_chunk)
        chunk_end = List.last(current_chunk)

        if chunk_start <= chunk_end do
          Helper.log_blocks_chunk_handling(chunk_start, chunk_end, start_block, end_block, nil, "L1")

          operations =
            {chunk_start, chunk_end}
            |> get_logs_all(bridge_contract, json_rpc_named_arguments)
            |> prepare_operations(json_rpc_named_arguments, json_rpc_named_arguments)

          import_operations(operations)

          Helper.log_blocks_chunk_handling(
            chunk_start,
            chunk_end,
            start_block,
            end_block,
            "#{Enum.count(operations)} L1 operation(s)",
            "L1"
          )
        end

        reorg_block = RollupL1ReorgMonitor.reorg_block_pop(__MODULE__)

        if !is_nil(reorg_block) && reorg_block > 0 do
          reorg_handle(reorg_block)
          {:halt, if(reorg_block <= chunk_end, do: reorg_block - 1, else: chunk_end)}
        else
          {:cont, chunk_end}
        end
      end)

    new_start_block = last_written_block + 1
    {:ok, new_end_block} = Helper.get_block_number_by_tag("latest", json_rpc_named_arguments, 100_000_000)

    delay =
      if new_end_block == last_written_block do
        # there is no new block yet, so wait for some time to let the chain issue the new block
        max(block_check_interval - Timex.diff(Timex.now(), time_before, :milliseconds), 0)
      else
        0
      end

    Process.send_after(self(), :continue, delay)

    {:noreply, %{state | start_block: new_start_block, end_block: new_end_block}}
  end

  @impl GenServer
  def handle_info({ref, _result}, state) do
    Process.demonitor(ref, [:flush])
    {:noreply, state}
  end

  defp reorg_handle(reorg_block) do
    {deleted_count, _} =
      Repo.delete_all(from(b in Bridge, where: b.type == :deposit and b.block_number >= ^reorg_block))

    if deleted_count > 0 do
      Logger.warning(
        "As an L1 reorg was detected, some deposits with block_number >= #{reorg_block} were removed from the polygon_zkevm_bridge table. Number of removed rows: #{deleted_count}."
      )
    end
  end
end
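
For orientation, a minimal sketch of the application configuration the checks above expect. The key names come from the `env[...]` lookups in `handle_info(:init_with_delay, ...)`; the file location and all values are illustrative placeholders, not settings taken from this PR.

import Config

# Hypothetical config/runtime.exs fragment; values are placeholders only.
config :indexer, Indexer.Fetcher.PolygonZkevm.BridgeL1,
  # first L1 block to scan for PolygonZkEVMBridge events
  start_block: 1_000_000,
  # L1 JSON-RPC endpoint passed to Helper.json_rpc_named_arguments/1
  rpc: "https://l1-rpc.example.com",
  # PolygonZkEVMBridge contract address on L1 (placeholder)
  bridge_contract: "0x0000000000000000000000000000000000000000"
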
@@ -0,0 +1,78 @@
defmodule Indexer.Fetcher.PolygonZkevm.BridgeL1Tokens do
  @moduledoc """
  Fetches information about L1 tokens for the zkEVM bridge.
  """

  use Indexer.Fetcher, restart: :permanent
  use Spandex.Decorators

  import Ecto.Query

  alias Explorer.Repo
  alias Indexer.{BufferedTask, Helper}
  alias Indexer.Fetcher.PolygonZkevm.{Bridge, BridgeL1}

  @behaviour BufferedTask

  @default_max_batch_size 1
  @default_max_concurrency 10

  @doc false
  def child_spec([init_options, gen_server_options]) do
    rpc = Application.get_all_env(:indexer)[BridgeL1][:rpc]
    json_rpc_named_arguments = Helper.json_rpc_named_arguments(rpc)

    merged_init_opts =
      defaults()
      |> Keyword.merge(init_options)
      |> Keyword.merge(state: json_rpc_named_arguments)

    Supervisor.child_spec({BufferedTask, [{__MODULE__, merged_init_opts}, gen_server_options]}, id: __MODULE__)
  end

  @impl BufferedTask
  def init(_, _, _) do
    {0, []}
  end

  @impl BufferedTask
  def run(l1_token_addresses, json_rpc_named_arguments) when is_list(l1_token_addresses) do
    l1_token_addresses
    |> Bridge.token_addresses_to_ids(json_rpc_named_arguments)
    |> Enum.each(fn {l1_token_address, l1_token_id} ->
      Repo.update_all(
        from(b in Explorer.Chain.PolygonZkevm.Bridge, where: b.l1_token_address == ^l1_token_address),
        set: [l1_token_id: l1_token_id, l1_token_address: nil]
      )
    end)
  end

  @doc """
  Fetches L1 token data asynchronously.
  """
  def async_fetch(data) do
    async_fetch(data, Application.get_env(:indexer, __MODULE__.Supervisor)[:enabled])
  end

  def async_fetch(_data, false), do: :ok

  def async_fetch(operations, _enabled) do
    l1_token_addresses =
      operations
      |> Enum.reject(fn operation -> is_nil(operation.l1_token_address) end)
      |> Enum.map(fn operation -> operation.l1_token_address end)
      |> Enum.uniq()

    BufferedTask.buffer(__MODULE__, l1_token_addresses)
  end

  defp defaults do
    [
      flush_interval: 100,
      max_concurrency: Application.get_env(:indexer, __MODULE__)[:concurrency] || @default_max_concurrency,
      max_batch_size: Application.get_env(:indexer, __MODULE__)[:batch_size] || @default_max_batch_size,
      poll: false,
      task_supervisor: __MODULE__.TaskSupervisor
    ]
  end
end
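
As a usage sketch (not part of the diff): `async_fetch/1` drops operations without an `l1_token_address`, deduplicates the remaining addresses, and buffers them into the `BufferedTask` queue. A caller could therefore pass it operation maps like the hypothetical ones below; the address values are placeholders.

operations = [
  %{l1_token_address: "0x1111111111111111111111111111111111111111"},
  %{l1_token_address: "0x1111111111111111111111111111111111111111"},
  %{l1_token_address: nil}
]

# Only the single distinct non-nil address ends up in the BufferedTask buffer;
# the call returns :ok immediately if the fetcher's Supervisor is not enabled.
Indexer.Fetcher.PolygonZkevm.BridgeL1Tokens.async_fetch(operations)
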
@@ -0,0 +1,176 @@
defmodule Indexer.Fetcher.PolygonZkevm.BridgeL2 do
  @moduledoc """
  Fills the polygon_zkevm_bridge DB table.
  """

  use GenServer
  use Indexer.Fetcher

  require Logger

  import Ecto.Query
  import Explorer.Helper, only: [parse_integer: 1]

  import Indexer.Fetcher.PolygonZkevm.Bridge,
    only: [get_logs_all: 3, import_operations: 1, prepare_operations: 3]

  alias Explorer.Chain.PolygonZkevm.{Bridge, Reader}
  alias Explorer.Repo
  alias Indexer.Helper

  @eth_get_logs_range_size 1000
  @fetcher_name :polygon_zkevm_bridge_l2

  def child_spec(start_link_arguments) do
    spec = %{
      id: __MODULE__,
      start: {__MODULE__, :start_link, start_link_arguments},
      restart: :transient,
      type: :worker
    }

    Supervisor.child_spec(spec, [])
  end

  def start_link(args, gen_server_options \\ []) do
    GenServer.start_link(__MODULE__, args, Keyword.put_new(gen_server_options, :name, __MODULE__))
  end

  @impl GenServer
  def init(args) do
    json_rpc_named_arguments = args[:json_rpc_named_arguments]
    {:ok, %{}, {:continue, json_rpc_named_arguments}}
  end

  @impl GenServer
  def handle_continue(json_rpc_named_arguments, _state) do
    Logger.metadata(fetcher: @fetcher_name)
    # a two-second pause is needed to avoid exceeding the Supervisor restart intensity when there are DB issues
    Process.send_after(self(), :init_with_delay, 2000)
    {:noreply, %{json_rpc_named_arguments: json_rpc_named_arguments}}
  end

  @impl GenServer
  def handle_info(:init_with_delay, %{json_rpc_named_arguments: json_rpc_named_arguments} = state) do
    env = Application.get_all_env(:indexer)[__MODULE__]

    with {:start_block_undefined, false} <- {:start_block_undefined, is_nil(env[:start_block])},
         rpc_l1 = Application.get_all_env(:indexer)[Indexer.Fetcher.PolygonZkevm.BridgeL1][:rpc],
         {:rpc_l1_undefined, false} <- {:rpc_l1_undefined, is_nil(rpc_l1)},
         {:bridge_contract_address_is_valid, true} <-
           {:bridge_contract_address_is_valid, Helper.address_correct?(env[:bridge_contract])},
         start_block = parse_integer(env[:start_block]),
         false <- is_nil(start_block),
         true <- start_block > 0,
         {last_l2_block_number, last_l2_transaction_hash} = Reader.last_l2_item(),
         {:ok, latest_block} = Helper.get_block_number_by_tag("latest", json_rpc_named_arguments, 100_000_000),
         {:start_block_valid, true} <-
           {:start_block_valid,
            (start_block <= last_l2_block_number || last_l2_block_number == 0) && start_block <= latest_block},
         {:ok, last_l2_tx} <- Helper.get_transaction_by_hash(last_l2_transaction_hash, json_rpc_named_arguments),
         {:l2_tx_not_found, false} <- {:l2_tx_not_found, !is_nil(last_l2_transaction_hash) && is_nil(last_l2_tx)} do
      Process.send(self(), :continue, [])

      {:noreply,
       %{
         bridge_contract: env[:bridge_contract],
         json_rpc_named_arguments: json_rpc_named_arguments,
         json_rpc_named_arguments_l1: Helper.json_rpc_named_arguments(rpc_l1),
         end_block: latest_block,
         start_block: max(start_block, last_l2_block_number)
       }}
    else
      {:start_block_undefined, true} ->
        # the process shouldn't start if the start block is not defined
        {:stop, :normal, state}

      {:rpc_l1_undefined, true} ->
        Logger.error("L1 RPC URL is not defined.")
        {:stop, :normal, state}

      {:bridge_contract_address_is_valid, false} ->
        Logger.error("PolygonZkEVMBridge contract address is invalid or not defined.")
        {:stop, :normal, state}

      {:start_block_valid, false} ->
        Logger.error("Invalid L2 Start Block value. Please check the value and the polygon_zkevm_bridge table.")
        {:stop, :normal, state}

      {:error, error_data} ->
        Logger.error(
          "Cannot get the last L2 transaction from RPC by its hash or the latest block due to an RPC error: #{inspect(error_data)}"
        )

        {:stop, :normal, state}

      {:l2_tx_not_found, true} ->
        Logger.error(
          "Cannot find the last L2 transaction from RPC by its hash. There was probably a reorg on the L2 chain. Please check the polygon_zkevm_bridge table."
        )

        {:stop, :normal, state}

      _ ->
        Logger.error("L2 Start Block is invalid or zero.")
        {:stop, :normal, state}
    end
  end

  @impl GenServer
  def handle_info(
        :continue,
        %{
          bridge_contract: bridge_contract,
          start_block: start_block,
          end_block: end_block,
          json_rpc_named_arguments: json_rpc_named_arguments,
          json_rpc_named_arguments_l1: json_rpc_named_arguments_l1
        } = state
      ) do
    start_block..end_block
    |> Enum.chunk_every(@eth_get_logs_range_size)
    |> Enum.each(fn current_chunk ->
      chunk_start = List.first(current_chunk)
      chunk_end = List.last(current_chunk)

      if chunk_start <= chunk_end do
        Helper.log_blocks_chunk_handling(chunk_start, chunk_end, start_block, end_block, nil, "L2")

        operations =
          {chunk_start, chunk_end}
          |> get_logs_all(bridge_contract, json_rpc_named_arguments)
          |> prepare_operations(json_rpc_named_arguments, json_rpc_named_arguments_l1)

        import_operations(operations)

        Helper.log_blocks_chunk_handling(
          chunk_start,
          chunk_end,
          start_block,
          end_block,
          "#{Enum.count(operations)} L2 operation(s)",
          "L2"
        )
      end
    end)

    {:stop, :normal, state}
  end

  @impl GenServer
  def handle_info({ref, _result}, state) do
    Process.demonitor(ref, [:flush])
    {:noreply, state}
  end

  def reorg_handle(reorg_block) do
    {deleted_count, _} =
      Repo.delete_all(from(b in Bridge, where: b.type == :withdrawal and b.block_number >= ^reorg_block))

    if deleted_count > 0 do
      Logger.warning(
        "As an L2 reorg was detected, some withdrawals with block_number >= #{reorg_block} were removed from the polygon_zkevm_bridge table. Number of removed rows: #{deleted_count}."
      )
    end
  end
end
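
For orientation, a hypothetical configuration sketch matching the keys this module reads: `start_block` and `bridge_contract` under its own key, plus the L1 RPC URL it borrows from `Indexer.Fetcher.PolygonZkevm.BridgeL1` (the `rpc_l1` lookup above). All values are placeholders, not settings from this PR.

import Config

# Illustrative only; not part of this PR.
config :indexer, Indexer.Fetcher.PolygonZkevm.BridgeL2,
  # first L2 block to scan for PolygonZkEVMBridge events
  start_block: 1,
  # PolygonZkEVMBridge contract address on L2 (placeholder)
  bridge_contract: "0x0000000000000000000000000000000000000000"

# The L2 fetcher reuses the L1 RPC URL configured for the L1 fetcher.
config :indexer, Indexer.Fetcher.PolygonZkevm.BridgeL1,
  rpc: "https://l1-rpc.example.com"
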
@@ -0,0 +1,153 @@
defmodule Indexer.Fetcher.RollupL1ReorgMonitor do
  @moduledoc """
  A module to catch L1 reorgs and notify a rollup module about them.
  """

  use GenServer
  use Indexer.Fetcher

  require Logger

  alias Indexer.{BoundQueue, Helper}

  @fetcher_name :rollup_l1_reorg_monitor

  def child_spec(start_link_arguments) do
    spec = %{
      id: __MODULE__,
      start: {__MODULE__, :start_link, start_link_arguments},
      restart: :transient,
      type: :worker
    }

    Supervisor.child_spec(spec, [])
  end

  def start_link(args, gen_server_options \\ []) do
    GenServer.start_link(__MODULE__, args, Keyword.put_new(gen_server_options, :name, __MODULE__))
  end

  @impl GenServer
  def init(_args) do
    Logger.metadata(fetcher: @fetcher_name)

    modules_can_use_reorg_monitor = [
      Indexer.Fetcher.PolygonEdge.Deposit,
      Indexer.Fetcher.PolygonEdge.WithdrawalExit,
      Indexer.Fetcher.PolygonZkevm.BridgeL1,
      Indexer.Fetcher.Shibarium.L1
    ]

    modules_using_reorg_monitor =
      modules_can_use_reorg_monitor
      |> Enum.reject(fn module ->
        module_config = Application.get_all_env(:indexer)[module]
        is_nil(module_config[:start_block]) and is_nil(module_config[:start_block_l1])
      end)

    if Enum.empty?(modules_using_reorg_monitor) do
      # don't start the reorg monitor as there is no module that would use it
      :ignore
    else
      # As there cannot be modules for different rollups at the same time,
      # it's correct to only take the first item of the list.
      # For example, Indexer.Fetcher.PolygonEdge.Deposit and Indexer.Fetcher.PolygonEdge.WithdrawalExit can be in the list
      # because they serve the same rollup, but Indexer.Fetcher.Shibarium.L1 and Indexer.Fetcher.PolygonZkevm.BridgeL1 cannot (as they serve different rollups).
      module_using_reorg_monitor = Enum.at(modules_using_reorg_monitor, 0)

      l1_rpc =
        if Enum.member?(
             [Indexer.Fetcher.PolygonEdge.Deposit, Indexer.Fetcher.PolygonEdge.WithdrawalExit],
             module_using_reorg_monitor
           ) do
          # there can be more than one PolygonEdge.* module, so we get the common L1 RPC URL for them from Indexer.Fetcher.PolygonEdge
          Application.get_all_env(:indexer)[Indexer.Fetcher.PolygonEdge][:polygon_edge_l1_rpc]
        else
          Application.get_all_env(:indexer)[module_using_reorg_monitor][:rpc]
        end

      json_rpc_named_arguments = Helper.json_rpc_named_arguments(l1_rpc)

      {:ok, block_check_interval, _} = Helper.get_block_check_interval(json_rpc_named_arguments)

      Process.send(self(), :reorg_monitor, [])

      {:ok,
       %{
         block_check_interval: block_check_interval,
         json_rpc_named_arguments: json_rpc_named_arguments,
         modules: modules_using_reorg_monitor,
         prev_latest: 0
       }}
    end
  end

  @impl GenServer
  def handle_info(
        :reorg_monitor,
        %{
          block_check_interval: block_check_interval,
          json_rpc_named_arguments: json_rpc_named_arguments,
          modules: modules,
          prev_latest: prev_latest
        } = state
      ) do
    {:ok, latest} = Helper.get_block_number_by_tag("latest", json_rpc_named_arguments, 100_000_000)

    if latest < prev_latest do
      Logger.warning("Reorg detected: previous latest block ##{prev_latest}, current latest block ##{latest}.")
      Enum.each(modules, &reorg_block_push(latest, &1))
    end

    Process.send_after(self(), :reorg_monitor, block_check_interval)

    {:noreply, %{state | prev_latest: latest}}
  end

  @doc """
  Pops a reorg block number from the front of the queue for the specified rollup module.
  Returns `nil` if the reorg queue is empty.
  """
  @spec reorg_block_pop(module()) :: non_neg_integer() | nil
  def reorg_block_pop(module) do
    table_name = reorg_table_name(module)

    case BoundQueue.pop_front(reorg_queue_get(table_name)) do
      {:ok, {block_number, updated_queue}} ->
        :ets.insert(table_name, {:queue, updated_queue})
        block_number

      {:error, :empty} ->
        nil
    end
  end

  defp reorg_block_push(block_number, module) do
    table_name = reorg_table_name(module)
    {:ok, updated_queue} = BoundQueue.push_back(reorg_queue_get(table_name), block_number)
    :ets.insert(table_name, {:queue, updated_queue})
  end

  defp reorg_queue_get(table_name) do
    if :ets.whereis(table_name) == :undefined do
      :ets.new(table_name, [
        :set,
        :named_table,
        :public,
        read_concurrency: true,
        write_concurrency: true
      ])
    end

    with info when info != :undefined <- :ets.info(table_name),
         [{_, value}] <- :ets.lookup(table_name, :queue) do
      value
    else
      _ -> %BoundQueue{}
    end
  end

  defp reorg_table_name(module) do
    :"#{module}#{:_reorgs}"
  end
end
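
A minimal sketch of how a rollup fetcher is expected to drain the monitor's queue, mirroring the `reorg_block_pop/1` call in `Indexer.Fetcher.PolygonZkevm.BridgeL1` above; the rollback step itself is only indicated by a comment.

alias Indexer.Fetcher.RollupL1ReorgMonitor

# The argument must be one of the modules listed in modules_can_use_reorg_monitor in init/1.
case RollupL1ReorgMonitor.reorg_block_pop(Indexer.Fetcher.PolygonZkevm.BridgeL1) do
  nil ->
    # the queue is empty, nothing to roll back
    :ok

  reorg_block ->
    # a fetcher would remove or re-handle its rows with block_number >= reorg_block here
    {:reorg, reorg_block}
end
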
@@ -0,0 +1,77 @@
defmodule Indexer.Transform.PolygonZkevm.Bridge do
  @moduledoc """
  Helper functions for transforming data for Polygon zkEVM Bridge operations.
  """

  require Logger

  import Indexer.Fetcher.PolygonZkevm.Bridge,
    only: [filter_bridge_events: 2, prepare_operations: 4]

  alias Indexer.Fetcher.PolygonZkevm.{BridgeL1, BridgeL2}
  alias Indexer.Helper

  @doc """
  Returns a list of operations given a list of blocks and logs.
  """
  @spec parse(list(), list()) :: list()
  def parse(blocks, logs) do
    prev_metadata = Logger.metadata()
    Logger.metadata(fetcher: :polygon_zkevm_bridge_l2_realtime)

    items =
      with false <- is_nil(Application.get_env(:indexer, BridgeL2)[:start_block]),
           false <- System.get_env("CHAIN_TYPE") != "polygon_zkevm",
           rpc_l1 = Application.get_all_env(:indexer)[BridgeL1][:rpc],
           {:rpc_l1_undefined, false} <- {:rpc_l1_undefined, is_nil(rpc_l1)},
           bridge_contract = Application.get_env(:indexer, BridgeL2)[:bridge_contract],
           {:bridge_contract_address_is_valid, true} <-
             {:bridge_contract_address_is_valid, Helper.address_correct?(bridge_contract)} do
        bridge_contract = String.downcase(bridge_contract)

        block_numbers = Enum.map(blocks, fn block -> block.number end)
        start_block = Enum.min(block_numbers)
        end_block = Enum.max(block_numbers)

        Helper.log_blocks_chunk_handling(start_block, end_block, start_block, end_block, nil, "L2")

        json_rpc_named_arguments_l1 = Helper.json_rpc_named_arguments(rpc_l1)

        block_to_timestamp = Enum.reduce(blocks, %{}, fn block, acc -> Map.put(acc, block.number, block.timestamp) end)

        items =
          logs
          |> filter_bridge_events(bridge_contract)
          |> prepare_operations(nil, json_rpc_named_arguments_l1, block_to_timestamp)

        Helper.log_blocks_chunk_handling(
          start_block,
          end_block,
          start_block,
          end_block,
          "#{Enum.count(items)} L2 operation(s)",
          "L2"
        )

        items
      else
        true ->
          []

        {:rpc_l1_undefined, true} ->
          Logger.error("L1 RPC URL is not defined. Cannot use #{__MODULE__} for parsing logs.")
          []

        {:bridge_contract_address_is_valid, false} ->
          Logger.error(
            "PolygonZkEVMBridge contract address is invalid or not defined. Cannot use #{__MODULE__} for parsing logs."
          )

          []
      end

    Logger.reset_metadata(prev_metadata)

    items
  end
end
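
A usage sketch with assumed inputs (not taken from this PR): `parse/2` only needs each block to expose `number` and `timestamp`, and it returns `[]` unless `CHAIN_TYPE` is `polygon_zkevm` and the `BridgeL1`/`BridgeL2` configuration described above is present.

blocks = [
  %{number: 100, timestamp: ~U[2024-01-01 00:00:00Z]},
  %{number: 101, timestamp: ~U[2024-01-01 00:00:02Z]}
]

logs = []

# Returns a (possibly empty) list of bridge operations for the realtime block fetcher.
Indexer.Transform.PolygonZkevm.Bridge.parse(blocks, logs)
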