fix: Workaround for repeating logIndex (#10880)

* fix: Workaround for repeating logIndex

* Fix tests

* Migration done, but tuple usage still needs a rewrite

* Migration rewritten

* Fix tests

* Fix test

* Process review comments

* Update cspell ignore

* Refactoring
nikitosing 1 month ago committed by GitHub
parent 1bb721865b
commit 213a3247ae
  12 changed files:
  1. apps/ethereum_jsonrpc/lib/ethereum_jsonrpc/log.ex (15 lines changed)
  2. apps/explorer/config/test.exs (2 lines changed)
  3. apps/explorer/lib/explorer/application.ex (13 lines changed)
  4. apps/explorer/lib/explorer/chain/cache/background_migrations.ex (12 lines changed)
  5. apps/explorer/lib/explorer/chain/import/runner/blocks.ex (34 lines changed)
  6. apps/explorer/lib/explorer/migrator/filling_migration.ex (14 lines changed)
  7. apps/explorer/lib/explorer/migrator/sanitize_duplicated_log_index_logs.ex (271 lines changed)
  8. apps/explorer/test/explorer/chain/import/runner/blocks_test.exs (117 lines changed)
  9. apps/explorer/test/explorer/migrator/sanitize_duplicated_log_index_logs_test.ex (144 lines changed)
  10. apps/explorer/test/support/factory.ex (5 lines changed)
  11. apps/indexer/lib/indexer/block/fetcher.ex (20 lines changed)
  12. cspell.json (5 lines changed)

@@ -46,7 +46,8 @@ defmodule EthereumJSONRPC.Log do
index: 0,
second_topic: nil,
third_topic: nil,
transaction_hash: "0x53bd884872de3e488692881baeec262e7b95234d3965248c39fe992fffd433e5"
transaction_hash: "0x53bd884872de3e488692881baeec262e7b95234d3965248c39fe992fffd433e5",
transaction_index: 0
}
iex> EthereumJSONRPC.Log.elixir_to_params(
@@ -74,11 +75,13 @@ defmodule EthereumJSONRPC.Log do
index: 0,
second_topic: "0x000000000000000000000000c15bf627accd3b054075c7880425f903106be72a",
third_topic: "0x000000000000000000000000a59eb37750f9c8f2e11aac6700e62ef89187e4ed",
transaction_hash: "0xf9b663b4e9b1fdc94eb27b5cfba04eb03d2f7b3fa0b24eb2e1af34f823f2b89e"
transaction_hash: "0xf9b663b4e9b1fdc94eb27b5cfba04eb03d2f7b3fa0b24eb2e1af34f823f2b89e",
transaction_index: 0
}
"""
def elixir_to_params(%{
def elixir_to_params(
%{
"address" => address_hash,
"blockNumber" => block_number,
"blockHash" => block_hash,
@@ -86,14 +89,16 @@ defmodule EthereumJSONRPC.Log do
"logIndex" => index,
"topics" => topics,
"transactionHash" => transaction_hash
}) do
} = log
) do
%{
address_hash: address_hash,
block_number: block_number,
block_hash: block_hash,
data: data,
index: index,
transaction_hash: transaction_hash
transaction_hash: transaction_hash,
transaction_index: log["transactionIndex"]
}
|> put_topics(topics)
end
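
For context on the new clause: `transaction_index` is read with bracket access (`log["transactionIndex"]`), so payloads that omit the field still decode, just with a `nil` index. A minimal sketch with an illustrative input (not one of the doctests above):

```elixir
log = %{
  "address" => "0x8bf38d4764929064f2d4d3a56520a76ab3df415b",
  "blockNumber" => 37,
  "blockHash" => "0x7f035c5f3c0678250853a1fde6027def7cac1812667bd0d5ab7ccb94eb8b6f3a",
  "data" => "0x",
  "logIndex" => 0,
  "topics" => ["0x600bcf04a13e752d1e3670a5a9f1c21177ca2a93c6f5391d4f1298d098097c22"],
  "transactionHash" => "0x53bd884872de3e488692881baeec262e7b95234d3965248c39fe992fffd433e5"
}

# no "transactionIndex" key: the pattern match still succeeds and the field is nil
EthereumJSONRPC.Log.elixir_to_params(log).transaction_index
#=> nil

# with the key present it is passed through unchanged
EthereumJSONRPC.Log.elixir_to_params(Map.put(log, "transactionIndex", 0)).transaction_index
#=> 0
```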

@@ -14,7 +14,7 @@ config :explorer, Explorer.Repo,
url: database_url,
pool: Ecto.Adapters.SQL.Sandbox,
# Default of `5_000` was too low for `BlockFetcher` test
ownership_timeout: :timer.minutes(7),
ownership_timeout: :timer.minutes(1),
timeout: :timer.seconds(60),
queue_target: 1000,
migration_lock: nil,

@@ -146,6 +146,11 @@ defmodule Explorer.Application do
configure_mode_dependent_process(Explorer.Migrator.ShrinkInternalTransactions, :indexer),
configure_chain_type_dependent_process(Explorer.Chain.Cache.BlackfortValidatorsCounters, :blackfort),
configure_chain_type_dependent_process(Explorer.Chain.Cache.StabilityValidatorsCounters, :stability),
configure_chain_type_dependent_process(Explorer.Migrator.SanitizeDuplicatedLogIndexLogs, [
:polygon_zkevm,
:rsk,
:filecoin
]),
configure_mode_dependent_process(Explorer.Migrator.SanitizeMissingTokenBalances, :indexer),
configure_mode_dependent_process(Explorer.Migrator.SanitizeReplacedTransactions, :indexer),
configure_mode_dependent_process(Explorer.Migrator.ReindexInternalTransactionsWithIncompatibleStatus, :indexer)
@@ -207,6 +212,14 @@ defmodule Explorer.Application do
end
end
defp configure_chain_type_dependent_process(process, chain_types) when is_list(chain_types) do
if Application.get_env(:explorer, :chain_type) in chain_types do
process
else
[]
end
end
defp configure_chain_type_dependent_process(process, chain_type) do
if Application.get_env(:explorer, :chain_type) == chain_type do
process
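
The new list clause only differs from the single-atom clause in checking membership instead of equality. A conceptual sketch of both (the functions are private, so this shows behaviour rather than calls you can make from outside; process names and chain type are taken from the children list above):

```elixir
# assuming the node is configured with chain_type :rsk
Application.put_env(:explorer, :chain_type, :rsk)

# list clause: :rsk is in the list, so the migrator is returned and supervised
configure_chain_type_dependent_process(
  Explorer.Migrator.SanitizeDuplicatedLogIndexLogs,
  [:polygon_zkevm, :rsk, :filecoin]
)
#=> Explorer.Migrator.SanitizeDuplicatedLogIndexLogs

# single-atom clause: chain type does not match, so [] is returned
# and the entry disappears when the children list is flattened
configure_chain_type_dependent_process(Explorer.Chain.Cache.BlackfortValidatorsCounters, :blackfort)
#=> []
```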

@@ -10,13 +10,15 @@ defmodule Explorer.Chain.Cache.BackgroundMigrations do
key: :transactions_denormalization_finished,
key: :tb_token_type_finished,
key: :ctb_token_type_finished,
key: :tt_denormalization_finished
key: :tt_denormalization_finished,
key: :sanitize_duplicated_log_index_logs_finished
@dialyzer :no_match
alias Explorer.Migrator.{
AddressCurrentTokenBalanceTokenType,
AddressTokenBalanceTokenType,
SanitizeDuplicatedLogIndexLogs,
TokenTransferTokenType,
TransactionsDenormalization
}
@@ -52,4 +54,12 @@ defmodule Explorer.Chain.Cache.BackgroundMigrations do
{:return, false}
end
defp handle_fallback(:sanitize_duplicated_log_index_logs_finished) do
Task.start_link(fn ->
set_sanitize_duplicated_log_index_logs_finished(SanitizeDuplicatedLogIndexLogs.migration_finished?())
end)
{:return, false}
end
end
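
As with the other migration flags, a cache miss on the new key returns `false` immediately and spawns a task that backfills the real value from the migration status. A rough sketch of the read path, assuming the cache's generated getter name (the same one used in the tests below):

```elixir
alias Explorer.Chain.Cache.BackgroundMigrations

# first read after a restart: handle_fallback/1 fires, returns false right away
# and asynchronously checks SanitizeDuplicatedLogIndexLogs.migration_finished?()
BackgroundMigrations.get_sanitize_duplicated_log_index_logs_finished()
#=> false

# once the migration has finished (update_cache/0) or the fallback task has
# confirmed completion, the flag reads true
BackgroundMigrations.get_sanitize_duplicated_log_index_logs_finished()
#=> true
```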

@@ -828,29 +828,6 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
end
defp refs_to_token_transfers_query(historical_token_transfers_query, filtered_query) do
if Application.get_env(:explorer, :chain_type) in [:polygon_zkevm, :rsk] do
from(historical_tt in subquery(historical_token_transfers_query),
inner_join: tt in subquery(filtered_query),
on:
tt.token_contract_address_hash == historical_tt.token_contract_address_hash and
tt.block_number == historical_tt.block_number and
fragment("? @> ARRAY[?::decimal]", tt.token_ids, historical_tt.token_id),
inner_join: t in Transaction,
on: tt.transaction_hash == t.hash,
select: %{
token_contract_address_hash: tt.token_contract_address_hash,
token_id: historical_tt.token_id,
block_number: tt.block_number,
transaction_hash: t.hash,
log_index: tt.log_index,
position:
over(row_number(),
partition_by: [tt.token_contract_address_hash, historical_tt.token_id, tt.block_number],
order_by: [desc: t.index, desc: tt.log_index]
)
}
)
else
from(historical_tt in subquery(historical_token_transfers_query),
inner_join: tt in subquery(filtered_query),
on:
@@ -866,24 +843,13 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
group_by: [tt.token_contract_address_hash, historical_tt.token_id, tt.block_number]
)
end
end
defp derived_token_transfers_query(refs_to_token_transfers, filtered_query) do
if Application.get_env(:explorer, :chain_type) in [:polygon_zkevm, :rsk] do
from(tt in filtered_query,
inner_join: tt_1 in subquery(refs_to_token_transfers),
on:
tt_1.log_index == tt.log_index and tt_1.block_number == tt.block_number and
tt_1.transaction_hash == tt.transaction_hash,
where: tt_1.position == 1
)
else
from(tt in filtered_query,
inner_join: tt_1 in subquery(refs_to_token_transfers),
on: tt_1.log_index == tt.log_index and tt_1.block_number == tt.block_number
)
end
end
defp token_instances_on_conflict do
from(

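With the `:polygon_zkevm`/`:rsk` branch deleted, both helpers keep only what used to be the `else` branch, so the same join now runs for every chain type. A sketch reconstructed from the surviving branch above (not copied verbatim from the post-change file):

```elixir
defp derived_token_transfers_query(refs_to_token_transfers, filtered_query) do
  # single join on (log_index, block_number); the position-based variant that
  # compensated for duplicated log indexes is no longer needed
  from(tt in filtered_query,
    inner_join: tt_1 in subquery(refs_to_token_transfers),
    on: tt_1.log_index == tt.log_index and tt_1.block_number == tt.block_number
  )
end
```
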
@@ -8,6 +8,8 @@ defmodule Explorer.Migrator.FillingMigration do
@callback last_unprocessed_identifiers(map()) :: {[any()], map()}
@callback update_batch([any()]) :: any()
@callback update_cache :: any()
@callback on_finish :: any()
@callback before_start :: any()
defmacro __using__(_opts) do
quote do
@@ -44,6 +46,7 @@ defmodule Explorer.Migrator.FillingMigration do
migration_status ->
MigrationStatus.set_status(migration_name(), "started")
before_start()
schedule_batch_migration()
{:noreply, (migration_status && migration_status.meta) || %{}}
end
@@ -53,6 +56,7 @@ defmodule Explorer.Migrator.FillingMigration do
def handle_info(:migrate_batch, state) do
case last_unprocessed_identifiers(state) do
{[], new_state} ->
on_finish()
update_cache()
MigrationStatus.set_status(migration_name(), "completed")
{:stop, :normal, new_state}
@@ -86,6 +90,16 @@ defmodule Explorer.Migrator.FillingMigration do
Application.get_env(:explorer, __MODULE__)[:concurrency] || default
end
def on_finish do
:ignore
end
def before_start do
:ignore
end
defoverridable on_finish: 0, before_start: 0
end
end
end
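
Existing migrators are unaffected because both new callbacks get default `:ignore` implementations and are marked `defoverridable`; only migrators that need setup or teardown override them. A hypothetical minimal example (module name and bodies invented purely for illustration):

```elixir
defmodule Explorer.Migrator.ExampleMigration do
  # hypothetical migrator, shown only to illustrate the two new optional callbacks
  use Explorer.Migrator.FillingMigration

  @impl Explorer.Migrator.FillingMigration
  def before_start do
    # runs once, right after the status is set to "started"
    :ok
  end

  @impl Explorer.Migrator.FillingMigration
  def on_finish do
    # runs once no unprocessed identifiers remain, just before update_cache/0
    # and the status being set to "completed"
    :ok
  end

  # migration_name/0, last_unprocessed_identifiers/1, unprocessed_data_query/0,
  # update_batch/1 and update_cache/0 would be implemented as in any FillingMigration
end
```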

@@ -0,0 +1,271 @@
defmodule Explorer.Migrator.SanitizeDuplicatedLogIndexLogs do
@moduledoc """
This module is responsible for sanitizing duplicate log index entries in the database.
The migration process includes identifying duplicate log indexes and updating the related token transfers and token instances accordingly.
"""
use Explorer.Migrator.FillingMigration
import Ecto.Query
alias Explorer.Chain.Cache.BackgroundMigrations
alias Explorer.Chain.{Log, TokenTransfer}
alias Explorer.Chain.Token.Instance
alias Explorer.Migrator.FillingMigration
alias Explorer.Repo
require Logger
@migration_name "sanitize_duplicated_log_index_logs"
@impl FillingMigration
def migration_name, do: @migration_name
@impl FillingMigration
def last_unprocessed_identifiers(state) do
block_number = state[:block_number_to_process] || 0
limit = batch_size() * concurrency()
ids =
block_number
|> unprocessed_data_query(block_number + limit)
|> Repo.all(timeout: :infinity)
|> Enum.group_by(& &1.block_hash)
|> Map.to_list()
{ids, Map.put(state, :block_number_to_process, block_number + limit)}
end
@doc """
Stub implementation to satisfy FillingMigration behaviour
"""
@impl FillingMigration
@spec unprocessed_data_query() :: nil
def unprocessed_data_query do
nil
end
def unprocessed_data_query(block_number_start, block_number_end) do
Log
|> where([l], l.block_number >= ^block_number_start and l.block_number < ^block_number_end)
end
@impl FillingMigration
@doc """
Updates a batch of logs grouped by block.
## Parameters
- logs_by_block: A map where the keys are block identifiers and the values are lists of logs associated with those blocks.
## Returns
:ok
"""
def update_batch(logs_by_block) do
logs_to_update =
logs_by_block
|> Enum.map(&process_block/1)
|> Enum.reject(&(&1 == :ignore))
|> List.flatten()
{ids, logs, ids_to_new_index} =
logs_to_update
|> Enum.reduce({[], [], %{}}, fn {log, new_index}, {ids, logs, ids_to_new_index} ->
id = {log.transaction_hash, log.block_hash, log.index}
{[id | ids],
[
%Log{log | index: new_index} |> Map.from_struct() |> Map.drop([:block, :address, :transaction, :__meta__])
| logs
], Map.put(ids_to_new_index, id, new_index)}
end)
prepared_ids =
Enum.map(ids, fn {transaction_hash, block_hash, log_index} ->
{transaction_hash.bytes, block_hash.bytes, log_index}
end)
Repo.transaction(fn ->
Log
|> where(
[log],
fragment(
"(?, ?, ?) = ANY(?::log_id[])",
log.transaction_hash,
log.block_hash,
log.index,
^prepared_ids
)
)
|> Repo.delete_all(timeout: :infinity)
{_, token_transfers} =
TokenTransfer
|> where(
[token_transfer],
fragment(
"(?, ?, ?) = ANY(?::log_id[])",
token_transfer.transaction_hash,
token_transfer.block_hash,
token_transfer.log_index,
^prepared_ids
)
)
|> select([token_transfer], token_transfer)
|> Repo.delete_all(timeout: :infinity)
Repo.insert_all(Log, logs, timeout: :infinity)
token_transfers
|> Enum.map(fn token_transfer ->
id = token_transfer_to_index(token_transfer)
%TokenTransfer{token_transfer | log_index: ids_to_new_index[id]}
|> Map.from_struct()
|> Map.drop([
:token_id,
:index_in_batch,
:reverse_index_in_batch,
:token_decimals,
:from_address,
:to_address,
:token_contract_address,
:block,
:instances,
:token,
:transaction,
:__meta__
])
end)
|> (&Repo.insert_all(TokenTransfer, &1, timeout: :infinity)).()
nft_instances_params =
token_transfers
|> Enum.filter(&(&1.token_type == "ERC-721"))
|> Enum.map(fn token_transfer -> {token_transfer.block_number, token_transfer.log_index} end)
nft_updates_map =
token_transfers
|> Enum.filter(&(&1.token_type == "ERC-721" && &1.block_consensus))
|> Enum.reduce(%{}, fn token_transfer, acc ->
id = token_transfer_to_index(token_transfer)
Map.put(acc, {token_transfer.block_number, token_transfer.log_index}, ids_to_new_index[id])
end)
Instance
|> where(
[nft],
fragment(
"(?, ?) = ANY(?::nft_id[])",
nft.owner_updated_at_block,
nft.owner_updated_at_log_index,
^nft_instances_params
)
)
|> Repo.all(timeout: :infinity)
|> Enum.map(fn nft ->
%Instance{
nft
| owner_updated_at_log_index: nft_updates_map[{nft.owner_updated_at_block, nft.owner_updated_at_log_index}]
}
|> Map.from_struct()
|> Map.drop([
:current_token_balance,
:is_unique,
:owner,
:token,
:__meta__
])
end)
|> (&Repo.insert_all(Instance, &1,
conflict_target: [:token_contract_address_hash, :token_id],
on_conflict: {:replace, [:owner_updated_at_log_index]},
timeout: :infinity
)).()
end)
:ok
end
defp process_block({block_hash, logs}) do
if logs |> Enum.frequencies_by(& &1.index) |> Map.values() |> Enum.max() == 1 do
:ignore
else
Logger.error("Found logs with same index within one block: #{block_hash} in DB")
logs = Repo.preload(logs, :transaction)
logs
|> Enum.sort_by(&{&1.transaction.index, &1.index, &1.transaction_hash})
# credo:disable-for-next-line Credo.Check.Refactor.Nesting
|> Enum.map_reduce(0, fn log, index ->
{{log, index}, index + 1}
end)
|> elem(0)
end
end
@impl FillingMigration
def update_cache do
BackgroundMigrations.set_sanitize_duplicated_log_index_logs_finished(true)
end
defp token_transfer_to_index(token_transfer) do
{token_transfer.transaction_hash, token_transfer.block_hash, token_transfer.log_index}
end
@doc """
Callback function that is executed before the migration process starts.
"""
@impl FillingMigration
def before_start do
"""
DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'log_id') THEN
CREATE TYPE log_id AS (
transaction_hash bytea,
block_hash bytea,
log_index integer
);
END IF;
END$$;
"""
|> Repo.query!()
"""
DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'nft_id') THEN
CREATE TYPE nft_id AS (
block_number bigint,
log_index integer
);
END IF;
END$$;
"""
|> Repo.query!()
:ok
end
@doc """
Callback function that is executed when the migration process finishes.
"""
@impl FillingMigration
def on_finish do
"""
DROP TYPE log_id;
"""
|> Repo.query!([], timeout: :infinity)
"""
DROP TYPE nft_id;
"""
|> Repo.query!([], timeout: :infinity)
:ok
end
end
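
The renumbering rule itself is small: within a block that contains duplicates, logs are sorted by `{transaction.index, log.index, transaction_hash}` and given fresh consecutive indexes starting at 0. A self-contained sketch of that step, using plain maps in place of preloaded `Log` structs:

```elixir
# three logs in one block; index 3 is duplicated across two transactions
logs = [
  %{index: 3, transaction_hash: "0xaa", transaction: %{index: 0}},
  %{index: 0, transaction_hash: "0xaa", transaction: %{index: 0}},
  %{index: 3, transaction_hash: "0xbb", transaction: %{index: 1}}
]

logs
|> Enum.sort_by(&{&1.transaction.index, &1.index, &1.transaction_hash})
|> Enum.with_index()
|> Enum.map(fn {log, new_index} -> {log.index, new_index} end)
#=> [{0, 0}, {3, 1}, {3, 2}]   # old index mapped to its new, unique index
```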

@@ -86,123 +86,6 @@ defmodule Explorer.Chain.Import.Runner.BlocksTest do
"Tuple was written even though it is not distinct"
end
test "update_token_instances_owner inserts correct token instances in cases when log_index is not unique within block",
%{
consensus_block: %{hash: previous_block_hash, miner_hash: miner_hash, number: previous_block_number},
options: options
} do
old_env = Application.get_env(:explorer, :chain_type)
Application.put_env(:explorer, :chain_type, :polygon_zkevm)
previous_consensus_block = insert(:block, hash: previous_block_hash, number: previous_block_number)
%{hash: block_hash, number: block_number} = consensus_block = insert(:block)
transaction =
:transaction
|> insert()
|> with_block(consensus_block)
transaction_with_previous_transfer =
:transaction
|> insert()
|> with_block(previous_consensus_block, index: 1)
older_transaction_with_previous_transfer =
:transaction
|> insert()
|> with_block(previous_consensus_block, index: 0)
transaction_of_other_instance =
:transaction
|> insert()
|> with_block(previous_consensus_block)
token = insert(:token, type: "ERC-721")
correct_token_id = Decimal.new(1)
forked_token_transfer =
insert(:token_transfer,
token_type: "ERC-721",
token_contract_address: token.contract_address,
transaction: transaction,
token_ids: [correct_token_id],
block_number: block_number
)
_token_instance =
insert(:token_instance,
token_id: correct_token_id,
token_contract_address_hash: token.contract_address_hash,
owner_updated_at_block: block_number,
owner_updated_at_log_index: forked_token_transfer.log_index
)
_previous_token_transfer =
insert(:token_transfer,
token_type: "ERC-721",
token_contract_address: token.contract_address,
transaction: transaction_with_previous_transfer,
token_ids: [correct_token_id],
block_number: previous_block_number,
log_index: 10
)
_older_previous_token_transfer =
insert(:token_transfer,
token_type: "ERC-721",
token_contract_address: token.contract_address,
transaction: older_transaction_with_previous_transfer,
token_ids: [correct_token_id],
block_number: previous_block_number,
log_index: 11
)
_unsuitable_token_instance =
insert(:token_instance,
token_id: 2,
token_contract_address_hash: token.contract_address_hash,
owner_updated_at_block: previous_block_number,
owner_updated_at_log_index: forked_token_transfer.log_index
)
_unsuitable_token_transfer =
insert(:token_transfer,
token_type: "ERC-721",
token_contract_address: token.contract_address,
transaction: transaction_of_other_instance,
token_ids: [2],
block_number: previous_block_number,
log_index: forked_token_transfer.log_index
)
block_params =
params_for(:block, hash: block_hash, miner_hash: miner_hash, number: block_number, consensus: false)
%Ecto.Changeset{valid?: true, changes: block_changes} = Block.changeset(%Block{}, block_params)
changes_list = [block_changes]
assert {:ok, %{}}
assert {:ok,
%{
update_token_instances_owner: [
%{
token_id: ^correct_token_id,
owner_updated_at_block: ^previous_block_number,
owner_updated_at_log_index: 10
}
]
}} =
Multi.new()
|> Blocks.run(changes_list, options)
|> Repo.transaction()
on_exit(fn ->
Application.put_env(:explorer, :chain_type, old_env)
end)
end
test "coin balances are deleted and new balances are derived if some blocks lost consensus",
%{consensus_block: %{number: block_number} = block, options: options} do
%{hash: address_hash} = address = insert(:address)

@@ -0,0 +1,144 @@
defmodule Explorer.Migrator.SanitizeDuplicatedLogIndexLogsTest do
use Explorer.DataCase, async: false
alias Explorer.Chain.Cache.BackgroundMigrations
alias Explorer.Chain.Log
alias Explorer.Chain.TokenTransfer
alias Explorer.Chain.Token.Instance
alias Explorer.Migrator.{SanitizeDuplicatedLogIndexLogs, MigrationStatus}
describe "Sanitize duplicated log index logs" do
test "correctly identifies and updates duplicated log index logs" do
block = insert(:block)
tx1 = :transaction |> insert() |> with_block(block, index: 0)
tx2 = :transaction |> insert() |> with_block(block, index: 1)
_log1 = insert(:log, transaction: tx1, index: 3, data: "0x01", block: block, block_number: block.number)
_log2 = insert(:log, transaction: tx1, index: 0, data: "0x02", block: block, block_number: block.number)
_log3 = insert(:log, transaction: tx2, index: 3, data: "0x03", block: block, block_number: block.number)
log4 = insert(:log)
assert MigrationStatus.get_status("sanitize_duplicated_log_index_logs") == nil
SanitizeDuplicatedLogIndexLogs.start_link([])
Process.sleep(300)
assert MigrationStatus.get_status("sanitize_duplicated_log_index_logs") == "completed"
assert BackgroundMigrations.get_sanitize_duplicated_log_index_logs_finished() == true
updated_logs = Repo.all(Log |> where([log], log.block_number == ^block.number) |> order_by([log], asc: log.index))
assert match?(
[
%{index: 0, data: %Explorer.Chain.Data{bytes: <<2>>}},
%{index: 1, data: %Explorer.Chain.Data{bytes: <<1>>}},
%{index: 2, data: %Explorer.Chain.Data{bytes: <<3>>}}
],
updated_logs
)
assert %Log{log4 | address: nil, block: nil, transaction: nil} == %Log{
Repo.one(Log |> where([log], log.block_number != ^block.number))
| address: nil,
block: nil,
transaction: nil
}
end
test "correctly identifies and updates duplicated log index logs & updates corresponding token transfers and token instances" do
block = insert(:block)
token_address = insert(:contract_address)
insert(:token, contract_address: token_address, type: "ERC-721")
instance = insert(:token_instance, token_contract_address_hash: token_address.hash)
tx1 = :transaction |> insert() |> with_block(block, index: 0)
tx2 = :transaction |> insert() |> with_block(block, index: 1)
log1 = insert(:log, transaction: tx1, index: 3, data: "0x01", block: block, block_number: block.number)
log2 = insert(:log, transaction: tx1, index: 0, data: "0x02", block: block, block_number: block.number)
log3 = insert(:log, transaction: tx2, index: 3, data: "0x03", block: block, block_number: block.number)
log4 = insert(:log)
_tt1 =
insert(:token_transfer,
token_type: "ERC-721",
block: block,
block_number: block.number,
log_index: log1.index,
token_ids: [instance.token_id],
token_contract_address: token_address,
token_contract_address_hash: token_address.hash,
transaction: tx1,
transaction_hash: tx1.hash,
block_hash: block.hash
)
_tt2 =
insert(:token_transfer,
block: block,
block_number: block.number,
log_index: log2.index,
transaction: tx1,
transaction_hash: tx1.hash
)
_tt3 =
insert(:token_transfer,
block: block,
block_number: block.number,
log_index: log3.index,
transaction: tx2,
transaction_hash: tx2.hash
)
Instance.changeset(instance, %{owner_updated_at_block: block.number, owner_updated_at_log_index: log1.index})
|> Repo.update!()
assert MigrationStatus.get_status("sanitize_duplicated_log_index_logs") == nil
SanitizeDuplicatedLogIndexLogs.start_link([])
Process.sleep(300)
assert MigrationStatus.get_status("sanitize_duplicated_log_index_logs") == "completed"
assert BackgroundMigrations.get_sanitize_duplicated_log_index_logs_finished() == true
updated_logs = Repo.all(Log |> where([log], log.block_number == ^block.number) |> order_by([log], asc: log.index))
assert match?(
[
%{index: 0, data: %Explorer.Chain.Data{bytes: <<2>>}},
%{index: 1, data: %Explorer.Chain.Data{bytes: <<1>>}},
%{index: 2, data: %Explorer.Chain.Data{bytes: <<3>>}}
],
updated_logs
)
block_number = block.number
assert [%{owner_updated_at_block: ^block_number, owner_updated_at_log_index: 1}] = Repo.all(Instance)
assert [%{log_index: 1, block_number: ^block_number}] =
Repo.all(TokenTransfer |> where([tt], tt.token_type == "ERC-721"))
assert %Log{log4 | address: nil, block: nil, transaction: nil} == %Log{
Repo.one(Log |> where([log], log.block_number != ^block.number))
| address: nil,
block: nil,
transaction: nil
}
end
test "correctly handles cases where there are no duplicated log index logs" do
assert MigrationStatus.get_status("sanitize_duplicated_log_index_logs") == nil
SanitizeDuplicatedLogIndexLogs.start_link([])
Process.sleep(100)
assert MigrationStatus.get_status("sanitize_duplicated_log_index_logs") == "completed"
assert BackgroundMigrations.get_sanitize_duplicated_log_index_logs_finished() == true
end
end
end

@@ -825,11 +825,12 @@ defmodule Explorer.Factory do
token_address = insert(:contract_address, contract_code: contract_code)
token = insert(:token, contract_address: token_address)
block = build(:block)
%TokenTransfer{
block: build(:block),
block: block,
amount: Decimal.new(1),
block_number: block_number(),
block_number: block.number,
from_address: from_address,
to_address: to_address,
token_contract_address: token_address,

@@ -160,7 +160,7 @@ defmodule Indexer.Block.Fetcher do
%{logs: receipt_logs, receipts: receipts} = receipt_params,
transactions_with_receipts = Receipts.put(transactions_params_without_receipts, receipts),
celo_epoch_logs = CeloEpochLogs.fetch(blocks, json_rpc_named_arguments),
logs = receipt_logs ++ celo_epoch_logs,
logs = maybe_set_new_log_index(receipt_logs) ++ celo_epoch_logs,
%{token_transfers: token_transfers, tokens: tokens} = TokenTransfers.parse(logs),
%{token_transfers: celo_native_token_transfers, tokens: celo_tokens} =
CeloTransactionTokenTransfers.parse_transactions(transactions_with_receipts),
@@ -754,4 +754,22 @@ defmodule Indexer.Block.Fetcher do
defp async_match_arbitrum_messages_to_l2(transactions_with_messages_from_l1) do
ArbitrumMessagesToL2Matcher.async_discover_match(transactions_with_messages_from_l1)
end
# Workaround for cases when the RPC sends logs with the same index within one block
defp maybe_set_new_log_index(logs) do
logs
|> Enum.group_by(& &1.block_hash)
|> Enum.map(fn {block_hash, logs_per_block} ->
if logs_per_block |> Enum.frequencies_by(& &1.index) |> Map.values() |> Enum.max() == 1 do
logs_per_block
else
Logger.error("Found logs with same index within one block: #{block_hash}")
logs_per_block
|> Enum.sort_by(&{&1.transaction_index, &1.index, &1.transaction_hash})
|> Enum.with_index(&%{&1 | index: &2})
end
end)
|> List.flatten()
end
end
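
The same renumbering also happens at fetch time, before logs ever reach the importer: logs are grouped per block, and any block whose logs repeat an index gets its logs re-sorted and reindexed from 0. A conceptual sketch with illustrative maps, showing only the fields the helper looks at (`maybe_set_new_log_index/1` is private, so this describes its behaviour rather than a call you can make from outside the module):

```elixir
logs = [
  %{block_hash: "0xb1", index: 5, transaction_index: 1, transaction_hash: "0xcc"},
  %{block_hash: "0xb1", index: 5, transaction_index: 0, transaction_hash: "0xdd"},
  %{block_hash: "0xb2", index: 0, transaction_index: 0, transaction_hash: "0xee"}
]

# block 0xb1 repeats index 5, so its two logs are renumbered 0 and 1 after
# sorting by {transaction_index, index, transaction_hash}; block 0xb2 is untouched
# (the relative order of blocks in the flattened result is not significant)
maybe_set_new_log_index(logs)
#=> [
#     %{block_hash: "0xb1", index: 0, transaction_index: 0, transaction_hash: "0xdd"},
#     %{block_hash: "0xb1", index: 1, transaction_index: 1, transaction_hash: "0xcc"},
#     %{block_hash: "0xb2", index: 0, transaction_index: 0, transaction_hash: "0xee"}
#   ]
```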

@@ -181,8 +181,8 @@
"errora",
"errorb",
"erts",
"Ethash",
"ethaccount",
"Ethash",
"etherchain",
"ethprice",
"ethsupply",
@@ -561,14 +561,15 @@
"Txns",
"txpool",
"txreceipt",
"typname",
"ueberauth",
"ufixed",
"uncatalog",
"unclosable",
"unfetched",
"unfinalized",
"unindexed",
"Unichain",
"unindexed",
"Unitarion",
"Unitorius",
"Unitorus",
