feat: Add `INDEXER_OPTIMISM_L1_DEPOSITS_TRANSACTION_TYPE` env variable (#10674)

* fix: Use latest block when safe is not determined for OP chains

* Add INDEXER_OPTIMISM_L1_DEPOSITS_TRANSACTION_TYPE env for OP Deposits indexing module

* Small refactoring

* Fix specs for get_logs function

---------

Co-authored-by: POA <33550681+poa@users.noreply.github.com>
pull/10745/head
varasev 2 months ago committed by GitHub
parent e12b010a0e
commit a47893d4fd
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 22
      apps/indexer/lib/indexer/fetcher/optimism.ex
  2. 43
      apps/indexer/lib/indexer/fetcher/optimism/deposit.ex
  3. 2
      apps/indexer/lib/indexer/fetcher/optimism/withdrawal.ex
  4. 34
      apps/indexer/lib/indexer/fetcher/polygon_zkevm/bridge.ex
  5. 4
      apps/indexer/lib/indexer/fetcher/polygon_zkevm/bridge_l1.ex
  6. 3
      apps/indexer/lib/indexer/fetcher/polygon_zkevm/bridge_l2.ex
  7. 2
      apps/indexer/lib/indexer/fetcher/rollup_l1_reorg_monitor.ex
  8. 15
      apps/indexer/lib/indexer/helper.ex
  9. 4
      config/runtime.exs
  10. 1
      docker-compose/envs/common-blockscout.env

@ -56,7 +56,7 @@ defmodule Indexer.Fetcher.Optimism do
""" """
@spec get_block_check_interval(list()) :: {:ok, non_neg_integer(), non_neg_integer()} | {:error, any()} @spec get_block_check_interval(list()) :: {:ok, non_neg_integer(), non_neg_integer()} | {:error, any()}
def get_block_check_interval(json_rpc_named_arguments) do def get_block_check_interval(json_rpc_named_arguments) do
{last_safe_block, _} = get_safe_block(json_rpc_named_arguments) {last_safe_block, _} = Helper.get_safe_block(json_rpc_named_arguments)
first_block = max(last_safe_block - @block_check_interval_range_size, 1) first_block = max(last_safe_block - @block_check_interval_range_size, 1)
@ -91,26 +91,6 @@ defmodule Indexer.Fetcher.Optimism do
) )
end end
@doc """
Tries to get `safe` block number from the RPC node.
If it's not available, gets the `latest` one.
Returns a tuple of `{block_number, is_latest}`
where `is_latest` is true if the `safe` is not available.
"""
@spec get_safe_block(list()) :: {non_neg_integer(), boolean()}
def get_safe_block(json_rpc_named_arguments) do
case get_block_number_by_tag("safe", json_rpc_named_arguments) do
{:ok, safe_block} ->
{safe_block, false}
{:error, :not_found} ->
{:ok, latest_block} =
get_block_number_by_tag("latest", json_rpc_named_arguments, Helper.infinite_retries_number())
{latest_block, true}
end
end
defp get_block_timestamp_by_number_inner(number, json_rpc_named_arguments) do defp get_block_timestamp_by_number_inner(number, json_rpc_named_arguments) do
result = result =
%{id: 0, number: number} %{id: 0, number: number}

@ -28,6 +28,7 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
:safe_block, :safe_block,
:optimism_portal, :optimism_portal,
:json_rpc_named_arguments, :json_rpc_named_arguments,
:transaction_type,
mode: :catch_up, mode: :catch_up,
filter_id: nil, filter_id: nil,
check_interval: nil check_interval: nil
@ -78,7 +79,7 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
{last_l1_block_number, last_l1_tx_hash} <- get_last_l1_item(), {last_l1_block_number, last_l1_tx_hash} <- get_last_l1_item(),
{:ok, last_l1_tx} <- Optimism.get_transaction_by_hash(last_l1_tx_hash, json_rpc_named_arguments), {:ok, last_l1_tx} <- Optimism.get_transaction_by_hash(last_l1_tx_hash, json_rpc_named_arguments),
{:l1_tx_not_found, false} <- {:l1_tx_not_found, !is_nil(last_l1_tx_hash) && is_nil(last_l1_tx)}, {:l1_tx_not_found, false} <- {:l1_tx_not_found, !is_nil(last_l1_tx_hash) && is_nil(last_l1_tx)},
{safe_block, _} = Optimism.get_safe_block(json_rpc_named_arguments), {safe_block, _} = Helper.get_safe_block(json_rpc_named_arguments),
{:start_block_l1_valid, true} <- {:start_block_l1_valid, true} <-
{:start_block_l1_valid, {:start_block_l1_valid,
(start_block_l1 <= last_l1_block_number || last_l1_block_number == 0) && start_block_l1 <= safe_block} do (start_block_l1 <= last_l1_block_number || last_l1_block_number == 0) && start_block_l1 <= safe_block} do
@ -97,7 +98,8 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
safe_block: safe_block, safe_block: safe_block,
optimism_portal: optimism_portal, optimism_portal: optimism_portal,
json_rpc_named_arguments: json_rpc_named_arguments, json_rpc_named_arguments: json_rpc_named_arguments,
batch_size: parse_integer(env[:batch_size]) || @batch_size batch_size: parse_integer(env[:batch_size]) || @batch_size,
transaction_type: env[:transaction_type]
}} }}
else else
{:start_block_l1_valid, false} -> {:start_block_l1_valid, false} ->
@ -144,7 +146,8 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
optimism_portal: optimism_portal, optimism_portal: optimism_portal,
json_rpc_named_arguments: json_rpc_named_arguments, json_rpc_named_arguments: json_rpc_named_arguments,
mode: :catch_up, mode: :catch_up,
batch_size: batch_size batch_size: batch_size,
transaction_type: transaction_type
} = state } = state
) do ) do
to_block = min(from_block + batch_size, safe_block) to_block = min(from_block + batch_size, safe_block)
@ -160,7 +163,7 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
3 3
)}, )},
_ = Helper.log_blocks_chunk_handling(from_block, to_block, start_block, safe_block, nil, :L1), _ = Helper.log_blocks_chunk_handling(from_block, to_block, start_block, safe_block, nil, :L1),
deposits = events_to_deposits(logs, json_rpc_named_arguments), deposits = events_to_deposits(logs, transaction_type, json_rpc_named_arguments),
{:import, {:ok, _imported}} <- {:import, {:ok, _imported}} <-
{:import, Chain.import(%{optimism_deposits: %{params: deposits}, timeout: :infinity})} do {:import, Chain.import(%{optimism_deposits: %{params: deposits}, timeout: :infinity})} do
Publisher.broadcast(%{optimism_deposits: deposits}, :realtime) Publisher.broadcast(%{optimism_deposits: deposits}, :realtime)
@ -212,7 +215,8 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
optimism_portal: optimism_portal, optimism_portal: optimism_portal,
json_rpc_named_arguments: json_rpc_named_arguments, json_rpc_named_arguments: json_rpc_named_arguments,
batch_size: batch_size, batch_size: batch_size,
mode: :catch_up mode: :catch_up,
transaction_type: transaction_type
} = state } = state
) do ) do
with {:check_interval, {:ok, check_interval, new_safe}} <- with {:check_interval, {:ok, check_interval, new_safe}} <-
@ -236,7 +240,7 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
@transaction_deposited_event, @transaction_deposited_event,
json_rpc_named_arguments json_rpc_named_arguments
) do ) do
handle_new_logs(logs, json_rpc_named_arguments) handle_new_logs(logs, transaction_type, json_rpc_named_arguments)
Process.send(self(), :fetch, []) Process.send(self(), :fetch, [])
{:noreply, %{state | mode: :realtime, filter_id: filter_id, check_interval: check_interval}} {:noreply, %{state | mode: :realtime, filter_id: filter_id, check_interval: check_interval}}
else else
@ -268,12 +272,13 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
json_rpc_named_arguments: json_rpc_named_arguments, json_rpc_named_arguments: json_rpc_named_arguments,
mode: :realtime, mode: :realtime,
filter_id: filter_id, filter_id: filter_id,
check_interval: check_interval check_interval: check_interval,
transaction_type: transaction_type
} = state } = state
) do ) do
case get_filter_changes(filter_id, json_rpc_named_arguments) do case get_filter_changes(filter_id, json_rpc_named_arguments) do
{:ok, logs} -> {:ok, logs} ->
handle_new_logs(logs, json_rpc_named_arguments) handle_new_logs(logs, transaction_type, json_rpc_named_arguments)
Process.send_after(self(), :fetch, check_interval) Process.send_after(self(), :fetch, check_interval)
{:noreply, state} {:noreply, state}
@ -342,7 +347,7 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
:ok :ok
end end
defp handle_new_logs(logs, json_rpc_named_arguments) do defp handle_new_logs(logs, transaction_type, json_rpc_named_arguments) do
{reorgs, logs_to_parse, min_block, max_block, cnt} = {reorgs, logs_to_parse, min_block, max_block, cnt} =
logs logs
|> Enum.reduce({MapSet.new(), [], nil, 0, 0}, fn |> Enum.reduce({MapSet.new(), [], nil, 0, 0}, fn
@ -362,7 +367,7 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
handle_reorgs(reorgs) handle_reorgs(reorgs)
unless Enum.empty?(logs_to_parse) do unless Enum.empty?(logs_to_parse) do
deposits = events_to_deposits(logs_to_parse, json_rpc_named_arguments) deposits = events_to_deposits(logs_to_parse, transaction_type, json_rpc_named_arguments)
{:ok, _imported} = Chain.import(%{optimism_deposits: %{params: deposits}, timeout: :infinity}) {:ok, _imported} = Chain.import(%{optimism_deposits: %{params: deposits}, timeout: :infinity})
Publisher.broadcast(%{optimism_deposits: deposits}, :realtime) Publisher.broadcast(%{optimism_deposits: deposits}, :realtime)
@ -378,7 +383,7 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
end end
end end
defp events_to_deposits(logs, json_rpc_named_arguments) do defp events_to_deposits(logs, transaction_type, json_rpc_named_arguments) do
timestamps = timestamps =
logs logs
|> Enum.reduce(MapSet.new(), fn %{"blockNumber" => block_number_quantity}, acc -> |> Enum.reduce(MapSet.new(), fn %{"blockNumber" => block_number_quantity}, acc ->
@ -399,7 +404,7 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
%{} %{}
end end
Enum.map(logs, &event_to_deposit(&1, timestamps)) Enum.map(logs, &event_to_deposit(&1, timestamps, transaction_type))
end end
defp event_to_deposit( defp event_to_deposit(
@ -411,7 +416,8 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
"topics" => [_, @address_prefix <> from_stripped, @address_prefix <> to_stripped, _], "topics" => [_, @address_prefix <> from_stripped, @address_prefix <> to_stripped, _],
"data" => opaque_data "data" => opaque_data
}, },
timestamps timestamps,
transaction_type
) do ) do
{_, prefixed_block_hash} = (String.pad_leading("", 64, "0") <> stripped_block_hash) |> String.split_at(-64) {_, prefixed_block_hash} = (String.pad_leading("", 64, "0") <> stripped_block_hash) |> String.split_at(-64)
{_, prefixed_log_index} = (String.pad_leading("", 64, "0") <> stripped_log_index) |> String.split_at(-64) {_, prefixed_log_index} = (String.pad_leading("", 64, "0") <> stripped_log_index) |> String.split_at(-64)
@ -452,8 +458,17 @@ defmodule Indexer.Fetcher.Optimism.Deposit do
encoding: :hex encoding: :hex
) )
transaction_type =
transaction_type
|> Integer.to_string(16)
|> String.downcase()
l2_tx_hash = l2_tx_hash =
"0x" <> ("7e#{rlp_encoded}" |> Base.decode16!(case: :mixed) |> ExKeccak.hash_256() |> Base.encode16(case: :lower)) "0x" <>
((transaction_type <> "#{rlp_encoded}")
|> Base.decode16!(case: :mixed)
|> ExKeccak.hash_256()
|> Base.encode16(case: :lower))
block_number = quantity_to_integer(block_number_quantity) block_number = quantity_to_integer(block_number_quantity)

@ -57,7 +57,7 @@ defmodule Indexer.Fetcher.Optimism.Withdrawal do
false <- is_nil(start_block_l2), false <- is_nil(start_block_l2),
true <- start_block_l2 > 0, true <- start_block_l2 > 0,
{last_l2_block_number, last_l2_transaction_hash} <- get_last_l2_item(), {last_l2_block_number, last_l2_transaction_hash} <- get_last_l2_item(),
{safe_block, safe_block_is_latest} = Optimism.get_safe_block(json_rpc_named_arguments), {safe_block, safe_block_is_latest} = Helper.get_safe_block(json_rpc_named_arguments),
{:start_block_l2_valid, true} <- {:start_block_l2_valid, true} <-
{:start_block_l2_valid, {:start_block_l2_valid,
(start_block_l2 <= last_l2_block_number || last_l2_block_number == 0) && start_block_l2 <= safe_block}, (start_block_l2 <= last_l2_block_number || last_l2_block_number == 0) && start_block_l2 <= safe_block},

@ -7,10 +7,7 @@ defmodule Indexer.Fetcher.PolygonZkevm.Bridge do
import EthereumJSONRPC, import EthereumJSONRPC,
only: [ only: [
integer_to_quantity: 1,
json_rpc: 2,
quantity_to_integer: 1, quantity_to_integer: 1,
request: 1,
timestamp_to_datetime: 1 timestamp_to_datetime: 1
] ]
@ -82,40 +79,19 @@ defmodule Indexer.Fetcher.PolygonZkevm.Bridge do
@spec get_logs_all({non_neg_integer(), non_neg_integer()}, binary(), list()) :: list() @spec get_logs_all({non_neg_integer(), non_neg_integer()}, binary(), list()) :: list()
def get_logs_all({chunk_start, chunk_end}, bridge_contract, json_rpc_named_arguments) do def get_logs_all({chunk_start, chunk_end}, bridge_contract, json_rpc_named_arguments) do
{:ok, result} = {:ok, result} =
get_logs( IndexerHelper.get_logs(
chunk_start, chunk_start,
chunk_end, chunk_end,
bridge_contract, bridge_contract,
[[@bridge_event, @claim_event_v1, @claim_event_v2]], [[@bridge_event, @claim_event_v1, @claim_event_v2]],
json_rpc_named_arguments json_rpc_named_arguments,
0,
IndexerHelper.infinite_retries_number()
) )
Logs.elixir_to_params(result) Logs.elixir_to_params(result)
end end
defp get_logs(from_block, to_block, address, topics, json_rpc_named_arguments, retries \\ 100_000_000) do
processed_from_block = if is_integer(from_block), do: integer_to_quantity(from_block), else: from_block
processed_to_block = if is_integer(to_block), do: integer_to_quantity(to_block), else: to_block
req =
request(%{
id: 0,
method: "eth_getLogs",
params: [
%{
:fromBlock => processed_from_block,
:toBlock => processed_to_block,
:address => address,
:topics => topics
}
]
})
error_message = &"Cannot fetch logs for the block range #{from_block}..#{to_block}. Error: #{inspect(&1)}"
IndexerHelper.repeated_call(&json_rpc/2, [req, json_rpc_named_arguments], error_message, retries)
end
@doc """ @doc """
Imports the given zkEVM bridge operations into database. Imports the given zkEVM bridge operations into database.
Used by Indexer.Fetcher.PolygonZkevm.BridgeL1 and Indexer.Fetcher.PolygonZkevm.BridgeL2 fetchers. Used by Indexer.Fetcher.PolygonZkevm.BridgeL1 and Indexer.Fetcher.PolygonZkevm.BridgeL2 fetchers.
@ -241,7 +217,7 @@ defmodule Indexer.Fetcher.PolygonZkevm.Bridge do
defp blocks_to_timestamps(events, json_rpc_named_arguments) do defp blocks_to_timestamps(events, json_rpc_named_arguments) do
events events
|> IndexerHelper.get_blocks_by_events(json_rpc_named_arguments, 100_000_000) |> IndexerHelper.get_blocks_by_events(json_rpc_named_arguments, IndexerHelper.infinite_retries_number())
|> Enum.reduce(%{}, fn block, acc -> |> Enum.reduce(%{}, fn block, acc ->
block_number = quantity_to_integer(Map.get(block, "number")) block_number = quantity_to_integer(Map.get(block, "number"))
timestamp = timestamp_to_datetime(Map.get(block, "timestamp")) timestamp = timestamp_to_datetime(Map.get(block, "timestamp"))

@ -219,7 +219,9 @@ defmodule Indexer.Fetcher.PolygonZkevm.BridgeL1 do
end) end)
new_start_block = last_written_block + 1 new_start_block = last_written_block + 1
{:ok, new_end_block} = Helper.get_block_number_by_tag("latest", json_rpc_named_arguments, 100_000_000)
{:ok, new_end_block} =
Helper.get_block_number_by_tag("latest", json_rpc_named_arguments, Helper.infinite_retries_number())
delay = delay =
if new_end_block == last_written_block do if new_end_block == last_written_block do

@ -71,7 +71,8 @@ defmodule Indexer.Fetcher.PolygonZkevm.BridgeL2 do
false <- is_nil(start_block), false <- is_nil(start_block),
true <- start_block > 0, true <- start_block > 0,
{last_l2_block_number, last_l2_transaction_hash} = Reader.last_l2_item(), {last_l2_block_number, last_l2_transaction_hash} = Reader.last_l2_item(),
{:ok, latest_block} = Helper.get_block_number_by_tag("latest", json_rpc_named_arguments, 100_000_000), {:ok, latest_block} =
Helper.get_block_number_by_tag("latest", json_rpc_named_arguments, Helper.infinite_retries_number()),
{:start_block_valid, true} <- {:start_block_valid, true} <-
{:start_block_valid, {:start_block_valid,
(start_block <= last_l2_block_number || last_l2_block_number == 0) && start_block <= latest_block}, (start_block <= last_l2_block_number || last_l2_block_number == 0) && start_block <= latest_block},

@ -114,7 +114,7 @@ defmodule Indexer.Fetcher.RollupL1ReorgMonitor do
prev_latest: prev_latest prev_latest: prev_latest
} = state } = state
) do ) do
{:ok, latest} = Helper.get_block_number_by_tag("latest", json_rpc_named_arguments, 100_000_000) {:ok, latest} = Helper.get_block_number_by_tag("latest", json_rpc_named_arguments, Helper.infinite_retries_number())
if latest < prev_latest do if latest < prev_latest do
Logger.warning("Reorg detected: previous latest block ##{prev_latest}, current latest block ##{latest}.") Logger.warning("Reorg detected: previous latest block ##{prev_latest}, current latest block ##{latest}.")

@ -108,9 +108,9 @@ defmodule Indexer.Helper do
first_block = max(last_safe_block - @block_check_interval_range_size, 1) first_block = max(last_safe_block - @block_check_interval_range_size, 1)
with {:ok, first_block_timestamp} <- with {:ok, first_block_timestamp} <-
get_block_timestamp_by_number(first_block, json_rpc_named_arguments, 100_000_000), get_block_timestamp_by_number(first_block, json_rpc_named_arguments, @infinite_retries_number),
{:ok, last_safe_block_timestamp} <- {:ok, last_safe_block_timestamp} <-
get_block_timestamp_by_number(last_safe_block, json_rpc_named_arguments, 100_000_000) do get_block_timestamp_by_number(last_safe_block, json_rpc_named_arguments, @infinite_retries_number) do
block_check_interval = block_check_interval =
ceil((last_safe_block_timestamp - first_block_timestamp) / (last_safe_block - first_block) * 1000 / 2) ceil((last_safe_block_timestamp - first_block_timestamp) / (last_safe_block - first_block) * 1000 / 2)
@ -139,8 +139,9 @@ defmodule Indexer.Helper do
{:ok, safe_block} -> {:ok, safe_block} ->
{safe_block, false} {safe_block, false}
{:error, :not_found} -> {:error, _} ->
{:ok, latest_block} = get_block_number_by_tag("latest", json_rpc_named_arguments, 100_000_000) {:ok, latest_block} = get_block_number_by_tag("latest", json_rpc_named_arguments, @infinite_retries_number)
{latest_block, true} {latest_block, true}
end end
end end
@ -251,14 +252,14 @@ defmodule Indexer.Helper do
non_neg_integer() | binary(), non_neg_integer() | binary(),
non_neg_integer() | binary(), non_neg_integer() | binary(),
binary(), binary(),
[binary()], [binary()] | [list()],
EthereumJSONRPC.json_rpc_named_arguments() EthereumJSONRPC.json_rpc_named_arguments()
) :: {:error, atom() | binary() | map()} | {:ok, any()} ) :: {:error, atom() | binary() | map()} | {:ok, any()}
@spec get_logs( @spec get_logs(
non_neg_integer() | binary(), non_neg_integer() | binary(),
non_neg_integer() | binary(), non_neg_integer() | binary(),
binary(), binary(),
[binary()], [binary()] | [list()],
EthereumJSONRPC.json_rpc_named_arguments(), EthereumJSONRPC.json_rpc_named_arguments(),
integer() integer()
) :: {:error, atom() | binary() | map()} | {:ok, any()} ) :: {:error, atom() | binary() | map()} | {:ok, any()}
@ -266,7 +267,7 @@ defmodule Indexer.Helper do
non_neg_integer() | binary(), non_neg_integer() | binary(),
non_neg_integer() | binary(), non_neg_integer() | binary(),
binary(), binary(),
[binary()], [binary()] | [list()],
EthereumJSONRPC.json_rpc_named_arguments(), EthereumJSONRPC.json_rpc_named_arguments(),
integer(), integer(),
non_neg_integer() non_neg_integer()

@ -858,7 +858,9 @@ config :indexer, Indexer.Fetcher.Optimism,
optimism_l1_rpc: System.get_env("INDEXER_OPTIMISM_L1_RPC"), optimism_l1_rpc: System.get_env("INDEXER_OPTIMISM_L1_RPC"),
optimism_l1_system_config: System.get_env("INDEXER_OPTIMISM_L1_SYSTEM_CONFIG_CONTRACT") optimism_l1_system_config: System.get_env("INDEXER_OPTIMISM_L1_SYSTEM_CONFIG_CONTRACT")
config :indexer, Indexer.Fetcher.Optimism.Deposit, batch_size: System.get_env("INDEXER_OPTIMISM_L1_DEPOSITS_BATCH_SIZE") config :indexer, Indexer.Fetcher.Optimism.Deposit,
batch_size: System.get_env("INDEXER_OPTIMISM_L1_DEPOSITS_BATCH_SIZE"),
transaction_type: ConfigHelper.parse_integer_env_var("INDEXER_OPTIMISM_L1_DEPOSITS_TRANSACTION_TYPE", 126)
config :indexer, Indexer.Fetcher.Optimism.OutputRoot, config :indexer, Indexer.Fetcher.Optimism.OutputRoot,
output_oracle: System.get_env("INDEXER_OPTIMISM_L1_OUTPUT_ORACLE_CONTRACT") output_oracle: System.get_env("INDEXER_OPTIMISM_L1_OUTPUT_ORACLE_CONTRACT")

@ -288,6 +288,7 @@ INDEXER_DISABLE_INTERNAL_TRANSACTIONS_FETCHER=false
# INDEXER_OPTIMISM_L2_WITHDRAWALS_START_BLOCK= # INDEXER_OPTIMISM_L2_WITHDRAWALS_START_BLOCK=
# INDEXER_OPTIMISM_L2_MESSAGE_PASSER_CONTRACT= # INDEXER_OPTIMISM_L2_MESSAGE_PASSER_CONTRACT=
# INDEXER_OPTIMISM_L1_DEPOSITS_BATCH_SIZE= # INDEXER_OPTIMISM_L1_DEPOSITS_BATCH_SIZE=
# INDEXER_OPTIMISM_L1_DEPOSITS_TRANSACTION_TYPE=
# ROOTSTOCK_REMASC_ADDRESS= # ROOTSTOCK_REMASC_ADDRESS=
# ROOTSTOCK_BRIDGE_ADDRESS= # ROOTSTOCK_BRIDGE_ADDRESS=
# ROOTSTOCK_LOCKED_BTC_CACHE_PERIOD= # ROOTSTOCK_LOCKED_BTC_CACHE_PERIOD=

Loading…
Cancel
Save