Merge branch 'master' into ab-get-rid-if-ex-json-schema-warnings

pull/2723/head
Ayrat Badykov 5 years ago committed by GitHub
commit 4d20dc4709
  1. CHANGELOG.md (4)
  2. apps/block_scout_web/assets/js/lib/try_api.js (6)
  3. apps/block_scout_web/lib/block_scout_web/controllers/address_controller.ex (2)
  4. apps/block_scout_web/lib/block_scout_web/controllers/chain_controller.ex (2)
  5. apps/block_scout_web/lib/block_scout_web/notifier.ex (2)
  6. apps/block_scout_web/lib/block_scout_web/templates/transaction_log/_logs.html.eex (8)
  7. apps/block_scout_web/priv/gettext/default.pot (5)
  8. apps/block_scout_web/priv/gettext/en/LC_MESSAGES/default.po (5)
  9. apps/block_scout_web/test/block_scout_web/channels/address_channel_test.exs (16)
  10. apps/block_scout_web/test/block_scout_web/controllers/address_controller_test.exs (10)
  11. apps/block_scout_web/test/block_scout_web/controllers/api/rpc/address_controller_test.exs (4)
  12. apps/block_scout_web/test/block_scout_web/controllers/api/rpc/eth_controller_test.exs (4)
  13. apps/block_scout_web/test/block_scout_web/controllers/chain_controller_test.exs (8)
  14. apps/block_scout_web/test/block_scout_web/features/viewing_addresses_test.exs (6)
  15. apps/block_scout_web/test/block_scout_web/features/viewing_app_test.exs (6)
  16. apps/block_scout_web/test/block_scout_web/features/viewing_chain_test.exs (34)
  17. apps/explorer/config/config.exs (5)
  18. apps/explorer/config/test.exs (2)
  19. apps/explorer/lib/explorer/application.ex (1)
  20. apps/explorer/lib/explorer/chain.ex (23)
  21. apps/explorer/lib/explorer/chain/address.ex (10)
  22. apps/explorer/lib/explorer/chain/import/runner/blocks.ex (246)
  23. apps/explorer/lib/explorer/counters/addresses_counter.ex (125)
  24. apps/explorer/test/explorer/chain/import/runner/blocks_test.exs (12)
  25. apps/explorer/test/explorer/chain_test.exs (17)
  26. apps/explorer/test/explorer/counters/addresses_counter_test.exs (16)

@ -1,6 +1,7 @@
## Current
### Features
- [#2717](https://github.com/poanetwork/blockscout/pull/2717) - Improve speed of nonconsensus data removal
- [#2679](https://github.com/poanetwork/blockscout/pull/2679) - added fixed height for card chain blocks and card chain transactions
- [#2678](https://github.com/poanetwork/blockscout/pull/2678) - fixed dashboard banner height bug
- [#2672](https://github.com/poanetwork/blockscout/pull/2672) - added new theme for xUSDT
@ -10,6 +11,7 @@
- [#2663](https://github.com/poanetwork/blockscout/pull/2663) - Fetch address counters in parallel
### Fixes
- [#2718](https://github.com/poanetwork/blockscout/pull/2718) - Include all addresses taking part in transactions in wallets' addresses counter
- [#2709](https://github.com/poanetwork/blockscout/pull/2709) - Fix stuck label and value for uncle block height
- [#2707](https://github.com/poanetwork/blockscout/pull/2707) - fix for dashboard banner chart legend items
- [#2706](https://github.com/poanetwork/blockscout/pull/2706) - fix empty total_supply in coin gecko response
@ -17,6 +19,7 @@
- [#2696](https://github.com/poanetwork/blockscout/pull/2696) - do not update fetched_coin_balance with nil
- [#2693](https://github.com/poanetwork/blockscout/pull/2693) - remove non consensus internal transactions
- [#2691](https://github.com/poanetwork/blockscout/pull/2691) - fix exchange rate websocket update for Rootstock
- [#2688](https://github.com/poanetwork/blockscout/pull/2688) - fix try it out section
- [#2687](https://github.com/poanetwork/blockscout/pull/2687) - remove non-consensus token transfers, logs when inserting new consensus blocks
- [#2684](https://github.com/poanetwork/blockscout/pull/2684) - do not filter pending logs
- [#2682](https://github.com/poanetwork/blockscout/pull/2682) - Use Task.start instead of Task.async in caches
@ -26,6 +29,7 @@
### Chore
- [#2724](https://github.com/poanetwork/blockscout/pull/2724) - fix ci by commenting a line in hackney library
- [#2708](https://github.com/poanetwork/blockscout/pull/2708) - add log index to logs view
- [#2723](https://github.com/poanetwork/blockscout/pull/2723) - get rid of ex_json_schema warnings

@ -55,10 +55,6 @@ function handleSuccess (query, xhr, clickedButton) {
clickedButton.prop('disabled', false)
}
function dropDomain (url) {
return new URL(url).pathname
}
// Show 'Try it out' UI for a module/action.
$('button[data-selector*="btn-try-api"]').click(event => {
const clickedButton = $(event.target)
@ -128,7 +124,7 @@ $('button[data-try-api-ui-button-type="execute"]').click(event => {
}
$.ajax({
url: dropDomain(composeRequestUrl(query)),
url: composeRequestUrl(query),
success: (_data, _status, xhr) => {
handleSuccess(query, xhr, clickedButton)
},

@ -61,7 +61,7 @@ defmodule BlockScoutWeb.AddressController do
def index(conn, _params) do
render(conn, "index.html",
current_path: current_path(conn),
address_count: Chain.count_addresses_with_balance_from_cache()
address_count: Chain.count_addresses_from_cache()
)
end

@ -28,7 +28,7 @@ defmodule BlockScoutWeb.ChainController do
render(
conn,
"show.html",
address_count: Chain.count_addresses_with_balance_from_cache(),
address_count: Chain.count_addresses_from_cache(),
average_block_time: AverageBlockTime.average_block_time(),
exchange_rate: exchange_rate,
chart_data_path: market_history_chart_path(conn, :show),

@ -14,7 +14,7 @@ defmodule BlockScoutWeb.Notifier do
alias Phoenix.View
def handle_event({:chain_event, :addresses, type, addresses}) when type in [:realtime, :on_demand] do
Endpoint.broadcast("addresses:new_address", "count", %{count: Chain.count_addresses_with_balance_from_cache()})
Endpoint.broadcast("addresses:new_address", "count", %{count: Chain.count_addresses_from_cache()})
addresses
|> Stream.reject(fn %Address{fetched_coin_balance: fetched_coin_balance} -> is_nil(fetched_coin_balance) end)

@ -185,5 +185,13 @@
</div>
<% end %>
</dd>
<dt class="col-md-2">
<%= gettext "Log Index" %>
</dt>
<dd class="col-md-10">
<div class="text-dark raw-transaction-log-index">
<%= @log.index %>
</div>
</dd>
</dl>
</div>

@ -1841,3 +1841,8 @@ msgstr ""
#: lib/block_scout_web/views/address_contract_view.ex:22
msgid "true"
msgstr ""
#, elixir-format
#: lib/block_scout_web/templates/transaction_log/_logs.html.eex:189
msgid "Log Index"
msgstr ""

@ -1841,3 +1841,8 @@ msgstr ""
#: lib/block_scout_web/views/address_contract_view.ex:22
msgid "true"
msgstr ""
#, elixir-format
#: lib/block_scout_web/templates/transaction_log/_logs.html.eex:189
msgid "Log Index"
msgstr ""

@ -1,11 +1,11 @@
defmodule BlockScoutWeb.AddressChannelTest do
use BlockScoutWeb.ChannelCase,
# ETS tables are shared in `Explorer.Counters.AddressesWithBalanceCounter`
# ETS tables are shared in `Explorer.Counters.AddressesCounter`
async: false
alias BlockScoutWeb.UserSocket
alias BlockScoutWeb.Notifier
alias Explorer.Counters.AddressesWithBalanceCounter
alias Explorer.Counters.AddressesCounter
test "subscribed user is notified of new_address count event" do
topic = "addresses:new_address"
@ -13,8 +13,8 @@ defmodule BlockScoutWeb.AddressChannelTest do
address = insert(:address)
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
Notifier.handle_event({:chain_event, :addresses, :realtime, [address]})
@ -55,8 +55,8 @@ defmodule BlockScoutWeb.AddressChannelTest do
test "notified of balance_update for matching address", %{address: address, topic: topic} do
address_with_balance = %{address | fetched_coin_balance: 1}
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
Notifier.handle_event({:chain_event, :addresses, :realtime, [address_with_balance]})
@ -67,8 +67,8 @@ defmodule BlockScoutWeb.AddressChannelTest do
end
test "not notified of balance_update if fetched_coin_balance is nil", %{address: address} do
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
Notifier.handle_event({:chain_event, :addresses, :realtime, [address]})

@ -3,7 +3,7 @@ defmodule BlockScoutWeb.AddressControllerTest do
# ETS tables are shared in `Explorer.Counters.*`
async: false
alias Explorer.Counters.AddressesWithBalanceCounter
alias Explorer.Counters.AddressesCounter
describe "GET index/2" do
test "returns top addresses", %{conn: conn} do
@ -12,8 +12,8 @@ defmodule BlockScoutWeb.AddressControllerTest do
|> Enum.map(&insert(:address, fetched_coin_balance: &1))
|> Enum.map(& &1.hash)
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
conn = get(conn, address_path(conn, :index, %{type: "JSON"}))
{:ok, %{"items" => items}} = Poison.decode(conn.resp_body)
@ -25,8 +25,8 @@ defmodule BlockScoutWeb.AddressControllerTest do
address = insert(:address, fetched_coin_balance: 1)
insert(:address_name, address: address, primary: true, name: "POA Wallet")
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
conn = get(conn, address_path(conn, :index, %{type: "JSON"}))

@ -6,7 +6,7 @@ defmodule BlockScoutWeb.API.RPC.AddressControllerTest do
alias BlockScoutWeb.API.RPC.AddressController
alias Explorer.Chain
alias Explorer.Chain.{Events.Subscriber, Transaction, Wei}
alias Explorer.Counters.{AddressesWithBalanceCounter, AverageBlockTime}
alias Explorer.Counters.{AddressesCounter, AverageBlockTime}
alias Indexer.Fetcher.CoinBalanceOnDemand
alias Explorer.Repo
@ -22,7 +22,7 @@ defmodule BlockScoutWeb.API.RPC.AddressControllerTest do
start_supervised!({Task.Supervisor, name: Indexer.TaskSupervisor})
start_supervised!(AverageBlockTime)
start_supervised!({CoinBalanceOnDemand, [mocked_json_rpc_named_arguments, [name: CoinBalanceOnDemand]]})
start_supervised!(AddressesWithBalanceCounter)
start_supervised!(AddressesCounter)
Application.put_env(:explorer, AverageBlockTime, enabled: true)

@ -1,7 +1,7 @@
defmodule BlockScoutWeb.API.RPC.EthControllerTest do
use BlockScoutWeb.ConnCase, async: false
alias Explorer.Counters.{AddressesWithBalanceCounter, AverageBlockTime}
alias Explorer.Counters.{AddressesCounter, AverageBlockTime}
alias Explorer.Repo
alias Indexer.Fetcher.CoinBalanceOnDemand
@ -14,7 +14,7 @@ defmodule BlockScoutWeb.API.RPC.EthControllerTest do
start_supervised!({Task.Supervisor, name: Indexer.TaskSupervisor})
start_supervised!(AverageBlockTime)
start_supervised!({CoinBalanceOnDemand, [mocked_json_rpc_named_arguments, [name: CoinBalanceOnDemand]]})
start_supervised!(AddressesWithBalanceCounter)
start_supervised!(AddressesCounter)
Application.put_env(:explorer, AverageBlockTime, enabled: true)

@ -1,18 +1,18 @@
defmodule BlockScoutWeb.ChainControllerTest do
use BlockScoutWeb.ConnCase,
# ETS table is shared in `Explorer.Counters.AddressesWithBalanceCounter`
# ETS table is shared in `Explorer.Counters.AddressesCounter`
async: false
import BlockScoutWeb.WebRouter.Helpers, only: [chain_path: 2, block_path: 3, transaction_path: 3, address_path: 3]
alias Explorer.Chain.Block
alias Explorer.Counters.AddressesWithBalanceCounter
alias Explorer.Counters.AddressesCounter
setup do
Supervisor.terminate_child(Explorer.Supervisor, Explorer.Chain.Cache.Blocks.child_id())
Supervisor.restart_child(Explorer.Supervisor, Explorer.Chain.Cache.Blocks.child_id())
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
:ok
end

@ -3,7 +3,7 @@ defmodule BlockScoutWeb.ViewingAddressesTest do
# Because ETS tables are shared for `Explorer.Counters.*`
async: false
alias Explorer.Counters.AddressesWithBalanceCounter
alias Explorer.Counters.AddressesCounter
alias BlockScoutWeb.{AddressPage, AddressView, Notifier}
setup do
@ -58,8 +58,8 @@ defmodule BlockScoutWeb.ViewingAddressesTest do
[first_address | _] = addresses
[last_address | _] = Enum.reverse(addresses)
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
session
|> AddressPage.visit_page()

@ -5,11 +5,11 @@ defmodule BlockScoutWeb.ViewingAppTest do
alias BlockScoutWeb.AppPage
alias BlockScoutWeb.Counters.BlocksIndexedCounter
alias Explorer.Counters.AddressesWithBalanceCounter
alias Explorer.Counters.AddressesCounter
setup do
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
:ok
end

@ -7,7 +7,7 @@ defmodule BlockScoutWeb.ViewingChainTest do
alias BlockScoutWeb.{AddressPage, BlockPage, ChainPage, TransactionPage}
alias Explorer.Chain.Block
alias Explorer.Counters.AddressesWithBalanceCounter
alias Explorer.Counters.AddressesCounter
setup do
Supervisor.terminate_child(Explorer.Supervisor, Explorer.Chain.Cache.Blocks.child_id())
@ -35,8 +35,8 @@ defmodule BlockScoutWeb.ViewingChainTest do
test "search for address", %{session: session} do
address = insert(:address)
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
session
|> ChainPage.visit_page()
@ -49,8 +49,8 @@ defmodule BlockScoutWeb.ViewingChainTest do
test "search for blocks from chain page", %{session: session} do
block = insert(:block, number: 6)
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
session
|> ChainPage.visit_page()
@ -59,8 +59,8 @@ defmodule BlockScoutWeb.ViewingChainTest do
end
test "blocks list", %{session: session} do
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
session
|> ChainPage.visit_page()
@ -70,8 +70,8 @@ defmodule BlockScoutWeb.ViewingChainTest do
test "inserts place holder blocks on render for out of order blocks", %{session: session} do
insert(:block, number: 409)
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
session
|> ChainPage.visit_page()
@ -84,8 +84,8 @@ defmodule BlockScoutWeb.ViewingChainTest do
test "search for transactions", %{session: session} do
transaction = insert(:transaction)
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
session
|> ChainPage.visit_page()
@ -94,8 +94,8 @@ defmodule BlockScoutWeb.ViewingChainTest do
end
test "transactions list", %{session: session} do
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
session
|> ChainPage.visit_page()
@ -111,8 +111,8 @@ defmodule BlockScoutWeb.ViewingChainTest do
|> with_contract_creation(contract_address)
|> with_block(block)
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
session
|> ChainPage.visit_page()
@ -138,8 +138,8 @@ defmodule BlockScoutWeb.ViewingChainTest do
token_contract_address: contract_token_address
)
start_supervised!(AddressesWithBalanceCounter)
AddressesWithBalanceCounter.consolidate()
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
session
|> ChainPage.visit_page()

@ -47,6 +47,11 @@ balances_update_interval =
end
config :explorer, Explorer.Counters.AddressesWithBalanceCounter,
enabled: false,
enable_consolidation: true,
update_interval_in_seconds: balances_update_interval || 30 * 60
config :explorer, Explorer.Counters.AddressesCounter,
enabled: true,
enable_consolidation: true,
update_interval_in_seconds: balances_update_interval || 30 * 60
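
For reference, both counters fall back to 30 * 60 = 1800 seconds when balances_update_interval is unset. A hypothetical iex check of the resolved value (uses only Application.get_env/2 and Keyword.get/2; the 1800 assumes the default applied):
iex> :explorer
...> |> Application.get_env(Explorer.Counters.AddressesCounter)
...> |> Keyword.get(:update_interval_in_seconds)
1800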

@ -21,6 +21,8 @@ config :explorer, Explorer.Counters.AverageBlockTime, enabled: false
config :explorer, Explorer.Counters.AddressesWithBalanceCounter, enabled: false, enable_consolidation: false
config :explorer, Explorer.Counters.AddressesCounter, enabled: false, enable_consolidation: false
config :explorer, Explorer.Market.History.Cataloger, enabled: false
config :explorer, Explorer.Tracer, disabled?: false

@ -68,6 +68,7 @@ defmodule Explorer.Application do
configure(Explorer.KnownTokens),
configure(Explorer.Market.History.Cataloger),
configure(Explorer.Counters.AddressesWithBalanceCounter),
configure(Explorer.Counters.AddressesCounter),
configure(Explorer.Counters.AverageBlockTime),
configure(Explorer.Validator.MetadataProcessor),
configure(Explorer.Staking.EpochCounter)

@ -58,7 +58,7 @@ defmodule Explorer.Chain do
}
alias Explorer.Chain.Import.Runner
alias Explorer.Counters.AddressesWithBalanceCounter
alias Explorer.Counters.{AddressesCounter, AddressesWithBalanceCounter}
alias Explorer.Market.MarketHistoryCache
alias Explorer.{PagingOptions, Repo}
@ -118,6 +118,14 @@ defmodule Explorer.Chain do
AddressesWithBalanceCounter.fetch()
end
@doc """
Gets from the cache the count of all `t:Explorer.Chain.Address.t/0`'s
"""
@spec count_addresses_from_cache :: non_neg_integer()
def count_addresses_from_cache do
AddressesCounter.fetch()
end
@doc """
Counts the number of addresses with fetched coin balance > 0.
@ -131,6 +139,19 @@ defmodule Explorer.Chain do
)
end
@doc """
Counts the number of all addresses.
This function should be used with caution. In larger databases, it may take a
while for the count to return.
"""
def count_addresses do
Repo.one(
Address.count(),
timeout: :infinity
)
end
@doc """
`t:Explorer.Chain.InternalTransaction/0`s from the address with the given `hash`.
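
A minimal usage sketch of the two new functions (hypothetical iex session; the count 42 is illustrative, and the cached value is 0 until AddressesCounter has consolidated at least once):
iex> Explorer.Chain.count_addresses()                  # direct COUNT(*) against the DB; may be slow
42
iex> Explorer.Counters.AddressesCounter.consolidate()  # populate the :ets cache
true
iex> Explorer.Chain.count_addresses_from_cache()       # served from :ets, no DB round trip
42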

@ -237,6 +237,16 @@ defmodule Explorer.Chain.Address do
)
end
@doc """
Counts all the addresses.
"""
def count do
from(
a in Address,
select: fragment("COUNT(*)")
)
end
defimpl String.Chars do
@doc """
Uses `hash` as string representation, formatting it according to the eip-55 specification
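
The new query is a plain COUNT(*) over the address table with no filter; a hedged sketch of evaluating it directly (Chain.count_addresses/0 above wraps exactly this call):
# Illustrative; the :infinity timeout mirrors Chain.count_addresses/0 above.
Explorer.Repo.one(Explorer.Chain.Address.count(), timeout: :infinity)
#=> 3 (one row per address, regardless of fetched_coin_balance)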

@ -46,32 +46,14 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
hashes = Enum.map(changes_list, & &1.hash)
consensus_block_numbers = consensus_block_numbers(changes_list)
where_invalid_neighbour = where_invalid_neighbour(changes_list)
# Enforce ShareLocks tables order (see docs: sharelocks.md)
multi
|> Multi.run(:acquire_blocks, fn repo, _ ->
acquire_blocks(repo, hashes, consensus_block_numbers, where_invalid_neighbour)
end)
|> Multi.run(:lose_consensus, fn repo, _ ->
lose_consensus(repo, consensus_block_numbers, insert_options)
end)
|> Multi.run(:lose_invalid_neighbour_consensus, fn repo, _ ->
lose_invalid_neighbour_consensus(repo, where_invalid_neighbour, insert_options)
end)
|> Multi.run(:nonconsensus_block_numbers, fn _repo,
%{
lose_consensus: lost_consensus_blocks,
lose_invalid_neighbour_consensus: lost_consensus_neighbours
} ->
nonconsensus_block_numbers =
(lost_consensus_blocks ++ lost_consensus_neighbours)
|> Enum.sort()
|> Enum.dedup()
{:ok, nonconsensus_block_numbers}
lose_consensus(repo, hashes, consensus_block_numbers, changes_list, insert_options)
end)
|> Multi.run(:blocks, fn repo, _ ->
# Note, needs to be executed after `lose_consensus` for lock acquisition
insert(repo, changes_list, insert_options)
end)
|> Multi.run(:uncle_fetched_block_second_degree_relations, fn repo, %{blocks: blocks} when is_list(blocks) ->
@ -101,27 +83,14 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
transactions: transactions
})
end)
|> Multi.run(:remove_nonconsensus_logs, fn repo,
%{
nonconsensus_block_numbers: nonconsensus_block_numbers,
fork_transactions: transactions
} ->
remove_nonconsensus_logs(repo, nonconsensus_block_numbers, transactions, insert_options)
|> Multi.run(:remove_nonconsensus_logs, fn repo, %{derive_transaction_forks: transactions} ->
remove_nonconsensus_logs(repo, transactions, insert_options)
end)
|> Multi.run(:acquire_internal_transactions, fn repo,
%{
nonconsensus_block_numbers: nonconsensus_block_numbers,
fork_transactions: transactions
} ->
acquire_internal_transactions(repo, nonconsensus_block_numbers, hashes, transactions)
|> Multi.run(:acquire_internal_transactions, fn repo, %{derive_transaction_forks: transactions} ->
acquire_internal_transactions(repo, hashes, transactions)
end)
|> Multi.run(:remove_nonconsensus_internal_transactions, fn repo,
%{
nonconsensus_block_numbers:
nonconsensus_block_numbers,
fork_transactions: transactions
} ->
remove_nonconsensus_internal_transactions(repo, nonconsensus_block_numbers, transactions, insert_options)
|> Multi.run(:remove_nonconsensus_internal_transactions, fn repo, %{derive_transaction_forks: transactions} ->
remove_nonconsensus_internal_transactions(repo, transactions, insert_options)
end)
|> Multi.run(:internal_transaction_transaction_block_number, fn repo, _ ->
update_internal_transaction_block_number(repo, hashes)
@ -129,9 +98,8 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
|> Multi.run(:acquire_contract_address_tokens, fn repo, _ ->
acquire_contract_address_tokens(repo, consensus_block_numbers)
end)
|> Multi.run(:remove_nonconsensus_token_transfers, fn repo,
%{nonconsensus_block_numbers: nonconsensus_block_numbers} ->
remove_nonconsensus_token_transfers(repo, nonconsensus_block_numbers, insert_options)
|> Multi.run(:remove_nonconsensus_token_transfers, fn repo, %{derive_transaction_forks: transactions} ->
remove_nonconsensus_token_transfers(repo, transactions, insert_options)
end)
|> Multi.run(:delete_address_token_balances, fn repo, _ ->
delete_address_token_balances(repo, consensus_block_numbers, insert_options)
@ -159,22 +127,6 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
@impl Runner
def timeout, do: @timeout
defp acquire_blocks(repo, hashes, consensus_block_numbers, where_invalid_neighbour) do
query =
from(
block in where_invalid_neighbour,
or_where: block.number in ^consensus_block_numbers,
or_where: block.hash in ^hashes,
select: block.hash,
# Enforce Block ShareLocks order (see docs: sharelocks.md)
order_by: [asc: block.hash],
lock: "FOR UPDATE"
)
blocks = repo.all(query)
{:ok, blocks}
end
defp acquire_contract_address_tokens(repo, consensus_block_numbers) do
query =
from(address_current_token_balance in Address.CurrentTokenBalance,
@ -187,15 +139,12 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
Tokens.acquire_contract_address_tokens(repo, contract_address_hashes)
end
defp acquire_internal_transactions(repo, nonconsensus_block_numbers, hashes, forked_transactions) do
forked_transaction_hashes = Enum.map(forked_transactions, & &1.hash)
defp acquire_internal_transactions(repo, hashes, forked_transaction_hashes) do
query =
from(internal_transaction in InternalTransaction,
join: transaction in Transaction,
on: internal_transaction.transaction_hash == transaction.hash,
where: transaction.block_number in ^nonconsensus_block_numbers,
or_where: transaction.block_hash in ^hashes,
where: transaction.block_hash in ^hashes,
or_where: transaction.hash in ^forked_transaction_hashes,
select: {internal_transaction.transaction_hash, internal_transaction.index},
# Enforce InternalTransaction ShareLocks order (see docs: sharelocks.md)
@ -229,14 +178,11 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
lock: "FOR UPDATE"
)
transactions = repo.all(query)
hashes = Enum.map(transactions, & &1.hash)
update_query =
from(
t in Transaction,
where: t.hash in ^hashes,
join: s in subquery(query),
on: t.hash == s.hash,
update: [
set: [
block_hash: nil,
@ -250,17 +196,19 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
updated_at: ^updated_at
]
],
select: t.hash
select: %{
block_hash: s.block_hash,
index: s.index,
hash: s.hash
}
)
try do
{_num, _res} = repo.update_all(update_query, [], timeout: timeout)
{_num, transactions} = repo.update_all(update_query, [], timeout: timeout)
{:ok, transactions}
rescue
postgrex_error in Postgrex.Error ->
{:error, %{exception: postgrex_error}}
end
{:ok, transactions}
rescue
postgrex_error in Postgrex.Error ->
{:error, %{exception: postgrex_error}}
end
defp derive_transaction_forks(%{
@ -283,7 +231,7 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
# Enforce Fork ShareLocks order (see docs: sharelocks.md)
|> Enum.sort_by(&{&1.uncle_hash, &1.index})
{_total, result} =
{_total, forked_transaction} =
repo.insert_all(
Transaction.Fork,
transaction_forks,
@ -294,11 +242,11 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
update: [set: [hash: fragment("EXCLUDED.hash")]],
where: fragment("EXCLUDED.hash <> ?", transaction_fork.hash)
),
returning: [:uncle_hash, :hash],
returning: [:hash],
timeout: timeout
)
{:ok, result}
{:ok, Enum.map(forked_transaction, & &1.hash)}
end
@spec insert(Repo.t(), [map()], %{
@ -364,47 +312,48 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
|> Enum.map(& &1.number)
end
defp lose_consensus(_, [], _), do: {:ok, []}
defp lose_consensus(repo, consensus_block_number, %{timeout: timeout, timestamps: %{updated_at: updated_at}})
when is_list(consensus_block_number) do
# ShareLocks order already enforced by `acquire_blocks` (see docs: sharelocks.md)
{_, result} =
repo.update_all(
from(block in Block, where: block.number in ^consensus_block_number, select: block.number),
[set: [consensus: false, updated_at: updated_at]],
timeout: timeout
)
{:ok, result}
rescue
postgrex_error in Postgrex.Error ->
{:error, %{exception: postgrex_error, consensus_block_numbers: consensus_block_number}}
end
defp lose_invalid_neighbour_consensus(repo, where_invalid_neighbour, %{
defp lose_consensus(repo, hashes, consensus_block_numbers, changes_list, %{
timeout: timeout,
timestamps: %{updated_at: updated_at}
}) do
# ShareLocks order already enforced by `acquire_blocks` (see docs: sharelocks.md)
{_, result} =
acquire_query =
from(
block in where_invalid_neighbour(changes_list),
or_where: block.number in ^consensus_block_numbers,
# we also need to acquire blocks that will be upserted here, for ordering
or_where: block.hash in ^hashes,
select: block.hash,
# Enforce Block ShareLocks order (see docs: sharelocks.md)
order_by: [asc: block.hash],
lock: "FOR UPDATE"
)
{_, removed_consensus_block_hashes} =
repo.update_all(
from(block in where_invalid_neighbour, select: block.number),
from(
block in Block,
join: s in subquery(acquire_query),
on: block.hash == s.hash,
# we don't want to remove consensus from blocks that will be upserted
where: block.hash not in ^hashes,
select: block.hash
),
[set: [consensus: false, updated_at: updated_at]],
timeout: timeout
)
{:ok, result}
{:ok, removed_consensus_block_hashes}
rescue
postgrex_error in Postgrex.Error ->
{:error, %{exception: postgrex_error, where_invalid_neighbour: where_invalid_neighbour}}
{:error, %{exception: postgrex_error, consensus_block_numbers: consensus_block_numbers}}
end
defp remove_nonconsensus_token_transfers(repo, nonconsensus_block_numbers, %{timeout: timeout}) do
defp remove_nonconsensus_token_transfers(repo, forked_transaction_hashes, %{timeout: timeout}) do
ordered_token_transfers =
from(token_transfer in TokenTransfer,
where: token_transfer.block_number in ^nonconsensus_block_numbers,
select: map(token_transfer, [:transaction_hash, :log_index]),
from(
token_transfer in TokenTransfer,
where: token_transfer.transaction_hash in ^forked_transaction_hashes,
select: token_transfer.transaction_hash,
# Enforce TokenTransfer ShareLocks order (see docs: sharelocks.md)
order_by: [
token_transfer.transaction_hash,
@ -417,91 +366,60 @@ defmodule Explorer.Chain.Import.Runner.Blocks do
from(token_transfer in TokenTransfer,
select: map(token_transfer, [:transaction_hash, :log_index]),
inner_join: ordered_token_transfer in subquery(ordered_token_transfers),
on:
ordered_token_transfer.transaction_hash ==
token_transfer.transaction_hash and
ordered_token_transfer.log_index == token_transfer.log_index
on: ordered_token_transfer.transaction_hash == token_transfer.transaction_hash
)
try do
{_count, deleted_token_transfers} = repo.delete_all(query, timeout: timeout)
{_count, deleted_token_transfers} = repo.delete_all(query, timeout: timeout)
{:ok, deleted_token_transfers}
rescue
postgrex_error in Postgrex.Error ->
{:error, %{exception: postgrex_error, block_numbers: nonconsensus_block_numbers}}
end
{:ok, deleted_token_transfers}
rescue
postgrex_error in Postgrex.Error ->
{:error, %{exception: postgrex_error, transactions: forked_transaction_hashes}}
end
defp remove_nonconsensus_internal_transactions(repo, nonconsensus_block_numbers, forked_transactions, %{
timeout: timeout
}) do
forked_transaction_hashes = Enum.map(forked_transactions, & &1.hash)
transaction_query =
from(transaction in Transaction,
where: transaction.block_number in ^nonconsensus_block_numbers,
or_where: transaction.hash in ^forked_transaction_hashes,
select: map(transaction, [:hash])
)
defp remove_nonconsensus_internal_transactions(repo, forked_transaction_hashes, %{timeout: timeout}) do
query =
from(internal_transaction in InternalTransaction,
inner_join: transaction in subquery(transaction_query),
on: internal_transaction.transaction_hash == transaction.hash,
where: internal_transaction.transaction_hash in ^forked_transaction_hashes,
select: map(internal_transaction, [:transaction_hash, :index])
)
try do
# ShareLocks order already enforced by `acquire_internal_transactions` (see docs: sharelocks.md)
{_count, deleted_internal_transactions} = repo.delete_all(query, timeout: timeout)
# ShareLocks order already enforced by `acquire_internal_transactions` (see docs: sharelocks.md)
{_count, deleted_internal_transactions} = repo.delete_all(query, timeout: timeout)
{:ok, deleted_internal_transactions}
rescue
postgrex_error in Postgrex.Error ->
{:error, %{exception: postgrex_error, block_numbers: nonconsensus_block_numbers}}
end
{:ok, deleted_internal_transactions}
rescue
postgrex_error in Postgrex.Error ->
{:error, %{exception: postgrex_error, transactions: forked_transaction_hashes}}
end
defp remove_nonconsensus_logs(repo, nonconsensus_block_numbers, forked_transactions, %{timeout: timeout}) do
forked_transaction_hashes = Enum.map(forked_transactions, & &1.hash)
transaction_query =
from(transaction in Transaction,
where: transaction.block_number in ^nonconsensus_block_numbers,
or_where: transaction.hash in ^forked_transaction_hashes,
select: map(transaction, [:hash]),
order_by: transaction.hash
)
defp remove_nonconsensus_logs(repo, forked_transaction_hashes, %{timeout: timeout}) do
ordered_logs =
from(log in Log,
inner_join: transaction in subquery(transaction_query),
on: log.transaction_hash == transaction.hash,
select: map(log, [:transaction_hash, :index]),
from(
log in Log,
where: log.transaction_hash in ^forked_transaction_hashes,
select: log.transaction_hash,
# Enforce Log ShareLocks order (see docs: sharelocks.md)
order_by: [
log.transaction_hash,
log.index
],
lock: "FOR UPDATE OF l0"
lock: "FOR UPDATE"
)
query =
from(log in Log,
select: map(log, [:transaction_hash, :index]),
inner_join: ordered_log in subquery(ordered_logs),
on: ordered_log.transaction_hash == log.transaction_hash and ordered_log.index == log.index
on: ordered_log.transaction_hash == log.transaction_hash
)
try do
{_count, deleted_logs} = repo.delete_all(query, timeout: timeout)
{_count, deleted_logs} = repo.delete_all(query, timeout: timeout)
{:ok, deleted_logs}
rescue
postgrex_error in Postgrex.Error ->
{:error, %{exception: postgrex_error, block_numbers: nonconsensus_block_numbers}}
end
{:ok, deleted_logs}
rescue
postgrex_error in Postgrex.Error ->
{:error, %{exception: postgrex_error, transactions: forked_transaction_hashes}}
end
defp delete_address_token_balances(_, [], _), do: {:ok, []}
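
The rewiring above relies on Ecto.Multi threading each step's result to later steps by name: derive_transaction_forks now returns the forked transaction hashes, and the remove_nonconsensus_* steps pattern-match on them instead of on nonconsensus block numbers. A standalone sketch of that pattern (names and values illustrative, not the runner's real queries):
alias Ecto.Multi

Multi.new()
|> Multi.run(:derive_transaction_forks, fn _repo, _changes_so_far ->
  # the real step upserts transaction_forks and returns the forked hashes
  {:ok, ["0xaaa", "0xbbb"]}
end)
|> Multi.run(:remove_nonconsensus_logs, fn _repo, %{derive_transaction_forks: hashes} ->
  # later steps receive prior results keyed by step name
  {:ok, length(hashes)}
end)
|> Explorer.Repo.transaction()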

@ -0,0 +1,125 @@
defmodule Explorer.Counters.AddressesCounter do
@moduledoc """
Caches the number of all addresses.
It loads the count asynchronously and refreshes it on a 30-minute interval.
"""
use GenServer
alias Explorer.Chain
@table :addresses_counter
@cache_key "addresses"
def table_name do
@table
end
def cache_key do
@cache_key
end
# It is undesirable to automatically start the consolidation in all environments.
# Consider the test environment: if the consolidation initiates but does not
# finish before a test ends, that test will fail. Hundreds of tests were
# failing for this reason before the consolidation and the scheduler were
# disabled in the test env.
config = Application.get_env(:explorer, Explorer.Counters.AddressesCounter)
@enable_consolidation Keyword.get(config, :enable_consolidation)
@update_interval_in_seconds Keyword.get(config, :update_interval_in_seconds)
@doc """
Starts a process to periodically update the addresses counter.
"""
@spec start_link(term()) :: GenServer.on_start()
def start_link(_) do
GenServer.start_link(__MODULE__, :ok, name: __MODULE__)
end
@impl true
def init(_args) do
create_table()
{:ok, %{consolidate?: enable_consolidation?()}, {:continue, :ok}}
end
def create_table do
opts = [
:set,
:named_table,
:public,
read_concurrency: true
]
:ets.new(table_name(), opts)
end
defp schedule_next_consolidation do
Process.send_after(self(), :consolidate, :timer.seconds(@update_interval_in_seconds))
end
@doc """
Inserts new items into the `:ets` table.
"""
def insert_counter({key, info}) do
:ets.insert(table_name(), {key, info})
end
@impl true
def handle_continue(:ok, %{consolidate?: true} = state) do
consolidate()
schedule_next_consolidation()
{:noreply, state}
end
@impl true
def handle_continue(:ok, state) do
{:noreply, state}
end
@impl true
def handle_info(:consolidate, state) do
consolidate()
schedule_next_consolidation()
{:noreply, state}
end
@doc """
Fetches the cached address count from the `:ets` table.
"""
def fetch do
do_fetch(:ets.lookup(table_name(), cache_key()))
end
defp do_fetch([{_, result}]), do: result
defp do_fetch([]), do: 0
@doc """
Consolidates the info by populating the `:ets` table with the current database information.
"""
def consolidate do
counter = Chain.count_addresses()
insert_counter({cache_key(), counter})
end
@doc """
Returns a boolean that indicates whether consolidation is enabled
In order to choose whether or not to enable the scheduler and the initial
consolidation, change the following Explorer config:
`config :explorer, Explorer.Counters.AddressesCounter, enable_consolidation: true`
to:
`config :explorer, Explorer.Counters.AddressesCounter, enable_consolidation: false`
"""
def enable_consolidation?, do: @enable_consolidation
end
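
A minimal usage sketch of the counter (mirrors the new tests below; start_supervised!/1 is the ExUnit helper, while in production the process is started through configure/1 in application.ex above):
# In a test (Explorer.DataCase) or similar environment:
start_supervised!(Explorer.Counters.AddressesCounter)

# With enable_consolidation disabled (as in test.exs above), trigger it manually:
Explorer.Counters.AddressesCounter.consolidate()

# Reads the cached value from :ets; returns 0 if nothing has been consolidated yet.
Explorer.Counters.AddressesCounter.fetch()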

@ -117,10 +117,12 @@ defmodule Explorer.Chain.Import.Runner.BlocksTest do
test "remove_nonconsensus_token_transfers deletes token transfer rows with matching block number when new consensus block is inserted",
%{consensus_block: %{number: block_number} = block, options: options} do
insert(:block, number: block_number, consensus: true)
consensus_block = insert(:block, number: block_number, consensus: true)
transaction = insert(:transaction) |> with_block(consensus_block)
%TokenTransfer{transaction_hash: transaction_hash, log_index: log_index} =
insert(:token_transfer, block_number: block_number, transaction: insert(:transaction))
insert(:token_transfer, block_number: block_number, transaction: transaction)
assert count(TokenTransfer) == 1
@ -136,7 +138,11 @@ defmodule Explorer.Chain.Import.Runner.BlocksTest do
test "remove_nonconsensus_token_transfers does not delete token transfer rows with matching block number when new consensus block wasn't inserted",
%{consensus_block: %{number: block_number} = block, options: options} do
insert(:token_transfer, block_number: block_number, transaction: insert(:transaction))
consensus_block = insert(:block, number: block_number, consensus: true)
transaction = insert(:transaction) |> with_block(consensus_block)
insert(:token_transfer, block_number: block_number, transaction: transaction)
count = 1

@ -27,6 +27,7 @@ defmodule Explorer.ChainTest do
alias Explorer.Chain.Supply.ProofOfAuthority
alias Explorer.Counters.AddressesWithBalanceCounter
alias Explorer.Counters.AddressesCounter
doctest Explorer.Chain
@ -50,6 +51,22 @@ defmodule Explorer.ChainTest do
end
end
describe "count_addresses_from_cache/0" do
test "returns the number of all addresses" do
insert(:address, fetched_coin_balance: 0)
insert(:address, fetched_coin_balance: 1)
insert(:address, fetched_coin_balance: 2)
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
address_count = Chain.count_addresses_from_cache()
assert is_integer(address_count)
assert address_count == 3
end
end
describe "last_db_block_status/0" do
test "return no_blocks errors if db is empty" do
assert {:error, :no_blocks} = Chain.last_db_block_status()

@ -0,0 +1,16 @@
defmodule Explorer.Counters.AddressesCounterTest do
use Explorer.DataCase
alias Explorer.Counters.AddressesCounter
test "populates the cache with the number of all addresses" do
insert(:address, fetched_coin_balance: 0)
insert(:address, fetched_coin_balance: 1)
insert(:address, fetched_coin_balance: 2)
start_supervised!(AddressesCounter)
AddressesCounter.consolidate()
assert AddressesCounter.fetch() == 3
end
end