# Upgrading Guide

### Migration scripts

The project contains a `scripts` folder with SQL files used to migrate data that is already stored in the database.

These scripts should be used if you already have an indexed database with a large amount of data.
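The scripts are plain SQL, so any Postgres client can run them. A minimal sketch of kicking one off from an Elixir shell (the connection URL, database name, and script choice are illustrative):

```elixir
# Illustrative only: shells out to psql to run one of the batched migration
# scripts. Assumes psql is on the PATH and DATABASE_URL points at the
# BlockScout database; adjust the script name to the migration you need.
database_url =
  System.get_env("DATABASE_URL") || "postgres://postgres:@localhost:5432/explorer_dev"

{output, 0} =
  System.cmd("psql", [database_url, "-f", "scripts/address_current_token_balances_in_batches.sql"])

IO.puts(output)
```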
#### `address_current_token_balances_in_batches.sql`

Populates a new table using the information in the `token_balances` table.

#### `internal_transaction_update_in_batches.sql`

Migrates data from the `transactions` table to the `internal_transactions` table in order to improve the application's listing performance.

#### `transaction_update_in_baches.sql`

Parity call traces contain the input, but it was not put into `internal_transactions_params`. New constraints on `internal_transactions` enforce that `input` and `call_type` are non-NULL for calls.
<section class="container">
  <div class="card">
    <div class="card-body">
      <h1 class="card-title margin-bottom-sm"><%= gettext("ETH RPC API Documentation") %></h1>
      <p class="api-text-monospace" data-endpoint-url="<%= BlockScoutWeb.Endpoint.url() %>/api/eth_rpc">[ <%= gettext "Base URL:" %> <%= @conn.host %>/api/eth_rpc ]</p>
      <p class="card-subtitle margin-bottom-0">
        <%= gettext "This API is provided to support some rpc methods in the exact format specified for ethereum nodes, which can be found " %>
        <a href="https://github.com/ethereum/wiki/wiki/JSON-RPC"><%= gettext "here." %></a>
        <%= gettext "This is useful to allow sending requests to blockscout without having to change anything about the request." %>
        <%= gettext "However, in general, the" %> <%= link(
          gettext("custom RPC"),
          to: api_docs_path(@conn, :index)
        ) %> <%= gettext " is recommended." %>
        <%= gettext "Anything not in this list is not supported. Click on the method to be taken to the documentation for that method, and check the notes section for any potential differences." %>
      </p>
    </div>
  </div>
  <div class="card">
    <div class="card-body">
      <table class="table">
        <tr>
          <th>Supported Method</th>
          <th>Notes</th>
        </tr>
        <%= for {method, info} <- Map.to_list(@documentation) do %>
          <tr>
            <td> <a href="https://github.com/ethereum/wiki/wiki/JSON-RPC#<%= method %>"> <%= method %> </a> </td>
            <td> <%= Map.get(info, :notes, "N/A") %> </td>
          </tr>
        <% end %>
      </table>
    </div>
  </div>
</section>
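As a quick sanity check of the endpoint documented above, a standard JSON-RPC payload can be sent with any HTTP client. A sketch in Elixir (HTTPoison and Jason are assumed to be available, as they are used elsewhere in the umbrella; the host is illustrative):

```elixir
# POST a JSON-RPC request to BlockScout's eth_rpc endpoint.
payload = Jason.encode!(%{jsonrpc: "2.0", id: 1, method: "eth_blockNumber", params: []})

{:ok, %HTTPoison.Response{status_code: 200, body: body}} =
  HTTPoison.post("https://blockscout.com/eth/mainnet/api/eth_rpc", payload, [
    {"Content-Type", "application/json"}
  ])

Jason.decode!(body)
# => %{"id" => 1, "jsonrpc" => "2.0", "result" => "0x..."} (hex-encoded block number)
```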
defmodule Explorer.Chain.Supply.RSK do
  @moduledoc """
  Defines the supply API for calculating supply for coins from RSK.
  """

  use Explorer.Chain.Supply

  import Ecto.Query, only: [from: 2]

  alias Explorer.Chain.Address.CoinBalance
  alias Explorer.Chain.{Block, Wei}
  alias Explorer.ExchangeRates.Token
  alias Explorer.{Market, Repo}

  def market_cap(exchange_rate) do
    circulating() * exchange_rate.usd_value
  end

  @doc "Equivalent to getting the circulating value"
  def supply_for_days(days) do
    now = Timex.now()

    balances_query =
      from(balance in CoinBalance,
        join: block in Block,
        on: block.number == balance.block_number,
        where: block.consensus == true,
        where: balance.address_hash == ^"0x0000000000000000000000000000000001000006",
        where: block.timestamp > ^Timex.shift(now, days: -days),
        distinct: fragment("date_trunc('day', ?)", block.timestamp),
        select: {block.timestamp, balance.value}
      )

    balance_before_query =
      from(balance in CoinBalance,
        join: block in Block,
        on: block.number == balance.block_number,
        where: block.consensus == true,
        where: balance.address_hash == ^"0x0000000000000000000000000000000001000006",
        where: block.timestamp <= ^Timex.shift(Timex.now(), days: -days),
        order_by: [desc: block.timestamp],
        limit: 1,
        select: balance.value
      )

    by_day =
      balances_query
      |> Repo.all()
      |> Enum.into(%{}, fn {timestamp, value} ->
        {Timex.to_date(timestamp), value}
      end)

    starting = Repo.one(balance_before_query) || wei!(0)

    result =
      -days..0
      |> Enum.reduce({%{}, starting.value}, fn i, {days, last} ->
        date =
          now
          |> Timex.shift(days: i)
          |> Timex.to_date()

        case Map.get(by_day, date) do
          nil ->
            {Map.put(days, date, last), last}

          value ->
            {Map.put(days, date, value.value), value.value}
        end
      end)
      |> elem(0)

    {:ok, result}
  end

  def circulating do
    query =
      from(balance in CoinBalance,
        join: block in Block,
        on: block.number == balance.block_number,
        where: block.consensus == true,
        where: balance.address_hash == ^"0x0000000000000000000000000000000001000006",
        order_by: [desc: block.timestamp],
        limit: 1,
        select: balance.value
      )

    Repo.one(query) || wei!(0)
  end

  defp wei!(value) do
    {:ok, wei} = Wei.cast(value)
    wei
  end

  def total do
    21_000_000
  end

  def exchange_rate do
    Market.get_exchange_rate(Explorer.coin()) || Token.null()
  end
end
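A short usage sketch of the module above (dates and values are illustrative; the hard-coded address appears to be the RSK bridge contract, whose balance is read as the circulating amount):

```elixir
# Returns a map of Date -> Decimal covering the last `days` days plus today,
# carrying the most recent known balance forward for days without a new one.
{:ok, by_date} = Explorer.Chain.Supply.RSK.supply_for_days(2)
# by_date => %{~D[2019-06-09] => Decimal.new(10),
#              ~D[2019-06-10] => Decimal.new(20),
#              ~D[2019-06-11] => Decimal.new(20)}

Explorer.Chain.Supply.RSK.total()
# => 21_000_000
```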
defmodule Explorer.Market.MarketHistoryCache do
  @moduledoc """
  Caches recent market history.
  """

  import Ecto.Query, only: [from: 2]

  alias Explorer.Market.MarketHistory
  alias Explorer.Repo

  @cache_name :market_history
  @last_update_key :last_update
  @history_key :history
  # 6 hours
  @cache_period 1_000 * 60 * 60 * 6
  @recent_days 30

  def fetch do
    if cache_expired?() do
      update_cache()
    else
      fetch_from_cache(@history_key)
    end
  end

  def cache_name, do: @cache_name

  def data_key, do: @history_key

  def updated_at_key, do: @last_update_key

  def recent_days_count, do: @recent_days

  defp cache_expired? do
    updated_at = fetch_from_cache(@last_update_key)

    cond do
      is_nil(updated_at) -> true
      current_time() - updated_at > @cache_period -> true
      true -> false
    end
  end

  defp update_cache do
    new_data = fetch_from_db()

    put_into_cache(@last_update_key, current_time())
    put_into_cache(@history_key, new_data)

    new_data
  end

  defp fetch_from_db do
    day_diff = @recent_days * -1

    query =
      from(
        mh in MarketHistory,
        where: mh.date > date_add(^Date.utc_today(), ^day_diff, "day"),
        order_by: [desc: mh.date]
      )

    Repo.all(query)
  end

  defp fetch_from_cache(key) do
    ConCache.get(@cache_name, key)
  end

  defp put_into_cache(key, value) do
    ConCache.put(@cache_name, key, value)
  end

  defp current_time do
    utc_now = DateTime.utc_now()

    DateTime.to_unix(utc_now, :millisecond)
  end
end
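Usage sketch for the cache above: the first call after the 6-hour window repopulates the ConCache table from the database, and later calls within the window are served from memory.

```elixir
# Fetch up to the last 30 days of market history; this only hits the database
# when the cached copy is missing or older than @cache_period.
history = Explorer.Market.MarketHistoryCache.fetch()

Enum.take(history, 1)
# => [%Explorer.Market.MarketHistory{date: ~D[2019-06-11],
#      closing_price: ..., opening_price: ...}]  (values illustrative)
```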
defmodule Explorer.Chain.Supply.RSKTest do
  use Explorer.DataCase

  alias Explorer.Chain.Supply.RSK
  alias Explorer.Chain.Wei

  @coin_address "0x0000000000000000000000000000000001000006"

  defp wei!(value) do
    {:ok, wei} = Wei.cast(value)
    wei
  end

  test "total is 21_000_000" do
    assert RSK.total() == 21_000_000
  end

  describe "circulating/0" do
    test "with no balance" do
      assert RSK.circulating() == wei!(0)
    end

    test "with a balance" do
      address = insert(:address, hash: @coin_address)
      insert(:block, number: 0)

      insert(:fetched_balance, value: 10, address_hash: address.hash, block_number: 0)

      assert RSK.circulating() == wei!(10)
    end
  end

  defp date(now, shift \\ []) do
    now
    |> Timex.shift(shift)
    |> Timex.to_date()
  end

  defp dec(number) do
    Decimal.new(number)
  end

  describe "supply_for_days/1" do
    test "when there is no balance" do
      now = Timex.now()

      assert RSK.supply_for_days(2) ==
               {:ok,
                %{
                  date(now, days: -2) => dec(0),
                  date(now, days: -1) => dec(0),
                  date(now) => dec(0)
                }}
    end

    test "when there is a single balance before the days, that balance is used" do
      address = insert(:address, hash: @coin_address)
      now = Timex.now()

      insert(:block, number: 0, timestamp: Timex.shift(now, days: -10))

      insert(:fetched_balance, value: 10, address_hash: address.hash, block_number: 0)

      assert RSK.supply_for_days(2) ==
               {:ok,
                %{
                  date(now, days: -2) => dec(10),
                  date(now, days: -1) => dec(10),
                  date(now) => dec(10)
                }}
    end

    test "when there is a balance for one of the days, days after it use that balance" do
      address = insert(:address, hash: @coin_address)
      now = Timex.now()

      insert(:block, number: 0, timestamp: Timex.shift(now, days: -10))
      insert(:block, number: 1, timestamp: Timex.shift(now, days: -1))

      insert(:fetched_balance, value: 10, address_hash: address.hash, block_number: 0)

      insert(:fetched_balance, value: 20, address_hash: address.hash, block_number: 1)

      assert RSK.supply_for_days(2) ==
               {:ok,
                %{
                  date(now, days: -2) => dec(10),
                  date(now, days: -1) => dec(20),
                  date(now) => dec(20)
                }}
    end

    test "when there is a balance for the first day, that balance is used" do
      address = insert(:address, hash: @coin_address)
      now = Timex.now()

      insert(:block, number: 0, timestamp: Timex.shift(now, days: -10))
      insert(:block, number: 1, timestamp: Timex.shift(now, days: -2))
      insert(:block, number: 2, timestamp: Timex.shift(now, days: -1))

      insert(:fetched_balance, value: 5, address_hash: address.hash, block_number: 0)

      insert(:fetched_balance, value: 10, address_hash: address.hash, block_number: 1)

      insert(:fetched_balance, value: 20, address_hash: address.hash, block_number: 2)

      assert RSK.supply_for_days(2) ==
               {:ok,
                %{
                  date(now, days: -2) => dec(10),
                  date(now, days: -1) => dec(20),
                  date(now) => dec(20)
                }}
    end

    test "when there is a balance for all days, they are each used correctly" do
      address = insert(:address, hash: @coin_address)
      now = Timex.now()

      insert(:block, number: 0, timestamp: Timex.shift(now, days: -10))
      insert(:block, number: 1, timestamp: Timex.shift(now, days: -2))
      insert(:block, number: 2, timestamp: Timex.shift(now, days: -1))
      insert(:block, number: 3, timestamp: now)

      insert(:fetched_balance, value: 5, address_hash: address.hash, block_number: 0)
      insert(:fetched_balance, value: 10, address_hash: address.hash, block_number: 1)
      insert(:fetched_balance, value: 20, address_hash: address.hash, block_number: 2)
      insert(:fetched_balance, value: 30, address_hash: address.hash, block_number: 3)

      assert RSK.supply_for_days(2) ==
               {:ok,
                %{
                  date(now, days: -2) => dec(10),
                  date(now, days: -1) => dec(20),
                  date(now) => dec(30)
                }}
    end
  end
end
defmodule Explorer.Market.MarketHistoryCacheTest do
  use Explorer.DataCase

  alias Explorer.Market
  alias Explorer.Market.MarketHistoryCache

  setup do
    Supervisor.terminate_child(Explorer.Supervisor, {ConCache, MarketHistoryCache.cache_name()})
    Supervisor.restart_child(Explorer.Supervisor, {ConCache, MarketHistoryCache.cache_name()})

    on_exit(fn ->
      Supervisor.terminate_child(Explorer.Supervisor, {ConCache, Explorer.Chain.BlocksCache.cache_name()})
      Supervisor.restart_child(Explorer.Supervisor, {ConCache, Explorer.Chain.BlocksCache.cache_name()})
    end)

    :ok
  end

  describe "fetch/1" do
    test "caches data on the first call" do
      today = Date.utc_today()

      records =
        for i <- 0..29 do
          %{
            date: Timex.shift(today, days: i * -1),
            closing_price: Decimal.new(1),
            opening_price: Decimal.new(1)
          }
        end

      Market.bulk_insert_history(records)

      refute fetch_data()

      assert Enum.count(MarketHistoryCache.fetch()) == 30

      assert fetch_data() == records
    end

    test "updates cache if cache is stale" do
      today = Date.utc_today()

      stale_records =
        for i <- 0..29 do
          %{
            date: Timex.shift(today, days: i * -1),
            closing_price: Decimal.new(1),
            opening_price: Decimal.new(1)
          }
        end

      Market.bulk_insert_history(stale_records)

      MarketHistoryCache.fetch()

      stale_updated_at = fetch_updated_at()

      assert fetch_data() == stale_records

      ConCache.put(MarketHistoryCache.cache_name(), MarketHistoryCache.updated_at_key(), 1)

      fetch_data()

      assert stale_updated_at != fetch_updated_at()
    end
  end

  defp fetch_updated_at do
    ConCache.get(MarketHistoryCache.cache_name(), MarketHistoryCache.updated_at_key())
  end

  defp fetch_data do
    MarketHistoryCache.cache_name()
    |> ConCache.get(MarketHistoryCache.data_key())
    |> case do
      nil ->
        nil

      records ->
        Enum.map(records, fn record ->
          %{
            date: record.date,
            closing_price: record.closing_price,
            opening_price: record.opening_price
          }
        end)
    end
  end
end
<p align="center">
  <a href="https://blockscout.com">
    <img width="200" src="https://blockscout.com/eth/mainnet/android-chrome-192x192.png" \>
  </a>
</p>

<h1 align="center">BlockScout</h1>
<p align="center">Blockchain Explorer for inspecting and analyzing EVM Chains.</p>
<div align="center">

[![CircleCI](https://circleci.com/gh/poanetwork/blockscout.svg?style=svg&circle-token=f8823a3d0090407c11f87028c73015a331dbf604)](https://circleci.com/gh/poanetwork/blockscout) [![Coverage Status](https://coveralls.io/repos/github/poanetwork/blockscout/badge.svg?branch=master)](https://coveralls.io/github/poanetwork/blockscout?branch=master) [![Join the chat at https://gitter.im/poanetwork/blockscout](https://badges.gitter.im/poanetwork/blockscout.svg)](https://gitter.im/poanetwork/blockscout?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

</div>

BlockScout provides a comprehensive, easy-to-use interface for users to view, confirm, and inspect transactions on **all EVM** (Ethereum Virtual Machine) blockchains. This includes the Ethereum main and test networks as well as **Ethereum forks and sidechains**.

Following is an overview of the project and instructions for [getting started](#getting-started).

Visit the [POA BlockScout forum](https://forum.poa.network/c/blockscout) for additional deployment instructions, FAQs, troubleshooting, and other BlockScout related items. You can also post and answer questions here.

You can also access the dev chatroom on our [Gitter Channel](https://gitter.im/poanetwork/blockscout).

## About BlockScout

BlockScout is an Elixir application that allows users to search transactions, view accounts and balances, and verify smart contracts on the entire Ethereum network including all forks and sidechains.

Currently available block explorers (i.e. Etherscan and Etherchain) are closed systems which are not independently verifiable. As Ethereum sidechains continue to proliferate in both private and public settings, transparent tools are needed to analyze and validate transactions.

### Features

- [x] **Open source development**: The code is community driven and available for anyone to use, explore and improve.

- [x] **Real time transaction tracking**: Transactions are updated in real time - no page refresh required. Infinite scrolling is also enabled.

- [x] **Smart contract interaction**: Users can read and verify Solidity smart contracts and access pre-existing contracts to fast-track development. Support for Vyper, LLL, and Web Assembly contracts is in progress.

- [x] **Token support**: ERC20 and ERC721 tokens are supported. Future releases will support additional token types including ERC223 and ERC1155.

- [x] **User customization**: Users can easily deploy on a network and customize the Bootstrap interface.

- [x] **Ethereum sidechain networks**: BlockScout supports the Ethereum mainnet, Ethereum testnets, POA network, and forks like Ethereum Classic, xDAI, additional sidechains, and private EVM networks.

### Supported Projects

| **Hosted Mainnets** | **Hosted Testnets** | **Additional Chains using BlockScout** |
|--------------------------------------------------------|-------------------------------------------------------|------------------------------------------------------|
| [Aerum](https://blockscout.com/aerum/mainnet) | [Goerli Testnet](https://blockscout.com/eth/goerli) | [ARTIS](https://explorer.sigma1.artis.network) |
| [Callisto](https://blockscout.com/callisto/mainnet) | [Kovan Testnet](https://blockscout.com/eth/kovan) | [Ether-1](https://blocks.ether1.wattpool.net/) |
| [Ethereum Classic](https://blockscout.com/etc/mainnet) | [POA Sokol Testnet](https://blockscout.com/poa/sokol) | [Fuse Network](https://explorer.fuse.io/) |
| [Ethereum Mainnet](https://blockscout.com/eth/mainnet) | [Rinkeby Testnet](https://blockscout.com/eth/rinkeby) | [Oasis Labs](https://blockexplorer.oasiscloud.io/) |
| [POA Core Network](https://blockscout.com/poa/core) | [Ropsten Testnet](https://blockscout.com/eth/ropsten) | [Petrichor](https://explorer.petrachor.com/) |
| [RSK](https://blockscout.com/rsk/mainnet) | | [PIRL](http://pirl.es/) |
| [xDai Chain](https://blockscout.com/poa/dai) | | [SafeChain](https://explorer.safechain.io) |
| | | [SpringChain](https://explorer.springrole.com/) |
| | | [Kotti Testnet](https://kottiexplorer.ethernode.io/) |

### Visual Interface

Interface for the POA network _updated 02/2019_

![BlockScout Example](explorer_example_2_2019.gif)

### Umbrella Project Organization

This repository is an [umbrella project](https://elixir-lang.org/getting-started/mix-otp/dependencies-and-umbrella-projects.html). Each directory under `apps/` is a separate [Mix](https://hexdocs.pm/mix/Mix.html) project and [OTP application](https://hexdocs.pm/elixir/Application.html), but the projects can use each other as a dependency in their `mix.exs`.

Each OTP application has a restricted domain.

| Directory | OTP Application | Namespace | Purpose |
|:------------------------|:--------------------|:------------------|:-----------------------------------------------------------------------------------------------------------------|
| `apps/ethereum_jsonrpc` | `:ethereum_jsonrpc` | `EthereumJSONRPC` | Ethereum JSONRPC client. It is allowed to know `Explorer`'s param format, but it cannot directly depend on `:explorer` |
| `apps/explorer` | `:explorer` | `Explorer` | Storage for the indexed chain. Can read and write to the backing storage. MUST be able to boot in a read-only mode when run independently from `:indexer`, so cannot depend on `:indexer` as that would start `:indexer` indexing. |
| `apps/block_scout_web` | `:block_scout_web` | `BlockScoutWeb` | Phoenix interface to `:explorer`. The minimum interface to allow web access should go in `:block_scout_web`. Any business rules or interface not tied directly to `Phoenix` or `Plug` should go in `:explorer`. MUST be able to boot in a read-only mode when run independently from `:indexer`, so cannot depend on `:indexer` as that would start `:indexer` indexing. |
| `apps/indexer` | `:indexer` | `Indexer` | Uses `:ethereum_jsonrpc` to index chain and batch import data into `:explorer`. Any process, `Task`, or `GenServer` that automatically reads from the chain and writes to `:explorer` should be in `:indexer`. This restricts automatic writes to `:indexer` and read-only mode can be achieved by not running `:indexer`. |
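As a sketch of how the umbrella dependencies are wired (an illustrative excerpt, not the complete dependency list), a sibling app is declared in `mix.exs` with the `in_umbrella` option:

```elixir
# apps/block_scout_web/mix.exs (illustrative excerpt)
defp deps do
  [
    # Umbrella siblings are referenced with `in_umbrella: true`;
    # note that :block_scout_web depends on :explorer, never on :indexer.
    {:explorer, in_umbrella: true}
  ]
end
```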
## Getting Started

### Requirements

| Dependency | Mac | Linux |
|-------------|-----|-------|
| [Erlang/OTP 21.0.4](https://github.com/erlang/otp) | `brew install erlang` | [Erlang Install Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L134) |
| [Elixir 1.8.1](https://elixir-lang.org/) | :point_up: | [Elixir Install Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L138) |
| [Postgres 10.3](https://www.postgresql.org/) | `brew install postgresql` | [Postgres Install Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L187) |
| [Node.js 10.x.x](https://nodejs.org/en/) | `brew install node` | [Node.js Install Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L66) |
| [Automake](https://www.gnu.org/software/automake/) | `brew install automake` | [Automake Install Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L72) |
| [Libtool](https://www.gnu.org/software/libtool/) | `brew install libtool` | [Libtool Install Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L62) |
| [Inotify-tools](https://github.com/rvoicilas/inotify-tools/wiki) | Not Required | Ubuntu - `apt-get install inotify-tools` |
| [GCC Compiler](https://gcc.gnu.org/) | `brew install gcc` | [GCC Compiler Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L70) |
| [GMP](https://gmplib.org/) | `brew install gmp` | [Install GMP Devel](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L74) |

### Build and Run

#### Playbook Deployment

We use [Ansible](https://docs.ansible.com/ansible/latest/index.html) & [Terraform](https://www.terraform.io/intro/getting-started/install.html) to build the correct infrastructure to run BlockScout. See [https://github.com/poanetwork/blockscout-terraform](https://github.com/poanetwork/blockscout-terraform) for details and instructions.

#### Manual Deployment

See [Manual BlockScout Deployment](https://forum.poa.network/t/manual-blockscout-deployment/2458) for instructions.

#### Environment Variables

Our forum contains a [full list of BlockScout environment variables](https://forum.poa.network/t/faq-blockscout-environment-variables/1814).

#### Configuring EVM Chains

* **CSS:** Update the import instruction in `apps/block_scout_web/assets/css/theme/_variables.scss` to select a preset css file. This is reflected in the `production-${chain}` branch for each instance. For example, in the `production-xdai` branch, it is set to `@import "dai-variables"`.

* **ENV:** Update the [environment variables](https://forum.poa.network/t/faq-blockscout-environment-variables/1814) to match the chain specs.

#### Automating Restarts

By default `blockscout` does not restart if it crashes. To enable automated restarts, set the environment variable `HEART_COMMAND` to whatever command you run to start `blockscout`. Configure the heart beat timeout to change how long it waits before considering the application unresponsive. At that point, it will kill the current blockscout instance and execute the `HEART_COMMAND`. By default a crash dump is not written unless you set `ERL_CRASH_DUMP_SECONDS` to a positive or negative integer. See the [heart](http://erlang.org/doc/man/heart.html) documentation for more information.

#### CircleCI Updates

To monitor build status, configure your local [CCMenu](http://ccmenu.org/) with the following url: [`https://circleci.com/gh/poanetwork/blockscout.cc.xml?circle-token=f8823a3d0090407c11f87028c73015a331dbf604`](https://circleci.com/gh/poanetwork/blockscout.cc.xml?circle-token=f8823a3d0090407c11f87028c73015a331dbf604)

## Testing

### Requirements

* PhantomJS (for wallaby)

### Running the tests

1. Build the assets.
   `cd apps/block_scout_web/assets && npm run build; cd -`

2. Format the Elixir code.
   `mix format`

3. Run the test suite with coverage for the whole umbrella project. This step can be run with the different configurations outlined below.
   `mix coveralls.html --umbrella`

4. Lint the Elixir code.
   `mix credo --strict`

5. Run the dialyzer.
   `mix dialyzer --halt-exit-status`

6. Check the Elixir code for vulnerabilities.
   `cd apps/explorer && mix sobelow --config; cd -`
   `cd apps/block_scout_web && mix sobelow --config; cd -`

7. Lint the JavaScript code.
   `cd apps/block_scout_web/assets && npm run eslint; cd -`

8. Test the JavaScript code.
   `cd apps/block_scout_web/assets && npm run test; cd -`

#### Parity

##### Mox

**This is the default setup. `mix coveralls.html --umbrella` will work on its own, but to be explicit, use the following setup**:

```shell
export ETHEREUM_JSONRPC_CASE=EthereumJSONRPC.Case.Parity.Mox
export ETHEREUM_JSONRPC_WEB_SOCKET_CASE=EthereumJSONRPC.WebSocket.Case.Mox
mix coveralls.html --umbrella --exclude no_parity
```

##### HTTP / WebSocket

```shell
export ETHEREUM_JSONRPC_CASE=EthereumJSONRPC.Case.Parity.HTTPWebSocket
export ETHEREUM_JSONRPC_WEB_SOCKET_CASE=EthereumJSONRPC.WebSocket.Case.Parity
mix coveralls.html --umbrella --exclude no_parity
```

| Protocol | URL |
|:----------|:-----------------------------------|
| HTTP | `http://localhost:8545` |
| WebSocket | `ws://localhost:8546` |

#### Geth

##### Mox

```shell
export ETHEREUM_JSONRPC_CASE=EthereumJSONRPC.Case.Geth.Mox
export ETHEREUM_JSONRPC_WEB_SOCKET_CASE=EthereumJSONRPC.WebSocket.Case.Mox
mix coveralls.html --umbrella --exclude no_geth
```

##### HTTP / WebSocket

```shell
export ETHEREUM_JSONRPC_CASE=EthereumJSONRPC.Case.Geth.HTTPWebSocket
export ETHEREUM_JSONRPC_WEB_SOCKET_CASE=EthereumJSONRPC.WebSocket.Case.Geth
mix coveralls.html --umbrella --exclude no_geth
```

| Protocol | URL |
|:----------|:--------------------------------------------------|
| HTTP | `https://mainnet.infura.io/8lTvJTKmHPCHazkneJsY` |
| WebSocket | `wss://mainnet.infura.io/ws/8lTvJTKmHPCHazkneJsY` |

### API Documentation

To view Modules and API Reference documentation:

1. Generate documentation.
   `mix docs`
2. View the generated docs.
   `open doc/index.html`

## Front-end

### JavaScript

All JavaScript files are under [apps/block_scout_web/assets/js](https://github.com/poanetwork/blockscout/tree/master/apps/block_scout_web/assets/js) and the main file is [app.js](https://github.com/poanetwork/blockscout/blob/master/apps/block_scout_web/assets/js/app.js). This file imports all JavaScript used in the application. If you want to create a new JS file, consider creating it in [/js/pages](https://github.com/poanetwork/blockscout/tree/master/apps/block_scout_web/assets/js/pages) or [/js/lib](https://github.com/poanetwork/blockscout/tree/master/apps/block_scout_web/assets/js/lib), as follows:

#### js/lib
This folder contains all scripts that can be reused on any page or used as a helper for some component.

#### js/pages
This folder contains the scripts that are specific to a single page.

#### Redux
This project uses Redux to control the state on some pages. Some pages update in real time via Phoenix channels (e.g. the Address page), so the page state changes depending on which events it is listening to. Redux is also used to load some content asynchronously; see [async_listing_load.js](https://github.com/poanetwork/blockscout/blob/master/apps/block_scout_web/assets/js/lib/async_listing_load.js).

To understand how to build new pages that need Redux in this project, see [redux_helpers.js](https://github.com/poanetwork/blockscout/blob/master/apps/block_scout_web/assets/js/lib/redux_helpers.js).

## Internationalization

The app is currently internationalized, but it is only localized to U.S. English. To translate new strings:

1. Update the translation file.
   `cd apps/block_scout_web; mix gettext.extract --merge; cd -`
2. Edit the new strings in `apps/block_scout_web/priv/gettext/en/LC_MESSAGES/default.po`.

## Metrics

BlockScout is set up to export [Prometheus](https://prometheus.io/) metrics at `/metrics`.

### Prometheus

1. Install prometheus: `brew install prometheus`
2. Start the web server: `iex -S mix phx.server`
3. Start prometheus: `prometheus --config.file=prometheus.yml`

### Grafana

1. Install grafana: `brew install grafana`
2. Install the Pie Chart panel plugin: `grafana-cli plugins install grafana-piechart-panel`
3. Start grafana: `brew services start grafana`
4. Add Prometheus as a Data Source
   1. `open http://localhost:3000/datasources`
   2. Click "+ Add data source"
   3. Put "Prometheus" for "Name"
   4. Change "Type" to "Prometheus"
   5. Set "URL" to "http://localhost:9090"
   6. Set "Scrape Interval" to "10s"
5. Add the dashboards from https://github.com/deadtrickster/beam-dashboards. For each `*.json` file in the repo:
   1. `open http://localhost:3000/dashboard/import`
   2. Copy the contents of the JSON file into the "Or paste JSON" entry
   3. Click "Load"
6. View the dashboards. (You will need to click around and use BlockScout for the web-related metrics to show up.)

## Tracing

BlockScout supports tracing via [Spandex](https://github.com/spandex-project/spandex). Each application has its own tracer, which is configured internally to that application. In order to enable it, visit each application's `config/<env>.exs` and update its tracer configuration to change `disabled?: true` to `disabled?: false`. Do this for each application you'd like included in your trace data.
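For example, a sketch of the change for the `:explorer` app (the tracer module name and config file are illustrative; check each app's config for its actual tracer entry):

```elixir
# apps/explorer/config/dev.exs (illustrative excerpt) - enable the Spandex tracer
config :explorer, Explorer.Tracer,
  disabled?: false
```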
Currently, only [Datadog](https://www.datadoghq.com/) is supported as a tracing backend, but more will be added soon.

### DataDog

If you would like to use DataDog, after enabling `Spandex`, set `"DATADOG_HOST"` and `"DATADOG_PORT"` environment variables to the host/port that your Datadog agent is running on. For more information on Datadog and the Datadog agent, see their [documentation](https://docs.datadoghq.com/).

### Other

If you want to use a different backend, remove the `SpandexDatadog.ApiServer` `Supervisor.child_spec` from `Explorer.Application` and follow any instructions provided in `Spandex` for setting up that backend.

## Memory Usage

The work queues for building the index of all blocks, balances (coin and token), and internal transactions can grow quite large. By default, the soft-limit is 1 GiB, which can be changed in `apps/indexer/config/config.exs`:

```elixir
config :indexer, memory_limit: 1 <<< 30
```

Memory usage is checked once per minute. If the soft-limit is reached, the shrinkable work queues will shed half their load. The shed load will be restored from the database, the same as when a restart of the server occurs, so rebuilding the work queue will be slower, but use less memory.

If all queues are at their minimum size, then no more memory can be reclaimed and an error will be logged.

## Acknowledgements

We would like to thank the [EthPrize foundation](http://ethprize.io/) for their funding support.

## Contributing

See [CONTRIBUTING.md](CONTRIBUTING.md) for contribution and pull request protocol. We expect contributors to follow our [code of conduct](CODE_OF_CONDUCT.md) when submitting code or comments.

## License

[![License: GPL v3.0](https://img.shields.io/badge/License-GPL%20v3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0)

This project is licensed under the GNU General Public License v3.0. See the [LICENSE](LICENSE) file for details.
<!-- _sidebar.md -->

- About BlockScout

  - [About](about.md)
  - [Projects Using BlockScout](projects.md)
  - [Umbrella Project Organization](umbrella.md)

- Installation & Configuration

  - [Requirements](requirements.md)
  - [Ansible Deployment](ansible-deployment.md)
  - [Manual Deployment](manual-deployment.md)
  - [ENV Variables](env-variables.md)
  - [Configuration Options](dev-env.md)
  - [Chain Configuration](chain-configs.md)
  - [Automating Restarts](restarts.md)
  - [Front End](front-end.md)
  - [CircleCI Configs](circleci.md)
  - [Testing](testing.md)
  - [Internationalization](internationalization.md)
  - [Metrics](metrics.md)
  - [Tracing](tracing.md)
  - [Memory Usage](memory-usage.md)
  - [API Docs](api.md)
  - [Upgrading](upgrading.md)

- User Guide

  - [Search Terminology](terminology.md)
  - [Smart Contract Verification](smart-contract.md)
  - [FAQs](faqs.md)

- Resources

  - [POA BlockScout Forum & FAQs](https://forum.poa.network/c/blockscout)
  - [Gitter Channel](https://gitter.im/poanetwork/blockscout)
  - [Twitter](https://twitter.com/_blockscout/)
  - [Github Repo](https://github.com/poanetwork/blockscout)
<!-- about.md -->

## About BlockScout

BlockScout is an Elixir application that allows users to search transactions, view accounts and balances, and verify smart contracts on the entire Ethereum network including all forks and sidechains.

Currently available block explorers (i.e. Etherscan and Etherchain) are closed systems which are not independently verifiable. As Ethereum sidechains continue to proliferate in both private and public settings, transparent tools are needed to analyze and validate transactions.

Information on the latest release and version history is available [on our forum](https://forum.poa.network/c/blockscout/releases).

## Visual Interface

![POA BlockScout](_media/screenshot_06_2019.png)

Interface for the POA network: v2.0 _updated 06/2019_

## Acknowledgements

We would like to thank the [EthPrize foundation](http://ethprize.io/) for their funding support.

## Contributing

See [CONTRIBUTING.md](https://github.com/poanetwork/blockscout/blob/master/CONTRIBUTING.md) for contribution and pull request protocol. We expect contributors to follow our [code of conduct](https://github.com/poanetwork/blockscout/blob/master/CODE_OF_CONDUCT.md) when submitting code or comments.

## License

[![License: GPL v3.0](https://img.shields.io/badge/License-GPL%20v3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0)

This project is licensed under the GNU General Public License v3.0. See the [LICENSE](https://github.com/poanetwork/blockscout/blob/master/LICENSE) file for details.
@ -0,0 +1,275 @@ |
|||||||
|
<!--ansible-deployment.md --> |
||||||
|
|
||||||
|
# Playbook Overview |
||||||
|
|
||||||
|
We use [Ansible](https://docs.ansible.com/ansible/latest/index.html) & [Terraform](https://www.terraform.io/intro/getting-started/install.html) to build the correct infrastructure to run BlockScout. |
||||||
|
|
||||||
|
The playbook repository is located at [https://github.com/poanetwork/blockscout-terraform](https://github.com/poanetwork/blockscout-terraform). Currently it only supports [AWS](#AWS-permissions) as a cloud provider. |
||||||
|
|
||||||
|
In the root folder you will find Ansible Playbooks to create all necessary infrastructure to deploy BlockScout. The `lambda` folder also contains a set of scripts that may be useful in your BlockScout infrastructure. |
||||||
|
|
||||||
|
|
||||||
|
1. [Deploying the Infrastructure](#deploying-the-infrastructure). This section describes all the steps to deploy the virtual hardware that is required for production instance of BlockScout. Skip this section if you do have an infrastructure and simply want to install or update your BlockScout. |
||||||
|
2. [Deploying BlockScout](#deploying-blockscout). Follow this section to install or update your BlockScout. |
||||||
|
3. [Destroying Provisioned Infrastructure](#destroying-provisioned-infrastructure). Refer to this section if you want to destroy your BlockScout installation. |
||||||
|
|
||||||
|
|
||||||
|
# Prerequisites |
||||||
|
|
||||||
|
Playbooks relies on Terraform, the stateful infrastructure-as-a-code software tool. It allows you to modify and recreate single and multiple resources depending on your needs. |
||||||
|
|
||||||
|
## Prerequisites for deploying infrastructure |
||||||
|
|
||||||
|
| Dependency name | Installation method | |
||||||
|
| -------------------------------------- | ------------------------------------------------------------ | |
||||||
|
| Ansible >= 2.6 | [Installation guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) | |
||||||
|
| Terraform >=0.11.11 | [Installation guide](https://learn.hashicorp.com/terraform/getting-started/install.html) | |
||||||
|
| Python >=2.6.0 | `apt install python` | |
||||||
|
| Python-pip | `apt install python-pip` | |
||||||
|
| boto & boto3 & botocore python modules | `pip install boto boto3 botocore` | |
||||||
|
|
||||||
|
## Prerequisites for deploying BlockScout |
||||||
|
|
||||||
|
| Dependency name | Installation method | |
||||||
|
| -------------------------------------- | ------------------------------------------------------------ | |
||||||
|
| Ansible >= 2.7.3 | [Installation guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) | |
||||||
|
| Terraform >=0.11.11 | [Installation guide](https://learn.hashicorp.com/terraform/getting-started/install.html) | |
||||||
|
| Python >=2.6.0 | `apt install python` | |
||||||
|
| Python-pip | `apt install python-pip` | |
||||||
|
| boto & boto3 & botocore python modules | `pip install boto boto3 botocore` | |
||||||
|
| AWS CLI | `pip install awscli` | |
||||||
|
| All BlockScout prerequisites | [Check here](requirements.md) | |
||||||
|
|
||||||
|
|
||||||
|
# AWS permissions |
||||||
|
|
||||||
|
See our forum for a detailed [AWS settings and setup tutorial](https://forum.poa.network/t/aws-settings-for-blockscout-terraform-deployment/1962). |
||||||
|
|
||||||
|
During deployment you will provide credentials to your AWS account. The deployment process requires a wide set of permissions, so it works best if you specify the administrator account credentials. |
||||||
|
|
||||||
|
However, if you want to restrict the permissions, here is the list of resources which are created during the deployment process: |
||||||
|
|
||||||
|
- An S3 bucket to keep Terraform state files; |
||||||
|
- DynamoDB table to manage Terraform state files leases; |
||||||
|
- An SSH keypair (or you can choose to use one which was already created), this is used with any EC2 hosts; |
||||||
|
- A VPC containing all of the resources provisioned; |
||||||
|
- A public subnet for the app servers, and a private subnet for the database (and Redis for now); |
||||||
|
- An internet gateway to provide internet access for the VPC; |
||||||
|
- An ALB which exposes the app server HTTPS endpoints to the world; |
||||||
|
- A security group to lock down ingress to the app servers to 80/443 + SSH; |
||||||
|
- A security group to allow the ALB to talk to the app servers; |
||||||
|
- A security group to allow the app servers access to the database; |
||||||
|
- An internal DNS zone; |
||||||
|
- A DNS record for the database; |
||||||
|
- An autoscaling group and launch configuration for each chain; |
||||||
|
- A CodeDeploy application and deployment group targeting the corresponding autoscaling groups. |
||||||
|
|
||||||
|
Each configured chain receives its own ASG (autoscaling group) and deployment group. When application updates are pushed to CodeDeploy, all autoscaling groups will deploy the new version using a blue/green strategy. Currently, there is only one EC2 host to run, and the ASG is configured to allow scaling up, but no triggers are set up to actually perform the scaling yet. This is something that may come in the future. |
||||||
|
|
||||||
|
When deployment begins, Ansible creates the S3 bucket and DynamoDB table required for Terraform state management. This ensures that the Terraform state is stored in a centralized location, allowing multiple people to use Terraform on the same infra without interfering with one another. Terraform prevents interference by holding locks (via DynamoDB) against the state data (stored in S3). |
||||||
|
|
||||||
|
# Configuration |
||||||
|
|
||||||
|
The single point of configuration in this script is a `group_vars/all.yml` file. First, copy it from `group_vars/all.yml.example` template by executing `cp group_vars/all.yml.example group_vars/all.yml` command and then modify it via any text editor you want (vim example - `vim group_vars/all.yml`). The subsections describe the variable you may want to adjust. |
||||||
|
|
||||||
|
# Variables |
||||||
|
|
||||||
|
## Common variables |
||||||
|
|
||||||
|
- `aws_access_key` and `aws_secret_key` is a credentials pair that provides access to AWS for the deployer; |
||||||
|
- `backend` variable defines whether deployer should keep state files remote or locally. Set `backend` variable to `true` if you want to save state file to the remote S3 bucket; |
||||||
|
- `upload_config_to_s3` - set to `true` if you want to upload config `all.yml` file to the S3 bucket automatically after the deployment. Will not work if `backend` is set to false; |
||||||
|
- `upload_debug_info_to_s3` - set to `true` if you want to upload full log output to the S3 bucket automatically after the deployment. Will not work if `backend` is set to false. *IMPORTANT*: Locally logs are stored at `log.txt` which is not cleaned automatically. Please, do not forget to clean it manually or using the `clean.yml` playbook; |
||||||
|
- `bucket` represents a globally unique name of the bucket where your configs and state will be stored. It will be created automatically during the deployment; |
||||||
|
- `prefix` - is a unique tag to use for provisioned resources (5 alphanumeric chars or less); |
||||||
|
- `chains` - maps chains to the URLs of HTTP RPC endpoints, an ordinary blockchain node can be used; |
||||||
|
- The `region` should be left at `us-east-1` as some of the other regions fail for different reasons; |
||||||
|
|
||||||
|
*Note*: a chain name SHOULD NOT be more than 5 characters. Otherwise, it will throw an error because the aws load balancer name should not be greater than 32 characters. |
||||||
|
|
||||||
|
## Infrastructure related variables |
||||||
|
|
||||||
|
- `dynamodb_table` represents the name of table that will be used for Terraform state lock management; |
||||||
|
- If `ec2_ssh_key_content` variable is not empty, Terraform will try to create EC2 SSH key with the `ec2_ssh_key_name` name. Otherwise, the existing key with `ec2_ssh_key_name` name will be used; |
||||||
|
- `instance_type` defines a size of the Blockscout instance that will be launched during the deployment process; |
||||||
|
- `vpc_cidr`, `public_subnet_cidr`, `db_subnet_cidr` represents the network configuration for the deployment. Usually you want to leave it as is. However, if you want to modify it, please, expect that `db_subnet_cidr` represents not a single network, but a group of networks started with defined CIDR block increased by 8 bits. |
||||||
|
Example: |
||||||
|
Number of networks: 2 |
||||||
|
`db_subnet_cidr`: "10.0.1.0/16" |
||||||
|
Real networks: 10.0.1.0/24 and 10.0.2.0/24 |
||||||
|
- An internal DNS zone with`dns_zone_name` name will be created to take care of BlockScout internal communications; |
||||||
|
- The name of a IAM key pair to use for EC2 instances, if you provide a name which |
||||||
|
already exists it will be used, otherwise it will be generated for you; |
||||||
|
|
||||||
|
* If `use_ssl` is set to `false`, SSL will be forced on Blockscout. To configure SSL, use `alb_ssl_policy` and `alb_certificate_arn` variables; |
||||||
|
|
||||||
|
- The `root_block_size` is the amount of storage on your EC2 instance. This value can be adjusted by how frequently logs are rotated. Logs are located in `/opt/app/logs` of your EC2 instance; |
||||||
|
- The `pool_size` defines the number of connections allowed by the RDS instance; |
||||||
|
- `secret_key_base` is a random password used for BlockScout internally. It is highly recommended to gernerate your own `secret_key_base` before the deployment. For instance, you can do it via `openssl rand -base64 64 | tr -d '\n'` command; |
||||||
|
- `new_relic_app_name` and `new_relic_license_key` should usually stay empty unless you want and know how to configure New Relic integration; |
||||||
|
- `elixir_version` - is an Elixir version used in BlockScout release; |
||||||
|
- `chain_trace_endpoint` - maps chains to the URLs of HTTP RPC endpoints, which represents a node where state pruning is disabled (archive node) and tracing is enabled. If you don't have a trace endpoint, you can simply copy values from `chains` variable; |
||||||
|
- `chain_ws_endpoint` - maps chains to the URLs of HTTP RPCs that supports websockets. This is required to get the real-time updates. Can be the same as `chains` if websocket is enabled there (but make sure to use`ws(s)` instead of `htpp(s)` protocol); |
||||||
|
- `chain_jsonrpc_variant` - a client used to connect to the network. Can be `parity`, `geth`, etc; |
||||||
|
- `chain_logo` - maps chains to the it logos. Place your own logo at `apps/block_scout_web/assets/static` and specify a relative path at `chain_logo` variable; |
||||||
|
- `chain_coin` - a name of the coin used in each particular chain; |
||||||
|
- `chain_network` - usually, a name of the organization keeping group of networks, but can represent a name of any logical network grouping you want; |
||||||
|
- `chain_subnetwork` - a name of the network to be shown at BlockScout; |
||||||
|
- `chain_network_path` - a relative URL path which will be used as an endpoint for defined chain. For example, if we will have our BlockScout at `blockscout.com` domain and place `core` network at `/poa/core`, then the resulting endpoint will be `blockscout.com/poa/core` for this network. |
||||||
|
- `chain_network_icon` - maps the chain name to the network navigation icon at apps/block_scout_web/lib/block_scout_web/templates/icons without .eex extension |
||||||
|
- `chain_graphiql_transaction` - is a variable that maps chain to a random transaction hash on that chain. This hash will be used to provide a sample query in the GraphIQL Playground. |
||||||
|
- `chain_block_transformer` - will be `clique` for clique networks like Rinkeby and Goerli, and `base` for the rest; |
||||||
|
- `chain_heart_beat_timeout`, `chain_heart_command` - configs for the integrated heartbeat. First describes a timeout after the command described at the second variable will be executed; |
||||||
|
- Each of the `chain_db_*` variables configures the database for each chain. Each chain will have the separate RDS instance. |
||||||
|
- `chain_blockscout_version` - is a text at the footer of BlockScout instance. Usually represents the current BlockScout version. |
||||||
|
|
||||||
|
## Blockscout related variables |
||||||
|
|
||||||
|
- `blockscout_repo` - a direct link to the Blockscout repo; |
||||||
|
- `chain_branch` - maps branch at `blockscout_repo` to each chain; |
||||||
|
- Specify the `chain_merge_commit` variable if you want to merge any of the specified `chains` with the commit in the other branch. Usually may be used to update production branches with the releases from master branch; |
||||||
|
- `skip_fetch` - if this variable is set to `true`, the BlockScout repo will not be cloned and the process will start from building the dependencies. Use this variable to prevent the playbooks from overriding manual changes in the cloned repo;
||||||
|
- `ps_*` variables represent the connection details for the test Postgres database. This one will not be installed automatically, so make sure the `ps_*` credentials are valid before starting the deployment;
||||||
|
- `chain_custom_environment` - a map of variables that should be overridden when deploying a new version of BlockScout. Can be omitted.
||||||
|
|
||||||
|
*Note*: `chain_custom_environment` variables will not be propagated to the Parameter Store on production servers and need to be set there manually.
||||||
|
|
||||||
|
# Database Storage Required |
||||||
|
|
||||||
|
The configuration variable `db_storage` can be used to define the amount of storage allocated to your RDS instance. The table below shows an estimated amount of storage required to index individual chains. `db_storage` can only be adjusted once in a 24-hour period on AWS.
||||||
|
|
||||||
|
| Chain | Storage (GiB) | |
||||||
|
| ---------------- | ------------- | |
||||||
|
| POA Core | 200 | |
||||||
|
| POA Sokol | 400 | |
||||||
|
| Ethereum Classic | 1000 | |
||||||
|
| Ethereum Mainnet | 4000 | |
||||||
|
| Kovan Testnet | 800 | |
||||||
|
| Ropsten Testnet | 1500 | |
||||||
|
|
||||||
|
# Deploying the Infrastructure |
||||||
|
|
||||||
|
1. Ensure all the [infrastructure prerequisites](#Prerequisites-for-deploying-infrastructure) are installed and have the right version numbers;
||||||
|
|
||||||
|
2. Create the AWS access key and secret access key for a user with [sufficient permissions](#AWS);
||||||
|
|
||||||
|
3. Merge the `infrastructure` and `all` config template files into a single config file:
||||||
|
```bash |
||||||
|
cat group_vars/infrastructure.yml.example group_vars/all.yml.example > group_vars/all.yml |
||||||
|
``` |
||||||
|
|
||||||
|
4. Set the variables in the `group_vars/all.yml` config file as described in the [corresponding part of the instructions](#Configuration);
||||||
|
|
||||||
|
5. Run `ansible-playbook deploy_infra.yml`; |
||||||
|
|
||||||
|
- During the deployment, the ["diffs didn't match"](#error-applying-plan-diffs-didnt-match) error may occur; it will be ignored automatically. If the Ansible play recap shows 0 failed plays, the deployment was successful despite the error.
||||||
|
|
||||||
|
- Optionally, you may want to check the variables that were uploaded to the [Parameter Store](https://console.aws.amazon.com/systems-manager/parameters) in the AWS Console; a sample CLI check is sketched below.
||||||
|
|
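If you have the AWS CLI configured with the same credentials, one way to spot-check the uploaded values is `aws ssm get-parameters-by-path`. The `/tf/poa` path below is only a guess at a `<prefix>/<chain>` naming scheme; use whatever path you see in the console.

```bash
# List the names of the parameters stored under an assumed /<prefix>/<chain> path.
aws ssm get-parameters-by-path --path "/tf/poa" --recursive \
  --query 'Parameters[].Name' --output table
```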
||||||
|
|
||||||
|
# Deploying BlockScout |
||||||
|
|
||||||
|
1. Ensure all the [BlockScout prerequisites](#Prerequisites-for-deploying-blockscout) are installed and have the right version numbers;
||||||
|
2. Merge the `blockscout` and `all` config template files into a single config file:
||||||
|
|
||||||
|
```bash |
||||||
|
cat group_vars/blockscout.yml.example group_vars/all.yml.example > group_vars/all.yml |
||||||
|
``` |
||||||
|
**Note!** All three configuration files are compatible with each other, so you can simply `cat group_vars/blockscout.yml.example >> group_vars/all.yml` if you already have the `all.yml` file after deploying the infrastructure.
||||||
|
|
||||||
|
3. Set the variables in the `group_vars/all.yml` config file as described in the [corresponding part of the instructions](#Configuration);
||||||
|
**Note!** Use `chain_custom_environment` to update the variables in each deployment. Map each deployed chain to the variables as they should appear in the Parameter Store. Check the example in the `group_vars/blockscout.yml.example` config file. `chain_*` variables will be ignored during the BlockScout software deployment.
||||||
|
|
||||||
|
4. This step is for macOS users only. Please skip it if this is not your case.
||||||
|
|
||||||
|
To avoid the error |
||||||
|
``` |
||||||
|
TASK [main_software : Fetch environment variables] ************************************ |
||||||
|
objc[12816]: +[__NSPlaceholderDate initialize] may have been in progress in another thread when fork() was called. |
||||||
|
objc[12816]: +[__NSPlaceholderDate initialize] may have been in progress in another thread when fork() was called. We cannot safely call it or ignore it in the fork() child process. Crashing instead. Set a breakpoint on objc_initializeAfterForkError to debug. |
||||||
|
``` |
||||||
|
and the resulting Python crash, follow these steps (equivalent shell commands are sketched after the list):
||||||
|
|
||||||
|
- Open terminal: `nano .bash_profile`; |
||||||
|
- Add the following line to the end of the file: `export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES`; |
||||||
|
- Save, exit, close terminal and re-open the terminal. Check to see that the environment variable is now set: `env` |
||||||
|
|
||||||
|
(source: https://stackoverflow.com/questions/50168647/multiprocessing-causes-python-to-crash-and-gives-an-error-may-have-been-in-progr); |
||||||
|
|
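The equivalent shell commands, assuming the default `bash` shell and `~/.bash_profile` (adjust the file name if you use `zsh` or another shell):

```bash
# Append the variable to the profile, reload it, and confirm it is set.
echo 'export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES' >> ~/.bash_profile
source ~/.bash_profile
env | grep OBJC_DISABLE_INITIALIZE_FORK_SAFETY
```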
||||||
|
5. Run `ansible-playbook deploy_software.yml`; |
||||||
|
6. When the prompt appears, check that the server is running and there are no visual artifacts. The server will be launched on port 4000 on the same machine where you run the Ansible playbooks. If you face any errors, you can either fix them or cancel the deployment by pressing **Ctrl+C** and then pressing **A** when additionally prompted.
||||||
|
7. When the server is ready to be deployed, simply press Enter and the deployer will upload BlockScout to the appropriate S3 bucket.
||||||
|
8. Two other prompts will appear to confirm that you want to update the Parameter Store variables and deploy BlockScout through CodeDeploy. Both **yes** and **true** will be interpreted as confirmation.
||||||
|
9. Monitor and manage your deployment on the [CodeDeploy](https://console.aws.amazon.com/codesuite/codedeploy/applications) service page in the AWS Console.
||||||
|
|
||||||
|
# Destroying Provisioned Infrastructure |
||||||
|
|
||||||
|
First of all, you have to manually remove the autoscaling groups (ASG) deployed via CodeDeploy, since Terraform doesn't track them and will miss them during the automatic destroy process. Once the ASG is deleted, you can use the `ansible-playbook destroy.yml` playbook to remove the rest of the generated infrastructure. Make sure to check the playbook output, since in some cases it might not be able to delete everything. Check the error description for details.
||||||
|
|
||||||
|
**Note!** While Terraform is stateful, Ansible is stateless, so if you modify the `bucket` or `dynamodb_table` variables and run the `destroy.yml` or `deploy_infra.yml` playbooks, they will not rename the current S3/DynamoDB resources but will create new ones instead. Moreover, altering the `bucket` variable will make Terraform forget about the existing infrastructure and, as a consequence, redeploy it. If it is absolutely necessary for you to alter the S3 or DynamoDB names, you can do it manually and then change the appropriate variable accordingly.
||||||
|
|
||||||
|
Also note that changing the `backend` variable will force Terraform to forget about the created infrastructure as well, since it will start looking for the state files locally instead of remotely.
||||||
|
|
||||||
|
# Useful information |
||||||
|
|
||||||
|
## Cleaning Deployment cache |
||||||
|
|
||||||
|
Although the Terraform cache is cleared automatically before each deployment, you may also want to force the cleaning process manually. To do this, simply run the `ansible-playbook clean.yml` command, and the Terraform cache will be cleared.
||||||
|
|
||||||
|
## Migrating deployer to another machine |
||||||
|
|
||||||
|
You can manage your deployment from any machine with sufficient prerequisites. If the `upload_debug_info_to_s3` variable is set to `true`, the deployer will automatically upload your `all.yml` file to the S3 bucket, so you can easily download it to any other machine. Simply download this file into your `group_vars` folder and the new deployer will pick up the current deployment instead of creating a new one.
||||||
|
|
||||||
|
|
||||||
|
## Attaching the existing RDS instance to the current deployment |
||||||
|
|
||||||
|
In some cases you may want to use an existing database within the deployment instead of creating a new one. In order to do that, configure all the proper values in `group_vars/all.yml`, including your DB ID and name, and execute the `ansible-playbook attach_existing_rds.yml` command. This will add the existing DB instance into the Terraform-managed resource group. After that, run `ansible-playbook deploy_infra.yml` as usual.
||||||
|
|
||||||
|
**Note 1**: while executing `ansible-playbook attach_existing_rds.yml`, the S3 bucket and DynamoDB table will be created automatically (if the `backend` variable is set to `true`) to store Terraform state files.
||||||
|
|
||||||
|
**Note 2**: the actual name of your resource must include the prefix that you will use in this deployment.
||||||
|
|
||||||
|
Example:

- Real resource: `tf-poa`
- `prefix` variable: `tf`
- `chain_db_id` variable: `poa`
||||||
|
|
||||||
|
**Note 3**: make sure MultiAZ is disabled on your database. |
||||||
|
|
||||||
|
**Note 4**: make sure that all the variables in `group_vars/all.yml` are exactly the same as those of your existing DB.
||||||
|
|
||||||
|
## Using AWS CodeDeploy to Monitor and Manage a BlockScout Deployment
||||||
|
|
||||||
|
BlockScout deployment can be managed through the AWS console. [A brief tutorial is available on our forum](https://forum.poa.network/t/monitor-and-manage-a-blockscout-deployment-using-codedeploy-in-your-aws-console/2499). |
||||||
|
|
||||||
|
# Common Errors and Questions |
||||||
|
|
||||||
|
### S3: 403 error during provisioning |
||||||
|
Usually appears if the S3 bucket already exists. Remember, S3 bucket names are globally unique, so even if you don't have a bucket with that name, it may still exist elsewhere. Log in to your AWS console and try to create an S3 bucket with the same name you specified in the `bucket` variable to check.
||||||
|
|
||||||
|
### Error Applying Plan (diffs didn't match) |
||||||
|
|
||||||
|
If you see something like the following: |
||||||
|
|
||||||
|
``` |
||||||
|
Error: Error applying plan: |
||||||
|
|
||||||
|
1 error(s) occurred: |
||||||
|
|
||||||
|
* module.stack.aws_autoscaling_group.explorer: aws_autoscaling_group.explorer: diffs didn't match during apply. This is a bug with Terraform and should be reported as a GitHub Issue. |
||||||
|
|
||||||
|
Please include the following information in your report: |
||||||
|
|
||||||
|
Terraform Version: 0.11.11 |
||||||
|
Resource ID: aws_autoscaling_group.explorer |
||||||
|
Mismatch reason: attribute mismatch: availability_zones.1252502072 |
||||||
|
``` |
||||||
|
|
||||||
|
This is due to a bug in Terraform; the fix is to simply rerun `ansible-playbook deploy_infra.yml` again, and Terraform will pick up where it left off. This error does not always happen, but this is the current workaround if you see it.
||||||
|
|
||||||
|
### Server doesn't start during deployment |
||||||
|
|
||||||
|
Even if the server is configured correctly, sometimes it may not bind to port 4000 for an unknown reason. If so, simply go to the appropriate nested blockscout folder, then kill and restart the server. For example, you can use the following command: `pkill beam.smp && pkill node && sleep 10 && mix phx.server`.
@ -0,0 +1,67 @@ |
|||||||
|
<!--api.md --> |
||||||
|
|
||||||
|
## BlockScout Internal Documentation |
||||||
|
|
||||||
|
To view Modules and API Reference documentation: |
||||||
|
|
||||||
|
1. Generate documentation. |
||||||
|
`mix docs` |
||||||
|
2. View the generated docs. |
||||||
|
`open doc/index.html` |
||||||
|
|
||||||
|
|
||||||
|
## BlockScout API Usage |
||||||
|
|
||||||
|
API calls can be accessed from the BlockScout UI menu. BlockScout supports several methods:
||||||
|
|
||||||
|
1. [Graphiql](https://github.com/graphql/graphiql): An IDE for exploring GraphQL |
||||||
|
2. RPC: API provided for developers transitioning their applications from Etherscan to BlockScout. It supports GET and POST requests (see the example request after this list).
||||||
|
|
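As a quick illustration of the RPC flavor, the request below uses Etherscan-style `module`/`action` query parameters against a hosted instance. The exact set of supported modules and actions depends on the BlockScout version, and the address is just the one reused from the GraphQL examples further down, so treat this as a sketch rather than a reference:

```bash
# GET example: fetch the coin balance of an address via the Etherscan-compatible API.
curl "https://blockscout.com/poa/core/api?module=account&action=balance&address=0x1fddEc96688e0538A316C64dcFd211c491ECf0d8"
```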
||||||
|
### Graphiql |
||||||
|
|
||||||
|
Send queries to quickly get information. Use the Docs button to find arguments accepted by the schema. More information is available in our [BlockScout GraphQL tutorial](https://forum.poa.network/t/graphql-in-blockscout/1971).
||||||
|
|
||||||
|
![Graphiql](_media/graphiql_screenshot.png) |
||||||
|
|
||||||
|
|
||||||
|
#### Graphiql RootQueryType Fields |
||||||
|
|
||||||
|
* address(hash: AddressHash!): Address<br /> |
||||||
|
Gets an address by hash. |
||||||
|
<br /><br /> |
||||||
|
* addresses(hashes: [AddressHash!]!): [Address]<br /> |
||||||
|
Gets addresses by address hash. |
||||||
|
<br /><br /> |
||||||
|
* block(number: Int!): Block<br /> |
||||||
|
Gets a block by number. |
||||||
|
<br /><br /> |
||||||
|
* node(id: ID!): Node<br /> |
||||||
|
Fetches an object given its ID |
||||||
|
<br /><br /> |
||||||
|
* tokenTransfers(<br /> |
||||||
|
after: String<br /> |
||||||
|
before: String<br /> |
||||||
|
count: Int<br /> |
||||||
|
first: Int<br /> |
||||||
|
last: Int<br /> |
||||||
|
tokenContractAddressHash: AddressHash!<br /> |
||||||
|
): TokenTransferConnection<br /> |
||||||
|
Gets token transfers by token contract address hash. |
||||||
|
<br /><br /> |
||||||
|
* transaction(hash: FullHash!): Transaction<br /> |
||||||
|
Gets a transaction by hash. |
||||||
|
|
||||||
|
#### Example Queries |
||||||
|
|
||||||
|
BlockScout's GraphQL API provides 4 queries and 1 subscription. You can view them in the GraphiQL interface under the `Schema` tab. Short query examples:
||||||
|
|
||||||
|
| Query | Description | Example | |
||||||
|
|-----------------------------------------------|-----------------------------|------------------------------------------------------------------------------------------------------------------------------------------| |
||||||
|
| address(hash: AddressHash!): Address | Gets an address by hash | {address(hash: "0x1fddEc96688e0538A316C64dcFd211c491ECf0d8") {hash, contractCode} } | |
||||||
|
| addresses (hashes: [AddressHash!]): [Address] | Gets addresses by hashes | {addresses(hashes: ["0x1fddEc96688e0538A316C64dcFd211c491ECf0d8", "0x3948c17c0f45017064858b8352580267a85a762c"]) {hash, contractCode} } | |
||||||
|
| block(number: Int!): Block | Gets a block by number | {block(number: 1) {parentHash, size, nonce}} | |
||||||
|
| transaction (hash: FullHash!): Transaction | Gets a transaction by hash. | {transaction(hash: "0xc391da8f433b3bea0b3eb45da40fdd194c7a0e07d1b5ad656bf98940f80a6cf6") {input, gasUsed}} | |
||||||
|
|
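The same queries can also be sent over plain HTTP. The sketch below assumes the GraphQL HTTP endpoint is exposed at `/graphql` next to the GraphiQL UI, which may differ between deployments:

```bash
# POST the block query from the table above to a hosted instance.
curl -s -X POST "https://blockscout.com/poa/core/graphql" \
  -H "Content-Type: application/json" \
  -d '{"query": "{ block(number: 1) { parentHash size nonce } }"}'
```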
||||||
|
|
||||||
|
[Example GraphQL Query to retrieve transactions for a specific address](https://forum.poa.network/t/faq-graphql-query-to-retrieve-transactions-for-a-specific-address/1937) |
||||||
|
|
@ -0,0 +1,33 @@ |
|||||||
|
<!--chain-configs.md --> |
||||||
|
|
||||||
|
## Configuring EVM Chains |
||||||
|
|
||||||
|
* **CSS:** Update the import instruction in `apps/block_scout_web/assets/css/theme/_variables.scss` to select a preset css file. This is reflected in the `production-${chain}` branch for each instance. For example, in the `production-xdai` branch, comment out `@import "neutral_variables"` and uncomment `@import "dai_variables"`.
||||||
|
|
||||||
|
* **ENV:** Update the [environment variables](env-variables.md) to match the chain specs. |
||||||
|
|
||||||
|
### Current css presets |
||||||
|
```scss
||||||
|
@import "theme/base_variables"; |
||||||
|
@import "neutral_variables"; |
||||||
|
// @import "dai_variables"; |
||||||
|
// @import "ethereum_classic_variables"; |
||||||
|
// @import "ethereum_variables"; |
||||||
|
// @import "ether1_variables"; |
||||||
|
// @import "expanse_variables"; |
||||||
|
// @import "gochain_variables"; |
||||||
|
// @import "goerli_variables"; |
||||||
|
// @import "kovan_variables"; |
||||||
|
// @import "lukso_variables"; |
||||||
|
// @import "musicoin_variables"; |
||||||
|
// @import "pirl_variables"; |
||||||
|
// @import "poa_variables"; |
||||||
|
// @import "posdao_variables"; |
||||||
|
// @import "rinkeby_variables"; |
||||||
|
// @import "ropsten_variables"; |
||||||
|
// @import "social_variables"; |
||||||
|
// @import "sokol_variables"; |
||||||
|
// @import "tobalaba_variables"; |
||||||
|
// @import "tomochain_variables"; |
||||||
|
// @import "rsk_variables"; |
||||||
|
``` |
@ -0,0 +1,5 @@ |
|||||||
|
<!--circleci.md --> |
||||||
|
|
||||||
|
## CircleCI Updates |
||||||
|
|
||||||
|
To monitor build status, configure your local [CCMenu](http://ccmenu.org/) with the following url: [`https://circleci.com/gh/poanetwork/blockscout.cc.xml?circle-token=f8823a3d0090407c11f87028c73015a331dbf604`](https://circleci.com/gh/poanetwork/blockscout.cc.xml?circle-token=f8823a3d0090407c11f87028c73015a331dbf604) |
@ -0,0 +1,23 @@ |
|||||||
|
<!--dev-env.md --> |
||||||
|
# Configuration Options |
||||||
|
|
||||||
|
- [Chain Configuration](chain-configs.md) |
||||||
|
- [Automating Restarts](restarts.md) |
||||||
|
- [Front End](front-end.md) |
||||||
|
- [CircleCI Configs](circleci.md) |
||||||
|
- [Testing](testing.md) |
||||||
|
- [Internationalization](internationalization.md) |
||||||
|
- [Metrics](metrics.md) |
||||||
|
- [Tracing](tracing.md) |
||||||
|
- [Memory Usage](memory-usage.md) |
||||||
|
- [API Docs](api.md) |
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -0,0 +1,46 @@ |
|||||||
|
# BlockScout Env Variables |
||||||
|
|
||||||
|
Below is a table outlining the environment variables utilized by BlockScout. |
||||||
|
|
||||||
|
|
||||||
|
| Variable | Required | Description | Default | Version | |
||||||
|
| --- | --- | --- | ---| --- | |
||||||
|
| `NETWORK`| :white_check_mark: | Environment variable for the main EVM network such as Ethereum Network or POA Network | POA Network | all | |
||||||
|
| `SUBNETWORK` | :white_check_mark: | Environment variable for the subnetwork such as Core or Sokol Network | Sokol Testnet | all | |
||||||
|
| `NETWORK_ICON` | :white_check_mark: | Environment variable for the main network icon or testnet icon. Two options are `_test_network_icon.html` and `_network_icon.html` | `_test_network_icon.html` | all | |
||||||
|
| `LOGO` | :white_check_mark: | Environment variable for the logo image location. The logo files names for different chains can be found [here](https://github.com/poanetwork/blockscout/tree/master/apps/block_scout_web/assets/static/images) | /images/blockscout_logo.svg | all | |
||||||
|
| `ETHEREUM_JSONRPC_VARIANT` | :white_check_mark: | This environment variable is used to tell the application which RPC Client the node is using (i.e. Geth, Parity, or Ganache) | parity | all | |
||||||
|
| `ETHEREUM_JSONRPC_HTTP_URL` | :white_check_mark: | The RPC endpoint used to fetch blocks, transactions, receipts, tokens. | localhost:8545 | all | |
||||||
|
| `ETHEREUM_JSONRPC_TRACE_URL` | | The RPC endpoint specifically for the Geth/Parity client used by trace_block and trace_replayTransaction. This can be used to designate a tracing node. | localhost:8545 | all | |
||||||
|
| `ETHEREUM_JSONRPC_WS_URL` | :white_check_mark: | The WebSockets RPC endpoint used to subscribe to the `newHeads` subscription alerting the indexer to fetch new blocks. | ws://localhost:8546 | all | |
||||||
|
| `NETWORK_PATH` | | Used to set a network path other than what is displayed in the root directory. An example would be to add /eth/mainnet/ to the root directory. | (empty) | all | |
||||||
|
| `SECRET_KEY_BASE` | :white_check_mark: | Use mix phx.gen.secret to generate a new Secret Key Base string to protect production assets. | (empty) | all | |
||||||
|
| `CHECK_ORIGIN` | | Used to check the origin of requests when the origin header is present. It defaults to false. In case of true, it will check against the host value. | false | all | |
||||||
|
| `PORT` | :white_check_mark: | Default port the application runs on is 4000 | 4000 | all | |
||||||
|
| `COIN` | :white_check_mark: | The coin here is checked via the Coinmarketcap API to obtain USD prices on graphs and other areas of the UI | POA | all | |
||||||
|
| `METADATA_CONTRACT` | | This environment variable is specifically used by POA Network to obtain Validators information to display in the UI. | (empty) | all | |
||||||
|
| `VALIDATORS_CONTRACT` | | This environment variable is specifically used by POA Network to obtain the Emission Fund contract. | (empty) | all | |
||||||
|
| `SUPPLY_MODULE` | | This environment variable is used by the xDai Chain in order to tell the application how to calculate the total supply of the chain. | false | all | |
||||||
|
| `SOURCE_MODULE` | | This environment variable is used to calculate the total supply and is specifically used by the xDai Chain. | false | all | |
||||||
|
| `DATABASE_URL` | | Production environment variable to define the Database endpoint. | (empty) | all | |
||||||
|
| `POOL_SIZE` | | Production environment variable to define the number of database connections allowed. | 20 | all | |
||||||
|
| `ECTO_USE_SSL`| | Production environment variable to use SSL on Ecto queries. | true | all | |
||||||
|
| `DATADOG_HOST` | | Host configuration setting for [Datadog integration](https://docs.datadoghq.com/integrations/) | (empty) | all | |
||||||
|
| `DATADOG_PORT` | | Port configuration setting for [Datadog integration](https://docs.datadoghq.com/integrations/). | (empty) | all |
||||||
|
| `SPANDEX_BATCH_SIZE` | | [Spandex](https://github.com/spandex-project/spandex) and Datadog configuration setting. | (empty) | all | |
||||||
|
| `SPANDEX_SYNC_THRESHOLD` | | [Spandex](https://github.com/spandex-project/spandex) and Datadog configuration setting. | (empty) | all | |
||||||
|
| `HEART_BEAT_TIMEOUT` | | Production environment variable to restart the application in the event of a crash. | 30 | all | |
||||||
|
| `HEART_COMMAND` | | Production environment variable to restart the application in the event of a crash. | systemctl restart explorer.service | all | |
||||||
|
| `BLOCKSCOUT_VERSION` | | Added to the footer to signify the current BlockScout version. | (empty) | v1.3.4+ | |
||||||
|
| `RELEASE_LINK` | | The link to Blockscout release notes in the footer. | https://github.com/poanetwork/ <br /> <u>blockscout/releases/</u> <br /> <u>tag/${BLOCKSCOUT_VERSION}</u> | v1.3.5+ | |
||||||
|
| `ELIXIR_VERSION` | | Elixir version to install on the node before Blockscout deploy. | (empty) | all | |
||||||
|
| `BLOCK_TRANSFORMER` | | Transformer for blocks: base or clique. | base | v1.3.4+ | |
||||||
|
| `GRAPHIQL_TRANSACTION` | | Default transaction in query to GraphiQL. | (empty) | v1.3.4+ |
||||||
|
| `FIRST_BLOCK` | | The block number, where indexing begins from. | 0 | v1.3.8+ | |
||||||
|
| `TXS_COUNT_CACHE_PERIOD` | | Interval in seconds to restart the task, which calculates the total txs count. | 60 * 60 * 2 | v1.3.9+ | |
||||||
|
| `ADDRESS_WITH_BALANCES` <br /> `_UPDATE_INTERVAL`| | Interval in seconds to restart the task, which calculates addresses with balances. | 30 * 60 | v1.3.9+ | |
||||||
|
| `LINK_TO_OTHER_EXPLORERS` | | true/false. If true, links to other explorers are added in the footer | (empty) | v1.3.0+ | |
||||||
|
| `COINMARKETCAP_PAGES` | | the number of pages on coinmarketcap to list in order to find token's price | 10 | v1.3.10+ | |
||||||
|
| `SUPPORTED_CHAINS` | | Array of supported chains that displays in the footer and in the chains dropdown. This var was introduced in this PR [#1900](https://github.com/poanetwork/blockscout/pull/1900) and looks like an array of JSON objects. | (empty) | v2.0.0+ | |
||||||
|
| `BLOCK_COUNT_CACHE_PERIOD ` | | time to live of cache in seconds. This var was introduced in [#1876](https://github.com/poanetwork/blockscout/pull/1876) | 600 | v2.0.0+ | |
||||||
|
| `ALLOWED_EVM_VERSIONS ` | | the comma-separated list of allowed EVM versions for contracts verification. This var was introduced in [#1964](https://github.com/poanetwork/blockscout/pull/1964) | "homestead, tangerineWhistle, spuriousDragon, byzantium, constantinople, petersburg" | v2.0.0+ | |
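For a local run, these are typically provided as plain environment variables before starting the server. The set below is a hypothetical minimum based on the defaults in the table; the values are placeholders, not recommendations:

```bash
export NETWORK="POA Network"
export SUBNETWORK="Sokol Testnet"
export COIN=POA
export ETHEREUM_JSONRPC_VARIANT=parity
export ETHEREUM_JSONRPC_HTTP_URL=http://localhost:8545
export ETHEREUM_JSONRPC_WS_URL=ws://localhost:8546
export PORT=4000
# Generate a fresh secret as suggested in the table above (run inside the cloned repo).
export SECRET_KEY_BASE="$(mix phx.gen.secret)"
```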
@ -0,0 +1,10 @@ |
|||||||
|
|
||||||
|
|
||||||
|
!> **Important** notice with `inline code` and additional placeholder text used |
||||||
|
to force the content to wrap and span multiple lines. |
||||||
|
|
||||||
|
> [!NOTE] |
||||||
|
> An alert of type 'note' using global style 'callout'. |
||||||
|
|
||||||
|
> [!NOTE|style:flat] |
||||||
|
> An alert of type 'note' using alert specific style 'flat' which overrides global style 'callout'. |
@ -0,0 +1,3 @@ |
|||||||
|
<!-- faq.md --> |
||||||
|
|
||||||
|
FAQs are located in the [BlockScout forum](https://forum.poa.network/c/blockscout/wiki). |
@ -0,0 +1,18 @@ |
|||||||
|
<!--front-end.md --> |
||||||
|
|
||||||
|
## Front-end |
||||||
|
|
||||||
|
### Javascript |
||||||
|
|
||||||
|
All JavaScript files are located in [apps/block_scout_web/assets/js](https://github.com/poanetwork/blockscout/tree/master/apps/block_scout_web/assets/js). The main file is [app.js](https://github.com/poanetwork/blockscout/blob/master/apps/block_scout_web/assets/js/app.js). This file imports all JavaScript used in the application. If you want to create a new JS file, consider creating it in [/js/pages](https://github.com/poanetwork/blockscout/tree/master/apps/block_scout_web/assets/js/pages) or [/js/lib](https://github.com/poanetwork/blockscout/tree/master/apps/block_scout_web/assets/js/lib), as follows:
||||||
|
|
||||||
|
#### js/lib |
||||||
|
This folder contains all scripts usable for any page or as helpers to some component. |
||||||
|
|
||||||
|
#### js/pages |
||||||
|
This folder contains the scripts that are page-specific. |
||||||
|
|
||||||
|
#### Redux |
||||||
|
This project uses Redux to control the state in some pages. There are pages with real-time events that use Phoenix channels, e.g. Address page. The page state changes often depending on which events it is listening to. Redux is also used to load some contents asynchronously, see [async_listing_load.js](https://github.com/poanetwork/blockscout/blob/master/apps/block_scout_web/assets/js/lib/async_listing_load.js). |
||||||
|
|
||||||
|
To understand how to build new pages that require Redux, see the [redux_helpers.js](https://github.com/poanetwork/blockscout/blob/master/apps/block_scout_web/assets/js/lib/redux_helpers.js) file. |
@ -0,0 +1,10 @@ |
|||||||
|
<!--internationalization.md --> |
||||||
|
|
||||||
|
|
||||||
|
## Internationalization |
||||||
|
|
||||||
|
The app is currently internationalized. It is only localized to U.S. English. To translate new strings:
||||||
|
|
||||||
|
1. Set up the translation file.
||||||
|
`cd apps/block_scout_web; mix gettext.extract --merge; cd -` |
||||||
|
2. To edit the new strings, go to `apps/block_scout_web/priv/gettext/en/LC_MESSAGES/default.po`. |
@ -0,0 +1,85 @@ |
|||||||
|
<!-- manual-deployment.md --> |
||||||
|
|
||||||
|
## Manual Deployment |
||||||
|
|
||||||
|
Below is the procedure for manual deployment of BlockScout. For automated deployment, see [ansible deployment](ansible-deployment.md). |
||||||
|
|
||||||
|
1. `git clone https://github.com/poanetwork/blockscout` |
||||||
|
|
||||||
|
2. `cd blockscout` |
||||||
|
|
||||||
|
3. Setup default configurations: |
||||||
|
`cp apps/explorer/config/dev.secret.exs.example apps/explorer/config/dev.secret.exs` |
||||||
|
|
||||||
|
`cp apps/block_scout_web/config/dev.secret.exs.example apps/block_scout_web/config/dev.secret.exs` |
||||||
|
|
||||||
|
4. Update `apps/explorer/config/dev.secret.exs` |
||||||
|
|
||||||
|
**Linux:** Update the database username and password configuration |
||||||
|
|
||||||
|
**Mac:** Remove the `username` and `password` fields |
||||||
|
|
||||||
|
**Optional:** Set up a default configuration for testing. `cp apps/explorer/config/test.secret.exs.example apps/explorer/config/test.secret.exs` Example usage: Changing the default Postgres port from localhost:15432 if [Boxen](https://github.com/boxen/boxen) is installed. |
||||||
|
|
||||||
|
5. If you have deployed previously, delete the `apps/block_scout_web/priv/static` folder. This removes static assets from the previous build. |
||||||
|
|
||||||
|
6. Install dependencies. `mix do deps.get, local.rebar --force, deps.compile, compile` |
||||||
|
|
||||||
|
7. If not already running, start postgres: `pg_ctl -D /usr/local/var/postgres start` |
||||||
|
|
||||||
|
8. Create and migrate database `mix do ecto.create, ecto.migrate` |
||||||
|
|
||||||
|
*Note:* If you have run the application previously, drop the previous database:
||||||
|
`mix do ecto.drop, ecto.create, ecto.migrate` |
||||||
|
|
||||||
|
9. Install Node.js dependencies |
||||||
|
|
||||||
|
- `cd apps/block_scout_web/assets; npm install && node_modules/webpack/bin/webpack.js --mode production; cd -` |
||||||
|
|
||||||
|
- `cd apps/explorer && npm install; cd -` |
||||||
|
|
||||||
|
10. Enable HTTPS in development. The Phoenix server only runs with HTTPS. |
||||||
|
|
||||||
|
* `cd apps/block_scout_web` |
||||||
|
|
||||||
|
* `mix phx.gen.cert blockscout blockscout.local; cd -` |
||||||
|
* Add blockscout and blockscout.local to your `/etc/hosts` |
||||||
|
``` |
||||||
|
|
||||||
|
127.0.0.1 localhost blockscout blockscout.local |
||||||
|
|
||||||
|
255.255.255.255 broadcasthost |
||||||
|
|
||||||
|
::1 localhost blockscout blockscout.local |
||||||
|
|
||||||
|
``` |
||||||
|
|
||||||
|
* If using Chrome, enable `chrome://flags/#allow-insecure-localhost`.
||||||
|
|
||||||
|
11. Set your [environment variables](env-variables.md) as needed. For example: |
||||||
|
```bash |
||||||
|
export COIN=DAI |
||||||
|
export NETWORK_ICON=_network_icon.html |
||||||
|
export ... |
||||||
|
``` |
||||||
|
|
||||||
|
12. Return to the root directory and start the Phoenix Server. `mix phx.server` |
||||||
|
|
||||||
|
## Check your instance: |
||||||
|
|
||||||
|
13. Check that there are no visual artifacts, all assets exist and there are no database errors. |
||||||
|
|
||||||
|
14. If there are no errors, stop BlockScout (`ctrl+c`) |
||||||
|
|
||||||
|
15. Build static assets for deployment `mix phx.digest` |
||||||
|
|
||||||
|
16. Delete build artifacts: |
||||||
|
|
||||||
|
a. Script: `./rel/commands/clear_build.sh` |
||||||
|
|
||||||
|
b. Manually: |
||||||
|
- delete `_build` & `deps` directories |
||||||
|
- delete node modules located at |
||||||
|
- `apps/block_scout_web/assets/node_modules` |
||||||
|
- & `apps/explorer/node_modules` |
||||||
|
- delete `logs/dev` directory |
@ -0,0 +1,13 @@ |
|||||||
|
<!--memory-usage.md --> |
||||||
|
|
||||||
|
## Memory Usage |
||||||
|
|
||||||
|
The work queues for building the index of all blocks, balances (coin and token), and internal transactions can grow quite large. By default, the soft-limit is 1 GiB, which can be changed in `apps/indexer/config/config.exs`: |
||||||
|
|
||||||
|
``` |
||||||
|
config :indexer, memory_limit: 1 <<< 30 |
||||||
|
``` |
||||||
|
|
||||||
|
Memory usage is checked once per minute. If the soft-limit is reached, the shrinkable work queues will shed half their load. The shed load will be restored from the database, the same as when a restart of the server occurs, so rebuilding the work queue will be slower, but use less memory. |
||||||
|
|
||||||
|
If all queues are at their minimum size, then no more memory can be reclaimed and an error will be logged. |
@ -0,0 +1,40 @@ |
|||||||
|
<!--metrics.md --> |
||||||
|
|
||||||
|
## Metrics |
||||||
|
|
||||||
|
### Wobserver |
||||||
|
|
||||||
|
[Wobserver](https://github.com/shinyscorpion/wobserver) is configured to display data from the `/metrics` endpoint in a web interface. To view, go to `/wobserver` for the chain you would like to view. |
||||||
|
|
||||||
|
For example `https://blockscout.com/eth/mainnet/wobserver` |
||||||
|
|
||||||
|
### Prometheus |
||||||
|
|
||||||
|
BlockScout is setup to export [Prometheus](https://prometheus.io/) metrics at `/metrics`. |
||||||
|
|
||||||
|
1. Install prometheus: `brew install prometheus` |
||||||
|
2. Start the web server `iex -S mix phx.server` |
||||||
|
3. Start prometheus: `prometheus --config.file=prometheus.yml` (a sample config is sketched below)
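A minimal `prometheus.yml` for scraping a locally running instance might look like the following; the job name and the `localhost:4000` target are assumptions based on BlockScout's default port:

```bash
# Write a minimal scrape config and start prometheus with it.
cat > prometheus.yml <<'EOF'
scrape_configs:
  - job_name: blockscout
    scrape_interval: 10s
    metrics_path: /metrics
    static_configs:
      - targets: ['localhost:4000']
EOF
prometheus --config.file=prometheus.yml
```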
||||||
|
|
||||||
|
### Grafana |
||||||
|
|
||||||
|
The Grafana dashboard may also be used for metrics display. |
||||||
|
|
||||||
|
1. Install grafana: `brew install grafana` |
||||||
|
2. Install Pie Chart panel plugin: `grafana-cli plugins install grafana-piechart-panel` |
||||||
|
3. Start grafana: `brew services start grafana` |
||||||
|
4. Add Prometheus as a Data Source |
||||||
|
1. `open http://localhost:3000/datasources` |
||||||
|
2. Click "+ Add data source" |
||||||
|
3. Put "Prometheus" for "Name" |
||||||
|
4. Change "Type" to "Prometheus" |
||||||
|
5. Set "URL" to "http://localhost:9090" |
||||||
|
6. Set "Scrape Interval" to "10s" |
||||||
|
5. Add the dashboards from https://github.com/deadtrickster/beam-dashboards: |
||||||
|
For each `*.json` file in the repo. |
||||||
|
1. `open http://localhost:3000/dashboard/import` |
||||||
|
2. Copy the contents of the JSON file in the "Or paste JSON" entry |
||||||
|
3. Click "Load" |
||||||
|
6. View the dashboards. (You will need to click-around and use BlockScout for the web-related metrics to show up.) |
||||||
|
|
||||||
|
|
@ -0,0 +1,20 @@ |
|||||||
|
<!-- projects.md --> |
||||||
|
|
||||||
|
### Supported Projects |
||||||
|
|
||||||
|
| **Hosted Mainnets** | **Hosted Testnets** | **Additional Chains using BlockScout** | |
||||||
|
|--------------------------------------------------------|-------------------------------------------------------|----------------------------------------------------| |
||||||
|
| [Aerum](https://blockscout.com/aerum/mainnet) | [Goerli Testnet](https://blockscout.com/eth/goerli) | [ARTIS](https://explorer.sigma1.artis.network) | |
||||||
|
| [Callisto](https://blockscout.com/callisto/mainnet) | [Kovan Testnet](https://blockscout.com/eth/kovan) | [Ether-1](https://blocks.ether1.wattpool.net/) | |
||||||
|
| [Ethereum Classic](https://blockscout.com/etc/mainnet) | [POA Sokol Testnet](https://blockscout.com/poa/sokol) | [Fuse Network](https://explorer.fuse.io/) | |
||||||
|
| [Ethereum Mainnet](https://blockscout.com/eth/mainnet) | [Rinkeby Testnet](https://blockscout.com/eth/rinkeby) | [Oasis Labs](https://blockexplorer.oasiscloud.io/) | |
||||||
|
| [POA Core Network](https://blockscout.com/poa/core) | [Ropsten Testnet](https://blockscout.com/eth/ropsten) | [Petrichor](https://explorer.petrachor.com/) | |
||||||
|
| [RSK](https://blockscout.com/rsk/mainnet) | | [PIRL](http://pirl.es/) | |
||||||
|
| [xDai Chain](https://blockscout.com/poa/dai) | | [SafeChain](https://explorer.safechain.io) | |
||||||
|
| | | [SpringChain](https://explorer.springrole.com/) | |
||||||
|
| | | [Kotti Testnet](https://kottiexplorer.ethernode.io/) | |
||||||
|
| | | [Loom](http://plasma-blockexplorer.dappchains.com/) | |
||||||
|
| | | [Tenda](https://tenda.network) | |
||||||
|
|
||||||
|
|
||||||
|
Current BlockScout versions for hosted projects are available [on the forum](https://forum.poa.network/t/deployed-instances-on-blockscout-com/1938). |
@ -0,0 +1,15 @@ |
|||||||
|
<!-- requirements.md --> |
||||||
|
|
||||||
|
## Requirements |
||||||
|
|
||||||
|
| Dependency | Mac | Linux | |
||||||
|
|-------------|-----|-------| |
||||||
|
| [Erlang/OTP 21.0.4](https://github.com/erlang/otp) | `brew install erlang` | [Erlang Install Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L134) | |
||||||
|
| [Elixir 1.8.1](https://elixir-lang.org/) | :point_up: | [Elixir Install Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L138) | |
||||||
|
| [Postgres 10.3](https://www.postgresql.org/) | `brew install postgresql` | [Postgres Install Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L187) | |
||||||
|
| [Node.js 10.x.x](https://nodejs.org/en/) | `brew install node` | [Node.js Install Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L66) | |
||||||
|
| [Automake](https://www.gnu.org/software/automake/) | `brew install automake` | [Automake Install Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L72) | |
||||||
|
| [Libtool](https://www.gnu.org/software/libtool/) | `brew install libtool` | [Libtool Install Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L62) | |
||||||
|
| [Inotify-tools](https://github.com/rvoicilas/inotify-tools/wiki) | Not Required | Ubuntu - `apt-get install inotify-tools` | |
||||||
|
| [GCC Compiler](https://gcc.gnu.org/) | `brew install gcc` | [GCC Compiler Example](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L70) | |
||||||
|
| [GMP](https://gmplib.org/) | `brew install gmp` | [Install GMP Devel](https://github.com/poanetwork/blockscout-terraform/blob/33f68e816e36dc2fb055911fa0372531f0e956e7/modules/stack/libexec/init.sh#L74) | |
@ -0,0 +1,7 @@ |
|||||||
|
<!--restarts.md --> |
||||||
|
|
||||||
|
## Automating Restarts |
||||||
|
|
||||||
|
By default `BlockScout` does not restart if it crashes. To enable automated restarts, set the [environment variable](env-variables.md) `HEART_COMMAND` to whatever command you run to start `BlockScout`. Configure the heart beat timeout to change how long it waits before considering the application unresponsive. |
||||||
|
|
||||||
|
At that point, it will kill the current blockscout instance and execute the `HEART_COMMAND`. By default a crash dump is not written unless you set `ERL_CRASH_DUMP_SECONDS` to a positive or negative integer. See the [heart](http://erlang.org/doc/man/heart.html) documentation for more information. |
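As a sketch, the corresponding [environment variables](env-variables.md) could be exported before starting BlockScout; the systemd unit name is simply the default from the variables table, not a requirement:

```bash
export HEART_COMMAND="systemctl restart explorer.service"
export HEART_BEAT_TIMEOUT=30
# Optional: also write a crash dump; a negative value means no time limit on writing it.
export ERL_CRASH_DUMP_SECONDS=-1
```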
@ -0,0 +1,92 @@ |
|||||||
|
<!-- smart-contract.md --> |
||||||
|
|
||||||
|
# Verifying a smart contract in BlockScout |
||||||
|
|
||||||
|
Once verified, a smart contract or token contract's source code becomes publicly available and verifiable. This creates transparency and trust. Plus, it's easy to do! |
||||||
|
|
||||||
|
1. Go to [blockscout.com](https://blockscout.com/), verify you are on the chain where the contract was deployed, and type the contract's address into the search bar. Your contract details should come up. |
||||||
|
|
||||||
|
2. Select the `Code` tab to view the bytecode. |
||||||
|
|
||||||
|
![BlockScout_1|690x391](_media/sc1.jpeg) |
||||||
|
|
||||||
|
3. In the code tab view, click the `Verify & Publish` button. |
||||||
|
|
||||||
|
![Blockscout_2|690x195](_media/sc2.jpeg) |
||||||
|
|
||||||
|
4. On the following screen, enter your contract details: |
||||||
|
1. **Contract Address:** The `0x` address supplied on contract creation. |
||||||
|
2. **Contract Name:** Name of the class whose constructor was called in the .sol file. For example, in `contract MyContract {..` **MyContract** is the contract name. |
||||||
|
3. **Compiler:** derived from the first line in the contract `pragma solidity X.X.X`. Use the corresponding compiler version rather than the nightly build. |
||||||
|
4. **EVM Version:** [See EVM version](#evm-version) |
||||||
|
5. **Optimization:** If you enabled optimization during compilation, check yes. |
||||||
|
6. **Enter the Solidity Contract Code:** You may need to flatten your solidity code if it utilizes a library or inherits dependencies from another contract. We recommend the [POA solidity flattener](https://github.com/poanetwork/solidity-flattener) or the [truffle flattener](https://www.npmjs.com/package/truffle-flattener) (a usage sketch follows these steps).
||||||
|
7. **Constructor Arguments:** [See this post for more info](https://forum.poa.network/t/smart-contract-verification-abi-encoded-constructor-arguments/2331)
||||||
|
8. **Libraries:** Enter the name and 0x address for any required libraries called in the .sol file.
||||||
|
9. Click the `Verify and Publish` button. |
||||||
|
|
||||||
|
5. If all goes well, you will see a green checkmark next to the code, and an additional tab where you can read the contract. In addition, the contract name will appear in BlockScout with any transactions related to your contract. |
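If your contract does need flattening before it is pasted into the form, the truffle flattener mentioned in the steps above can be used roughly as follows; the contract path is hypothetical:

```bash
# Run from your project root; writes the flattened source next to it.
npm install -g truffle-flattener
truffle-flattener contracts/MyContract.sol > MyContract_flat.sol
```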
||||||
|
|
||||||
|
## Troubleshooting: |
||||||
|
|
||||||
|
If you receive the dreaded `There was an error compiling your contract` message, this means the bytecode doesn't match the supplied source code. Unfortunately, there are many reasons this may be the case. Here are a few things to try:
||||||
|
|
||||||
|
1. Double check the compiler version is correct. |
||||||
|
|
||||||
|
2. Check that an extra space has not been added to the end of the contract. When pasting in, an extra space may be added. Delete this and attempt to recompile. |
||||||
|
|
||||||
|
3. Copy, paste and verify your source code in Remix. You may find some exceptions here. |
||||||
|
|
||||||
|
|
||||||
|
# EVM Version |
||||||
|
|
||||||
|
You are asked to provide the EVM version the contract uses during the verification process. If the bytecode does not match the version, we try to verify using the latest EVM version. |
||||||
|
|
||||||
|
For more information, see the [Solidity docs on specifying the EVM version when compiling a contract](https://solidity.readthedocs.io/en/v0.5.3/using-the-compiler.html). Note that backward compatibility is not guaranteed between each version. |
||||||
|
|
||||||
|
||Name|Date|Mainnet Block #|Relevant changes / opcode specs|EIP details| |
||||||
|
| --- | --- | --- | --- | --- | --- | |
||||||
|
|1|Homestead|2016-03-14|1,150,000|Oldest version|http://eips.ethereum.org/EIPS/eip-606| |
||||||
|
|2|Tangerine Whistle|2016-10-18|2,463,000|Gas cost to access other accounts increased, impacts gas estimation and optimization. <br /><br />All gas sent by default for external calls, previously a certain amount had to be retained.|http://eips.ethereum.org/EIPS/eip-608| |
||||||
|
|3|Spurious Dragon|2016-11-18|2,675,000|Gas cost for the `exp` opcode increased, impacts gas estimation and optimization.|http://eips.ethereum.org/EIPS/eip-607| |
||||||
|
|4|Byzantium|2017-12-17|4,370,000|Opcodes `returndatacopy`, `returndatasize` and `staticcall` available in assembly.<br /><br /> `staticcall` opcode used when calling non-library view or pure functions, which prevents the functions from modifying state at the EVM level, this even applies to invalid type conversions.<br /><br /> Ability to access dynamic data returned from function calls. <br /><br /> `revert` opcode introduced, `revert()` will not waste gas.|http://eips.ethereum.org/EIPS/eip-609| |
||||||
|
|5|Constantinople|2019-02-22|7,280,000|Opcodes `create2`, `extcodehash`, `shl`, `shr` and `sar` are available in assembly.<br /><br /> Bitwise shifting operators use shifting opcodes (`shl`,`shr`,`sar`), requiring less gas.|http://eips.ethereum.org/EIPS/eip-1013| |
||||||
|
|6|Petersburg|2019-02-22|7,280,000|No changes related to contract compiling (removes EIP 1283)|http://eips.ethereum.org/EIPS/eip-1716| |
||||||
|
|
||||||
|
# ABI-Encoded Constructor Arguments |
||||||
|
|
||||||
|
If Constructor Arguments are required by the contract, you will add them to the Constructor Arguments field in [ABI hex encoded form](https://solidity.readthedocs.io/en/develop/abi-spec.html). Constructor arguments are appended to the END of the contract source bytecode when compiled by Solidity. |
||||||
|
|
||||||
|
An easy way to find these arguments is to compare the ‘raw input’ code in the transaction details to the contract creation code in the code section of the contract.
||||||
|
|
||||||
|
1. Access the contract creation TX in BlockScout. This is the transaction that created the contract, not the address of the actual contract. You should see a link to it in your wallet history. |
||||||
|
|
||||||
|
![nifty_wallet_history|294x500,75%](_media/abi1.jpeg) |
||||||
|
|
||||||
|
2. Go to the transaction details page for the contract creation TX. Within the details, you will see the Raw input. Copy this input in Hex format and paste into a txt or spreadsheet where you will compare against a second ABI code. |
||||||
|
|
||||||
|
![copy_raw_input|548x500](_media/abi2.jpeg) |
||||||
|
|
||||||
|
3. Go to the contract creation address. You can access through the transaction details at the top: |
||||||
|
|
||||||
|
![contract_address|548x500](_media/abi3.jpeg) |
||||||
|
|
||||||
|
4. In Contract Address Details, click on the Code tab. |
||||||
|
|
||||||
|
![code_tab|690x417](_media/abi4.jpeg) |
||||||
|
|
||||||
|
5. Copy the contract creation code. |
||||||
|
|
||||||
|
![copy_contract_creation_code|690x407](_media/abi5.jpeg) |
||||||
|
|
||||||
|
6. Paste into a document next to the original raw input ABI. This will allow you to compare the two. Anything that appears at the **END** of the Raw input code that does not exist at the end of the Contract Code is the ABI code for the constructor arguments. |
||||||
|
|
||||||
|
![contract_compare|690x177](_media/abi6.jpeg) |
||||||
|
|
||||||
|
7. The code may differ in other ways, but the constructor arguments will appear at the end. Copy this extra code and paste into the constructor arguments field along with the other information needed to verify your contract. |
||||||
|
|
||||||
|
![smart_contract_paste|620x500](_media/abi7.jpeg) |
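If you prefer the command line, the same comparison can be sketched in a couple of lines of shell. This assumes you saved the 'Raw input' and the contract creation code to files and that the creation code is an exact prefix of the raw input; as noted in step 7, the two can also differ elsewhere, in which case a manual comparison is still needed.

```bash
# raw_input.hex     -> 'Raw input' copied from the contract creation transaction
# creation_code.hex -> contract creation code copied from the Code tab
raw=$(tr -d '[:space:]' < raw_input.hex)
code=$(tr -d '[:space:]' < creation_code.hex)
# If the creation code is an exact prefix, the remainder is the ABI-encoded
# constructor arguments to paste into the verification form.
echo "${raw#"$code"}"
```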
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -0,0 +1,3 @@ |
|||||||
|
<!-- terminology.md --> |
||||||
|
|
||||||
|
_Coming Soon_ |
@ -0,0 +1,82 @@ |
|||||||
|
<!--testing.md --> |
||||||
|
|
||||||
|
## Testing |
||||||
|
|
||||||
|
### Requirements |
||||||
|
|
||||||
|
* PhantomJS (for wallaby) |
||||||
|
|
||||||
|
### Running tests |
||||||
|
|
||||||
|
1. Build assets. |
||||||
|
`cd apps/block_scout_web/assets && npm run build; cd -` |
||||||
|
|
||||||
|
2. Format Elixir code. |
||||||
|
`mix format` |
||||||
|
|
||||||
|
3. Run the test suite with coverage for the whole umbrella project. This step can be run with the different configurations outlined below.
||||||
|
`mix coveralls.html --umbrella` |
||||||
|
|
||||||
|
4. Lint Elixir code. |
||||||
|
`mix credo --strict` |
||||||
|
|
||||||
|
5. Run the dialyzer. |
||||||
|
`mix dialyzer --halt-exit-status` |
||||||
|
|
||||||
|
6. Check the Elixir code for vulnerabilities. |
||||||
|
`cd apps/explorer && mix sobelow --config; cd -` |
||||||
|
`cd apps/block_scout_web && mix sobelow --config; cd -` |
||||||
|
|
||||||
|
7. Lint JavaScript code. |
||||||
|
`cd apps/block_scout_web/assets && npm run eslint; cd -` |
||||||
|
|
||||||
|
8. Test JavaScript code. |
||||||
|
`cd apps/block_scout_web/assets && npm run test; cd -` |
||||||
|
|
||||||
|
#### Parity |
||||||
|
|
||||||
|
##### Mox |
||||||
|
|
||||||
|
**This is the default setup. `mix coveralls.html --umbrella` will work on its own, but to be explicit, use the following setup**: |
||||||
|
|
||||||
|
```shell |
||||||
|
export ETHEREUM_JSONRPC_CASE=EthereumJSONRPC.Case.Parity.Mox |
||||||
|
export ETHEREUM_JSONRPC_WEB_SOCKET_CASE=EthereumJSONRPC.WebSocket.Case.Mox |
||||||
|
mix coveralls.html --umbrella --exclude no_parity |
||||||
|
``` |
||||||
|
|
||||||
|
##### HTTP / WebSocket |
||||||
|
|
||||||
|
```shell |
||||||
|
export ETHEREUM_JSONRPC_CASE=EthereumJSONRPC.Case.Parity.HTTPWebSocket |
||||||
|
export ETHEREUM_JSONRPC_WEB_SOCKET_CASE=EthereumJSONRPC.WebSocket.Case.Parity |
||||||
|
mix coveralls.html --umbrella --exclude no_parity |
||||||
|
``` |
||||||
|
|
||||||
|
| Protocol | URL | |
||||||
|
|:----------|:-----------------------------------| |
||||||
|
| HTTP | `http://localhost:8545` | |
||||||
|
| WebSocket | `ws://localhost:8546` | |
||||||
|
|
||||||
|
#### Geth |
||||||
|
|
||||||
|
##### Mox |
||||||
|
|
||||||
|
```shell |
||||||
|
export ETHEREUM_JSONRPC_CASE=EthereumJSONRPC.Case.Geth.Mox |
||||||
|
export ETHEREUM_JSONRPC_WEB_SOCKET_CASE=EthereumJSONRPC.WebSocket.Case.Mox |
||||||
|
mix coveralls.html --umbrella --exclude no_geth |
||||||
|
``` |
||||||
|
|
||||||
|
##### HTTP / WebSocket |
||||||
|
|
||||||
|
```shell |
||||||
|
export ETHEREUM_JSONRPC_CASE=EthereumJSONRPC.Case.Geth.HTTPWebSocket |
||||||
|
export ETHEREUM_JSONRPC_WEB_SOCKET_CASE=EthereumJSONRPC.WebSocket.Case.Geth |
||||||
|
mix coveralls.html --umbrella --exclude no_geth |
||||||
|
``` |
||||||
|
|
||||||
|
| Protocol | URL | |
||||||
|
|:----------|:--------------------------------------------------| |
||||||
|
| HTTP | `https://mainnet.infura.io/8lTvJTKmHPCHazkneJsY` | |
||||||
|
| WebSocket | `wss://mainnet.infura.io/ws/8lTvJTKmHPCHazkneJsY` | |
@ -0,0 +1,25 @@ |
|||||||
|
<!--tracing.md --> |
||||||
|
|
||||||
|
## Tracing |
||||||
|
|
||||||
|
BlockScout supports tracing via [Spandex](https://github.com/spandex-project/spandex). Each application has its own internally configured tracer.
||||||
|
|
||||||
|
To enable tracing, visit each application's `config/<env>.ex` and change `disabled?: true` to `disabled?: false`. Do this for |
||||||
|
each application you'd like included in your trace data. |
||||||
|
|
||||||
|
Currently, only [Datadog](https://www.datadoghq.com/) is supported as a |
||||||
|
tracing backend, but more will be added soon. |
||||||
|
|
||||||
|
### DataDog |
||||||
|
|
||||||
|
If you would like to use DataDog, after enabling `Spandex`, set |
||||||
|
`"DATADOG_HOST"` and `"DATADOG_PORT"` environment variables to the |
||||||
|
host/port that your Datadog agent is running on. For more information on |
||||||
|
Datadog and the Datadog agent, see the [documentation](https://docs.datadoghq.com/). |
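For example, with an agent running on the same machine (port `8126` is the Datadog agent's usual APM default, not something BlockScout sets):

```bash
export DATADOG_HOST=localhost
export DATADOG_PORT=8126
```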
||||||
|
|
||||||
|
### Other |
||||||
|
|
||||||
|
If you want to use a different backend, remove the |
||||||
|
`SpandexDatadog.ApiServer` `Supervisor.child_spec` from |
||||||
|
`Explorer.Application` and follow any instructions provided in `Spandex` |
||||||
|
for setting up that backend. |
@ -0,0 +1,14 @@ |
|||||||
|
<!-- umbrella.md --> |
||||||
|
|
||||||
|
## Umbrella Project Organization |
||||||
|
|
||||||
|
BlockScout is an Elixir [umbrella project](https://elixir-lang.org/getting-started/mix-otp/dependencies-and-umbrella-projects.html). Each directory under `apps/` is a separate [Mix](https://hexdocs.pm/mix/Mix.html) project and [OTP application](https://hexdocs.pm/elixir/Application.html), but the projects can use each other as a dependency in their `mix.exs`. |
||||||
|
|
||||||
|
Each OTP application has a restricted domain. |
||||||
|
|
||||||
|
| Directory | OTP Application | Namespace | Purpose | |
||||||
|
|:------------------------|:--------------------|:------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| |
||||||
|
| `apps/ethereum_jsonrpc` | `:ethereum_jsonrpc` | `EthereumJSONRPC` | Ethereum JSONRPC client. It is allowed to know `Explorer`'s param format, but it cannot directly depend on `:explorer` | |
||||||
|
| `apps/explorer` | `:explorer` | `Explorer` | Storage for the indexed chain. Can read and write to the backing storage. MUST be able to boot in a read-only mode when run independently from `:indexer`, so cannot depend on `:indexer` as that would start `:indexer` indexing. | |
||||||
|
| `apps/block_scout_web` | `:block_scout_web` | `BlockScoutWeb` | Phoenix interface to `:explorer`. The minimum interface to allow web access should go in `:block_scout_web`. Any business rules or interface not tied directly to `Phoenix` or `Plug` should go in `:explorer`. MUST be able to boot in a read-only mode when run independently from `:indexer`, so cannot depend on `:indexer` as that would start `:indexer` indexing. | |
||||||
|
| `apps/indexer` | `:indexer` | `Indexer` | Uses `:ethereum_jsonrpc` to index chain and batch import data into `:explorer`. Any process, `Task`, or `GenServer` that automatically reads from the chain and writes to `:explorer` should be in `:indexer`. This restricts automatic writes to `:indexer` and read-only mode can be achieved by not running `:indexer`. | |
@ -0,0 +1,3 @@ |
|||||||
|
## Upgrading Guide |
||||||
|
|
||||||
|
**Upgrade instructions are in progress. If you need assistance with an upgrade, please contact us through the [forum](https://forum.poa.network/c/blockscout) or [gitter](https://gitter.im/poanetwork/blockscout) channel.** |