commit 4ec8ba3bec

@@ -0,0 +1,80 @@
import { asyncReducer, asyncInitialState } from '../../js/lib/async_listing_load'

describe('ELEMENTS_LOAD', () => {
  test('sets only nextPagePath and ignores other keys', () => {
    const state = Object.assign({}, asyncInitialState)
    const action = { type: 'ELEMENTS_LOAD', nextPagePath: 'set', foo: 1 }
    const output = asyncReducer(state, action)

    expect(output.foo).not.toEqual(1)
    expect(output.nextPagePath).toEqual('set')
  })
})

describe('ADD_ITEM_KEY', () => {
  test('sets itemKey to what was passed in the action', () => {
    const expectedItemKey = 'expected.Key'

    const state = Object.assign({}, asyncInitialState)
    const action = { type: 'ADD_ITEM_KEY', itemKey: expectedItemKey }
    const output = asyncReducer(state, action)

    expect(output.itemKey).toEqual(expectedItemKey)
  })
})

describe('START_REQUEST', () => {
  test('sets loading status to true', () => {
    const state = Object.assign({}, asyncInitialState, { loading: false })
    const action = { type: 'START_REQUEST' }
    const output = asyncReducer(state, action)

    expect(output.loading).toEqual(true)
  })
})

describe('REQUEST_ERROR', () => {
  test('sets requestError to true', () => {
    const state = Object.assign({}, asyncInitialState, { requestError: false })
    const action = { type: 'REQUEST_ERROR' }
    const output = asyncReducer(state, action)

    expect(output.requestError).toEqual(true)
  })
})

describe('FINISH_REQUEST', () => {
  test('sets loading status to false', () => {
    const state = Object.assign({}, asyncInitialState, {
      loading: true,
      loadingFirstPage: true
    })
    const action = { type: 'FINISH_REQUEST' }
    const output = asyncReducer(state, action)

    expect(output.loading).toEqual(false)
    expect(output.loadingFirstPage).toEqual(false)
  })
})

describe('ITEMS_FETCHED', () => {
  test('sets the items to what was passed in the action', () => {
    const expectedItems = [1, 2, 3]

    const state = Object.assign({}, asyncInitialState)
    const action = { type: 'ITEMS_FETCHED', items: expectedItems }
    const output = asyncReducer(state, action)

    expect(output.items).toEqual(expectedItems)
  })
})

describe('NAVIGATE_TO_OLDER', () => {
  test('sets beyondPageOne to true', () => {
    const state = Object.assign({}, asyncInitialState, { beyondPageOne: false })
    const action = { type: 'NAVIGATE_TO_OLDER' }
    const output = asyncReducer(state, action)

    expect(output.beyondPageOne).toEqual(true)
  })
})
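// For orientation, a minimal sketch of the reducer shape the tests above
// assume. This is NOT the real implementation in async_listing_load.js;
// it only illustrates the action types exercised, with just the state keys
// the assertions check:
function exampleAsyncReducer (state, action) {
  switch (action.type) {
    case 'ELEMENTS_LOAD':
      // copies only the whitelisted key, which is why `foo` is ignored above
      return Object.assign({}, state, { nextPagePath: action.nextPagePath })
    case 'ADD_ITEM_KEY':
      return Object.assign({}, state, { itemKey: action.itemKey })
    case 'START_REQUEST':
      return Object.assign({}, state, { loading: true })
    case 'REQUEST_ERROR':
      return Object.assign({}, state, { requestError: true })
    case 'FINISH_REQUEST':
      return Object.assign({}, state, { loading: false, loadingFirstPage: false })
    case 'ITEMS_FETCHED':
      return Object.assign({}, state, { items: action.items })
    case 'NAVIGATE_TO_OLDER':
      return Object.assign({}, state, { beyondPageOne: true })
    default:
      return state
  }
}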

@@ -0,0 +1,19 @@

// replace the default background color from highlightjs
.hljs {
  background: $gray-100;
}

.line-numbers {

  [data-line-number] {

    &:before {
      content: attr(data-line-number);
      display: inline-block;
      border-right: 1px solid $gray-400;
      padding: 0 .5em;
      margin-right: .5em;
      color: $gray-600;
    }
  }
}

@@ -0,0 +1,9 @@

import $ from 'jquery'
import hljs from 'highlight.js'
import hljsDefineSolidity from 'highlightjs-solidity'

// only activate highlighting on pages with this selector
if ($('[data-activate-highlight]').length > 0) {
  hljsDefineSolidity(hljs)
  hljs.initHighlightingOnLoad()
}
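// Usage sketch (markup assumed, not part of this commit): a template opts in
// by rendering code inside an element carrying the data attribute, e.g.
// <div data-activate-highlight><pre><code class="solidity">...</code></pre></div>
// Content injected after page load is not covered by initHighlightingOnLoad(),
// so it would need an explicit pass such as:
$('[data-activate-highlight] pre code').each((_i, block) => hljs.highlightBlock(block))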

@@ -0,0 +1,121 @@

defmodule BlockScoutWeb.Tokens.InventoryControllerTest do
  use BlockScoutWeb.ConnCase

  describe "GET index/3" do
    test "with invalid address hash", %{conn: conn} do
      conn = get(conn, token_inventory_path(conn, :index, "invalid_address"))

      assert html_response(conn, 404)
    end

    test "with a token that doesn't exist", %{conn: conn} do
      address = build(:address)
      conn = get(conn, token_inventory_path(conn, :index, address.hash))

      assert html_response(conn, 404)
    end

    test "successfully renders the page", %{conn: conn} do
      token_contract_address = insert(:contract_address)
      token = insert(:token, type: "ERC-721", contract_address: token_contract_address)

      transaction =
        :transaction
        |> insert()
        |> with_block()

      insert(
        :token_transfer,
        transaction: transaction,
        token_contract_address: token_contract_address,
        token: token
      )

      conn =
        get(
          conn,
          token_inventory_path(conn, :index, token_contract_address.hash)
        )

      assert html_response(conn, 200)
    end

    test "returns next page of results based on last seen token balance", %{conn: conn} do
      token = insert(:token, type: "ERC-721")

      transaction =
        :transaction
        |> insert()
        |> with_block()

      second_page_token_balances =
        Enum.map(
          1..50,
          &insert(
            :token_transfer,
            transaction: transaction,
            token_contract_address: token.contract_address,
            token: token,
            token_id: &1 + 1000
          )
        )

      conn =
        get(conn, token_inventory_path(conn, :index, token.contract_address_hash), %{
          "token_id" => "999"
        })

      assert Enum.map(conn.assigns.unique_tokens, & &1.token_id) ==
               Enum.map(second_page_token_balances, & &1.token_id)
    end

    test "next_page_params exist if not on last page", %{conn: conn} do
      token = insert(:token, type: "ERC-721")

      transaction =
        :transaction
        |> insert()
        |> with_block()

      Enum.each(
        1..51,
        &insert(
          :token_transfer,
          transaction: transaction,
          token_contract_address: token.contract_address,
          token: token,
          token_id: &1 + 1000
        )
      )

      expected_next_page_params = %{
        "token_id" => to_string(token.contract_address_hash),
        "unique_token" => 1050
      }

      conn = get(conn, token_inventory_path(conn, :index, token.contract_address_hash))

      assert conn.assigns.next_page_params == expected_next_page_params
    end

    test "next_page_params are empty if on last page", %{conn: conn} do
      token = insert(:token, type: "ERC-721")

      transaction =
        :transaction
        |> insert()
        |> with_block()

      insert(
        :token_transfer,
        transaction: transaction,
        token_contract_address: token.contract_address,
        token: token,
        token_id: 1000
      )

      conn = get(conn, token_inventory_path(conn, :index, token.contract_address_hash))

      refute conn.assigns.next_page_params
    end
  end
end
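# A hedged sketch (assumed; not the controller's actual internals) of the
# keyset pagination the tests above imply: the next page starts strictly
# after the last seen token_id, so seeding ids 1001..1050 and passing
# "token_id" => "999" returns all fifty, and fetching page_size + 1 rows
# reveals whether next_page_params should be present. Module and field
# names here are illustrative only.
defmodule InventoryPagingSketch do
  import Ecto.Query

  def next_page_query(contract_address_hash, last_token_id, page_size \\ 50) do
    from(tt in "token_transfers",
      where: tt.token_contract_address_hash == ^contract_address_hash,
      where: tt.token_id > ^last_token_id,
      order_by: [asc: tt.token_id],
      # one extra row only to detect that a further page exists
      limit: ^(page_size + 1),
      select: %{token_id: tt.token_id}
    )
  end
end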

@@ -0,0 +1,93 @@

defmodule Explorer.ReleaseTasks do
  @moduledoc """
  Release tasks used to migrate or generate seeds.
  """

  alias Ecto.Migrator

  @start_apps [
    :crypto,
    :ssl,
    :postgrex,
    :ecto,
    # If using Ecto 3.0 or higher
    :ecto_sql
  ]

  @repos Application.get_env(:blockscout, :ecto_repos, [Explorer.Repo])

  def migrate(_argv) do
    start_services()

    run_migrations()

    stop_services()
  end

  def seed(_argv) do
    start_services()

    run_migrations()

    run_seeds()

    stop_services()
  end

  defp start_services do
    IO.puts("Starting dependencies..")
    # Start apps necessary for executing migrations
    Enum.each(@start_apps, &Application.ensure_all_started/1)

    # Start the Repo(s) for app
    IO.puts("Starting repos..")

    # Switch pool_size to 2 for ecto > 3.0
    Enum.each(@repos, & &1.start_link(pool_size: 1))
  end

  defp stop_services do
    IO.puts("Success!")
    :init.stop()
  end

  defp run_migrations do
    Enum.each(@repos, &run_migrations_for/1)
  end

  defp run_migrations_for(repo) do
    app = Keyword.get(repo.config, :otp_app)
    IO.puts("Running migrations for #{app}")
    migrations_path = priv_path_for(repo, "migrations")
    Migrator.run(repo, migrations_path, :up, all: true)
  end

  defp run_seeds do
    Enum.each(@repos, &run_seeds_for/1)
  end

  # sobelow_skip ["RCE.CodeModule"]
  defp run_seeds_for(repo) do
    # Run the seed script if it exists
    seed_script = priv_path_for(repo, "seeds.exs")

    if File.exists?(seed_script) do
      IO.puts("Running seed script..")
      Code.eval_file(seed_script)
    end
  end

  defp priv_path_for(repo, filename) do
    app = Keyword.get(repo.config, :otp_app)

    repo_underscore =
      repo
      |> Module.split()
      |> List.last()
      |> Macro.underscore()

    priv_dir = "#{:code.priv_dir(app)}"

    Path.join([priv_dir, repo_underscore, filename])
  end
end

@@ -0,0 +1,64 @@

DO $$
DECLARE
  row_count integer := 1;
  batch_size integer := 50000; -- HOW MANY ITEMS WILL BE UPDATED AT A TIME
  iterator integer := 0;
  max_row_number integer;
  next_iterator integer;
  updated_row_count integer;
  deleted_row_count integer;
BEGIN
  DROP TABLE IF EXISTS current_suicide_internal_transactions_temp;
  -- CREATES TEMP TABLE TO STORE DATA TO BE UPDATED
  CREATE TEMP TABLE current_suicide_internal_transactions_temp(
    transaction_hash bytea NOT NULL,
    index bigint NOT NULL,
    row_number integer
  );
  INSERT INTO current_suicide_internal_transactions_temp
  SELECT DISTINCT ON (transaction_hash, index)
    transaction_hash,
    index,
    ROW_NUMBER () OVER ()
  FROM internal_transactions
  WHERE type = 'suicide'
  ORDER BY transaction_hash, index DESC;

  max_row_number := (SELECT MAX(row_number) FROM current_suicide_internal_transactions_temp);
  RAISE NOTICE '% items to be updated', max_row_number;

  -- ITERATES THROUGH THE ITEMS UNTIL THE TEMP TABLE IS EMPTY
  WHILE iterator <= max_row_number LOOP
    next_iterator := iterator + batch_size;

    RAISE NOTICE '-> suicide internal transactions % to % to be updated', iterator, next_iterator - 1;

    UPDATE internal_transactions
    SET type = 'selfdestruct'
    FROM current_suicide_internal_transactions_temp
    WHERE internal_transactions.transaction_hash = current_suicide_internal_transactions_temp.transaction_hash AND
          internal_transactions.index = current_suicide_internal_transactions_temp.index AND
          current_suicide_internal_transactions_temp.row_number < next_iterator;

    GET DIAGNOSTICS updated_row_count = ROW_COUNT;

    RAISE NOTICE '-> % internal transactions updated from suicide to selfdestruct', updated_row_count;

    DELETE FROM current_suicide_internal_transactions_temp
    WHERE row_number < next_iterator;

    GET DIAGNOSTICS deleted_row_count = ROW_COUNT;

    ASSERT updated_row_count = deleted_row_count;

    -- FORCES A WAL CHECKPOINT AFTER EACH BATCH (the DO block itself still runs in a single transaction)
    CHECKPOINT;

    -- UPDATES THE COUNTER SO IT DOESN'T TURN INTO AN INFINITE LOOP
    iterator := next_iterator;
  END LOOP;

  RAISE NOTICE 'All suicide type internal transactions updated to selfdestruct. Validating constraint.';

  ALTER TABLE internal_transactions VALIDATE CONSTRAINT selfdestruct_has_from_and_to_address;
END $$;
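-- Sanity-check sketch (same database assumed): after the block above
-- completes, no 'suicide' rows should remain.
SELECT COUNT(*) FROM internal_transactions WHERE type = 'suicide';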

@@ -0,0 +1,80 @@

DO $$
DECLARE
  row_count integer := 1;
  batch_size integer := 50000; -- HOW MANY ITEMS WILL BE UPDATED AT A TIME
  iterator integer := 0;
  max_row_number integer;
  next_iterator integer;
  updated_transaction_count integer;
  deleted_internal_transaction_count integer;
  deleted_row_count integer;
BEGIN
  DROP TABLE IF EXISTS transactions_with_deprecated_internal_transactions;
  -- CREATES TEMP TABLE TO STORE DATA TO BE UPDATED
  CREATE TEMP TABLE transactions_with_deprecated_internal_transactions(
    hash bytea NOT NULL,
    row_number integer
  );
  INSERT INTO transactions_with_deprecated_internal_transactions
  SELECT DISTINCT ON (transaction_hash)
    transaction_hash,
    ROW_NUMBER () OVER ()
  FROM internal_transactions
  WHERE
    -- call_has_call_type CONSTRAINT
    (type = 'call' AND call_type IS NULL) OR
    -- call_has_input CONSTRAINT
    (type = 'call' AND input IS NULL) OR
    -- create_has_init CONSTRAINT
    (type = 'create' AND init IS NULL)
  ORDER BY transaction_hash DESC;

  max_row_number := (SELECT MAX(row_number) FROM transactions_with_deprecated_internal_transactions);
  RAISE NOTICE '% transactions to be updated', max_row_number;

  -- ITERATES THROUGH THE ITEMS UNTIL THE TEMP TABLE IS EMPTY
  WHILE iterator <= max_row_number LOOP
    next_iterator := iterator + batch_size;

    RAISE NOTICE '-> transactions with deprecated internal transactions % to % to be updated', iterator, next_iterator - 1;

    UPDATE transactions
    SET internal_transactions_indexed_at = NULL,
        error = NULL
    FROM transactions_with_deprecated_internal_transactions
    WHERE transactions.hash = transactions_with_deprecated_internal_transactions.hash AND
          transactions_with_deprecated_internal_transactions.row_number < next_iterator;

    GET DIAGNOSTICS updated_transaction_count = ROW_COUNT;

    RAISE NOTICE '-> % transactions updated to refetch internal transactions', updated_transaction_count;

    DELETE FROM internal_transactions
    USING transactions_with_deprecated_internal_transactions
    WHERE internal_transactions.transaction_hash = transactions_with_deprecated_internal_transactions.hash AND
          transactions_with_deprecated_internal_transactions.row_number < next_iterator;

    GET DIAGNOSTICS deleted_internal_transaction_count = ROW_COUNT;

    RAISE NOTICE '-> % internal transactions deleted', deleted_internal_transaction_count;

    DELETE FROM transactions_with_deprecated_internal_transactions
    WHERE row_number < next_iterator;

    GET DIAGNOSTICS deleted_row_count = ROW_COUNT;

    ASSERT updated_transaction_count = deleted_row_count;

    -- FORCES A WAL CHECKPOINT AFTER EACH BATCH (the DO block itself still runs in a single transaction)
    CHECKPOINT;

    -- UPDATES THE COUNTER SO IT DOESN'T TURN INTO AN INFINITE LOOP
    iterator := next_iterator;
  END LOOP;

  RAISE NOTICE 'All deprecated internal transactions will be refetched. Validating constraints.';

  ALTER TABLE internal_transactions VALIDATE CONSTRAINT call_has_call_type;
  ALTER TABLE internal_transactions VALIDATE CONSTRAINT call_has_input;
  ALTER TABLE internal_transactions VALIDATE CONSTRAINT create_has_init;
END $$;
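-- Sanity-check sketch: no rows should still violate any of the three
-- constraints validated above (same predicates as the temp-table fill).
SELECT COUNT(*)
FROM internal_transactions
WHERE (type = 'call' AND call_type IS NULL)
   OR (type = 'call' AND input IS NULL)
   OR (type = 'create' AND init IS NULL);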

@@ -0,0 +1,3 @@

#!/bin/sh

release_ctl eval --mfa "Explorer.ReleaseTasks.migrate/1" --argv -- "$@"

@@ -0,0 +1,3 @@

#!/bin/sh

release_ctl eval --mfa "Explorer.ReleaseTasks.seed/1" --argv -- "$@"
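# Usage sketch (binary path assumed): the custom commands wired up in
# rel/config.exs below dispatch to these two scripts, so after building
# the release the tasks run as:
_build/prod/rel/blockscout/bin/blockscout migrate
_build/prod/rel/blockscout/bin/blockscout seed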

@@ -0,0 +1,85 @@

# Import all plugins from `rel/plugins`
# They can then be used by adding `plugin MyPlugin` to
# either an environment, or release definition, where
# `MyPlugin` is the name of the plugin module.
~w(rel plugins *.exs)
|> Path.join()
|> Path.wildcard()
|> Enum.map(&Code.eval_file(&1))

defer = fn fun ->
  apply(fun, [])
end

app_root = fn ->
  if String.contains?(System.cwd!(), "apps") do
    Path.join([System.cwd!(), "/../../"])
  else
    System.cwd!()
  end
end

cookie =
  defer.(fn ->
    cookie_bytes =
      :crypto.strong_rand_bytes(32)
      |> Base.encode32()

    :ok = File.write!(Path.join(app_root.(), ".erlang_cookie"), cookie_bytes)
    :erlang.binary_to_atom(cookie_bytes, :utf8)
  end)

use Mix.Releases.Config,
  # This sets the default release built by `mix release`
  default_release: :default,
  # This sets the default environment used by `mix release`
  default_environment: Mix.env()

# For a full list of config options for both releases
# and environments, visit https://hexdocs.pm/distillery/config/distillery.html

# You may define one or more environments in this file;
# an environment's settings will override those of a release
# when building in that environment. This combination of release
# and environment configuration is called a profile.

environment :dev do
  # If you are running Phoenix, you should make sure that
  # server: true is set and the code reloader is disabled,
  # even in dev mode.
  # It is recommended that you build with MIX_ENV=prod and pass
  # the --env flag to Distillery explicitly if you want to use
  # dev mode.
  set dev_mode: true
  set include_erts: false
  set cookie: :"i6E,!mJ6|E&|.VPaDywo@N.o}BgmC$UdKXW[aK,(@U0Asfpp/NergA;CR%YW4;i6"
end

environment :prod do
  set include_erts: true
  set include_src: false
  set cookie: cookie
  set vm_args: "rel/vm.args"
end

# You may define one or more releases in this file.
# If you have not set a default release, or selected one
# when running `mix release`, the first release in the file
# will be used by default.

release :blockscout do
  set version: "1.2.0-beta"
  set applications: [
    :runtime_tools,
    block_scout_web: :permanent,
    ethereum_jsonrpc: :permanent,
    explorer: :permanent,
    indexer: :permanent
  ]
  set commands: [
    migrate: "rel/commands/migrate.sh",
    seed: "rel/commands/seed.sh"
  ]
end
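# Build sketch (Distillery 1.x flags assumed, matching the
# Mix.Releases.Config usage above): the profiles defined in this file
# are produced with
MIX_ENV=prod mix release --env=prod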

@@ -0,0 +1,3 @@

*.*
!*.exs
!.gitignore

@@ -0,0 +1,30 @@

## This file provides the arguments given to the VM at startup
## You can find a full list of flags and their behaviours at
## http://erlang.org/doc/man/erl.html

## Name of the node
-name <%= release_name %>@127.0.0.1

## Cookie for distributed erlang
-setcookie <%= release.profile.cookie %>

## Heartbeat management; auto-restarts VM if it dies or becomes unresponsive
## (Disabled by default.. use with caution!)
##-heart

## Enable kernel poll and a few async threads
##+K true
##+A 5
## On OTP 21+, the +A flag is not used anymore;
## +SDio replaces it to set the number of dirty I/O schedulers
##+SDio 5

## Increase number of concurrent ports/sockets
##-env ERL_MAX_PORTS 4096

## Tweak GC to run more often
##-env ERL_FULLSWEEP_AFTER 10

## Enable SMP automatically based on availability
## On OTP 21+, this is not needed anymore.
-smp auto