Merge branch 'main' into dan/non-evm-cli-warp-deploy

dan/non-evm-cli-warp-deploy
Daniel Savu 3 weeks ago
commit 3abe91bb9b
No known key found for this signature in database
GPG Key ID: 795E587829AF7E08
Changed files (first 100 shown; changed-line count in parentheses):

  1. .changeset/chatty-glasses-buy.md (5)
  2. .changeset/cold-dingos-give.md (5)
  3. .changeset/config.json (4)
  4. .changeset/cuddly-baboons-drive.md (6)
  5. .changeset/dirty-swans-drum.md (6)
  6. .changeset/dull-days-yell.md (8)
  7. .changeset/fresh-pigs-work.md (5)
  8. .changeset/giant-items-suffer.md (5)
  9. .changeset/gorgeous-dragons-swim.md (5)
  10. .changeset/healthy-boats-lie.md (5)
  11. .changeset/long-queens-deny.md (2)
  12. .changeset/neat-sloths-agree.md (5)
  13. .changeset/nice-deers-tan.md (5)
  14. .changeset/plenty-pens-peel.md (11)
  15. .changeset/red-nails-trade.md (5)
  16. .changeset/silent-berries-attend.md (5)
  17. .changeset/sixty-eggs-smoke.md (5)
  18. .changeset/stale-planets-dance.md (6)
  19. .changeset/sweet-houses-type.md (5)
  20. .changeset/thin-tips-explain.md (5)
  21. .changeset/tidy-meals-add.md (5)
  22. .changeset/warm-grapes-talk.md (5)
  23. .changeset/wicked-knives-care.md (6)
  24. .codespell/.codespellrc (2)
  25. .gitattributes (2)
  26. .github/CODEOWNERS (21)
  27. .github/actions/yarn-build-with-cache/action.yml (37)
  28. .github/workflows/agent-release-artifacts.yml (24)
  29. .github/workflows/codespell.yml (14)
  30. .github/workflows/monorepo-docker.yml (4)
  31. .github/workflows/rust-docker.yml (8)
  32. .github/workflows/rust-skipped.yml (18)
  33. .github/workflows/rust.yml (79)
  34. .github/workflows/static-analysis.yml (19)
  35. .github/workflows/storage-analysis.yml (4)
  36. .github/workflows/test-skipped.yml (106)
  37. .github/workflows/test.yml (310)
  38. .gitignore (1)
  39. .husky/pre-commit (7)
  40. .registryrc (2)
  41. .vscode/settings.json (6)
  42. Dockerfile (1)
  43. mono.code-workspace (11)
  44. package.json (1)
  45. rust/Dockerfile (38)
  46. rust/README.md (4)
  47. rust/agents/relayer/src/server/message_retry.rs (170)
  48. rust/agents/scraper/src/conversions.rs (39)
  49. rust/agents/validator/src/submit.rs (314)
  50. rust/chains/hyperlane-cosmos/src/libs/account.rs (57)
  51. rust/chains/hyperlane-cosmos/src/providers/mod.rs (287)
  52. rust/chains/hyperlane-ethereum/src/config.rs (54)
  53. rust/chains/hyperlane-fuel/abis/Mailbox.abi.json (530)
  54. rust/chains/hyperlane-fuel/src/mailbox.rs (164)
  55. rust/chains/hyperlane-fuel/src/provider.rs (43)
  56. rust/chains/hyperlane-sealevel/Cargo.toml (35)
  57. rust/chains/hyperlane-sealevel/src/client.rs (29)
  58. rust/chains/hyperlane-sealevel/src/error.rs (23)
  59. rust/chains/hyperlane-sealevel/src/provider.rs (84)
  60. rust/chains/hyperlane-sealevel/src/utils.rs (93)
  61. rust/config/testnet_config.json (1002)
  62. rust/hyperlane-base/src/db/mod.rs (2)
  63. rust/main/.cargo/config.toml (0)
  64. rust/main/.vscode/extensions.json (0)
  65. rust/main/.vscode/settings.json (0)
  66. rust/main/Cargo.lock (3965)
  67. rust/main/Cargo.toml (329)
  68. rust/main/agents/relayer/.cargo/config.toml (0)
  69. rust/main/agents/relayer/Cargo.toml (14)
  70. rust/main/agents/relayer/src/lib.rs (0)
  71. rust/main/agents/relayer/src/main.rs (0)
  72. rust/main/agents/relayer/src/memory_profiler.rs (0)
  73. rust/main/agents/relayer/src/merkle_tree/builder.rs (0)
  74. rust/main/agents/relayer/src/merkle_tree/mod.rs (0)
  75. rust/main/agents/relayer/src/merkle_tree/processor.rs (2)
  76. rust/main/agents/relayer/src/msg/blacklist.rs (0)
  77. rust/main/agents/relayer/src/msg/gas_payment/mod.rs (11)
  78. rust/main/agents/relayer/src/msg/gas_payment/policies/minimum.rs (0)
  79. rust/main/agents/relayer/src/msg/gas_payment/policies/mod.rs (0)
  80. rust/main/agents/relayer/src/msg/gas_payment/policies/none.rs (0)
  81. rust/main/agents/relayer/src/msg/gas_payment/policies/on_chain_fee_quoting.rs (0)
  82. rust/main/agents/relayer/src/msg/metadata/aggregation.rs (1)
  83. rust/main/agents/relayer/src/msg/metadata/base.rs (7)
  84. rust/main/agents/relayer/src/msg/metadata/ccip_read.rs (2)
  85. rust/main/agents/relayer/src/msg/metadata/mod.rs (0)
  86. rust/main/agents/relayer/src/msg/metadata/multisig/base.rs (4)
  87. rust/main/agents/relayer/src/msg/metadata/multisig/merkle_root_multisig.rs (2)
  88. rust/main/agents/relayer/src/msg/metadata/multisig/message_id_multisig.rs (2)
  89. rust/main/agents/relayer/src/msg/metadata/multisig/mod.rs (1)
  90. rust/main/agents/relayer/src/msg/metadata/null_metadata.rs (1)
  91. rust/main/agents/relayer/src/msg/metadata/routing.rs (1)
  92. rust/main/agents/relayer/src/msg/mod.rs (2)
  93. rust/main/agents/relayer/src/msg/op_queue.rs (95)
  94. rust/main/agents/relayer/src/msg/op_submitter.rs (73)
  95. rust/main/agents/relayer/src/msg/pending_message.rs (57)
  96. rust/main/agents/relayer/src/msg/processor.rs (185)
  97. rust/main/agents/relayer/src/processor.rs (0)
  98. rust/main/agents/relayer/src/prover.rs (0)
  99. rust/main/agents/relayer/src/relayer.rs (4)
  100. rust/main/agents/relayer/src/server/list_messages.rs (89)

Some files were not shown because too many files have changed in this diff.

@@ -1,5 +0,0 @@
---
'@hyperlane-xyz/cli': minor
---
Add CLI e2e typescript tests

@@ -0,0 +1,5 @@
---
'@hyperlane-xyz/sdk': patch
---
Optimize HyperlaneRelayer routing config derivation

@@ -1,8 +1,8 @@
{
"$schema": "https://unpkg.com/@changesets/config@2.3.1/schema.json",
"changelog": "@changesets/cli/changelog",
"commit": false,
"fixed": [["@hyperlane-xyz/*"]],
"commit": true,
"fixed": [["@hyperlane-xyz/!(core)|*"]],
"linked": [],
"access": "public",
"baseBranch": "main",

@@ -0,0 +1,6 @@
---
'@hyperlane-xyz/sdk': minor
'@hyperlane-xyz/core': minor
---
Check for sufficient fees in `AbstractMessageIdAuthHook` and refund the surplus

@@ -0,0 +1,6 @@
---
'@hyperlane-xyz/utils': patch
'@hyperlane-xyz/sdk': patch
---
Dedupe internals of hook and ISM module deploy code

@@ -1,8 +0,0 @@
---
'@hyperlane-xyz/infra': minor
'@hyperlane-xyz/cli': minor
'@hyperlane-xyz/sdk': minor
'@hyperlane-xyz/core': minor
---
Added sdk support for Stake weighted ISM

@@ -0,0 +1,5 @@
---
'@hyperlane-xyz/sdk': minor
---
Deploy to apechain, arbitrumnova, b3, fantom, gravity, harmony, kaia, morph, orderly, snaxchain, zeronetwork, zksync. Update default metadata in `HyperlaneCore` to `0x00001` to ensure empty metadata does not break on zksync.

@@ -1,5 +0,0 @@
---
'@hyperlane-xyz/sdk': minor
---
Enroll new validators for cyber degenchain kroma lisk lukso merlin metis mint proofofplay real sanko tangle xai taiko

@@ -1,5 +0,0 @@
---
'@hyperlane-xyz/sdk': patch
---
Estimate and add 10% gas bump for ICA initialization and enrollment

@@ -0,0 +1,5 @@
---
'@hyperlane-xyz/utils': patch
---
fix median utils func + add test

@@ -3,4 +3,4 @@
'@hyperlane-xyz/sdk': minor
---
Add ChainSubmissionStrategySchema
Add feat to allow updates to destination gas using warp apply

@@ -0,0 +1,5 @@
---
'@hyperlane-xyz/core': minor
---
Added msg.value to preverifyMessage to commit it as part of external hook payload

@@ -1,5 +0,0 @@
---
'@hyperlane-xyz/sdk': patch
---
Support DefaultFallbackRoutingIsm in metadata builder

@@ -0,0 +1,11 @@
---
'@hyperlane-xyz/widgets': minor
---
Update widgets with components from explorer and warp ui
- Add icons: Discord, Docs, Github, History, LinkedIn, Medium, Twitter, Wallet and Web
- Add animation component: Fade component
- Add components: DatetimeField and SelectField
- New stories: IconList and Fade
- Add "Icon" suffix for icons that did not have it

@@ -1,5 +0,0 @@
---
'@hyperlane-xyz/sdk': minor
---
Sorted cwNative funds by denom in transfer tx

@@ -0,0 +1,5 @@
---
'@hyperlane-xyz/core': minor
---
Disabled the ICARouter's ability to change its hook, since users don't expect the hook to change after they deploy their ICA account. Unlike the ISM, the hook is not part of the derivation on the destination chain and hence cannot be custom-configured by the user.

@@ -0,0 +1,5 @@
---
'@hyperlane-xyz/cli': minor
---
Enable configuration of IGP hooks in the CLI

@@ -1,6 +0,0 @@
---
'@hyperlane-xyz/sdk': minor
'@hyperlane-xyz/core': minor
---
ArbL2ToL1Ism handles value via the executeTransaction branch

@@ -0,0 +1,5 @@
---
'@hyperlane-xyz/sdk': patch
---
Fix ICA ISM self relay

@@ -0,0 +1,5 @@
---
'@hyperlane-xyz/sdk': minor
---
Introduce utils that can be reused by the CLI and Infra for fetching token prices from Coingecko and gas prices from EVM/Cosmos chains.

@@ -0,0 +1,5 @@
---
'@hyperlane-xyz/utils': patch
---
Filter undefined/null values in invertKeysAndValues function

@@ -1,5 +0,0 @@
---
'@hyperlane-xyz/sdk': patch
---
Improved check for mailbox initialization

@@ -1,6 +0,0 @@
---
'@hyperlane-xyz/cli': minor
'@hyperlane-xyz/sdk': minor
---
Add Safe submit functionality to warp apply

@@ -1,5 +1,5 @@
[codespell]
skip = .git,node_modules,yarn.lock,Cargo.lock,./typescript/helloworld,./rust/config,./rust/sealevel/environments/mainnet3/chain-config.json
skip = .git,node_modules,yarn.lock,Cargo.lock,./typescript/helloworld,./rust/main/config,./rust/sealevel/environments/mainnet3/chain-config.json
count =
quiet-level = 3
ignore-words = ./.codespell/ignore.txt

.gitattributes vendored (2)

@@ -1,4 +1,4 @@
typescript/sdk/src/cw-types/*.types.ts linguist-generated=true
rust/chains/hyperlane-ethereum/abis/*.abi.json linguist-generated=true
rust/main/chains/hyperlane-ethereum/abis/*.abi.json linguist-generated=true
solidity/contracts/interfaces/avs/*.sol linguist-vendored=true
solidity/contracts/avs/ECDSA*.sol linguist-vendored=true

@@ -1,29 +1,22 @@
# File extension owners
*.sol @yorhodes @tkporter @aroralanuk @nbayindirli
*.ts @yorhodes @jmrossy @nbayindirli
*.rs @tkporter @daniel-savu
*.md @Skunkchain @avious00
*.sol @yorhodes @aroralanuk @ltyu
*.ts @yorhodes @jmrossy
*.rs @tkporter @daniel-savu @ameten
# Package owners
## Contracts
solidity/ @yorhodes @tkporter @aroralanuk @nbayindirli
solidity/ @yorhodes @tkporter @aroralanuk @ltyu
## Agents
rust/ @tkporter @daniel-savu
## SDK
typescript/sdk @yorhodes @jmrossy
## Token
typescript/token @yorhodes @jmrossy @tkporter @aroralanuk @nbayindirli
## Hello World
typescript/helloworld @yorhodes
typescript/sdk @yorhodes @jmrossy @ltyu @paulbalaji
## CLI
typescript/cli @jmrossy @yorhodes @aroralanuk @nbayindirli
typescript/cli @jmrossy @yorhodes @ltyu
## Infra
typescript/infra @tkporter
typescript/infra @tkporter @paulbalaji @Mo-Hussain

@@ -0,0 +1,37 @@
name: 'Yarn Build with Cache'
description: 'Run yarn build using yarn cache'
inputs:
ref:
description: 'The Git ref to checkout'
required: true
runs:
using: "composite"
steps:
- name: Cache
uses: buildjet/cache@v4
id: cache
with:
path: |
**/node_modules
.yarn
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
# Typically, the cache will be hit, but if there's a network error when
# restoring the cache, let's run the install step ourselves.
- name: Install dependencies
if: steps.cache.outputs.cache-hit != 'true'
shell: bash
run: |
yarn install
CHANGES=$(git status -s --ignore-submodules)
if [[ ! -z $CHANGES ]]; then
echo "Changes found: $CHANGES"
git diff
exit 1
fi
- name: Build
shell: bash
run: yarn build

@@ -16,7 +16,7 @@ env:
jobs:
prepare:
runs-on: larger-runner
runs-on: ubuntu-latest
outputs:
tag_date: ${{ steps.taggen.outputs.TAG_DATE }}
tag_sha: ${{ steps.taggen.outputs.TAG_SHA }}
@@ -58,30 +58,28 @@ jobs:
linker = "aarch64-linux-gnu-gcc"
EOF
- name: setup rust
uses: actions-rs/toolchain@v1
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
profile: minimal
target: ${{ matrix.TARGET }}
- name: setup target
run: rustup target add ${{ matrix.TARGET }}
working-directory: ./rust
working-directory: ./rust/main
- name: build
run: cargo build --release --target ${{ matrix.TARGET }} --bin relayer --bin scraper --bin validator
working-directory: ./rust
working-directory: ./rust/main
- name: make executable
if: ${{ matrix.OS == 'larger-runner' || matrix.OS == 'macos-latest' }}
run: chmod ug+x,-w relayer scraper validator
working-directory: rust/target/${{ matrix.TARGET }}/release
working-directory: rust/main/target/${{ matrix.TARGET }}/release
- name: upload binaries
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.TARGET }}-${{ needs.prepare.outputs.tag_sha }}-${{ needs.prepare.outputs.tag_date }}
path: |
rust/target/${{ matrix.TARGET }}/release/relayer
rust/target/${{ matrix.TARGET }}/release/relayer.exe
rust/target/${{ matrix.TARGET }}/release/scraper
rust/target/${{ matrix.TARGET }}/release/scraper.exe
rust/target/${{ matrix.TARGET }}/release/validator
rust/target/${{ matrix.TARGET }}/release/validator.exe
rust/main/target/${{ matrix.TARGET }}/release/relayer
rust/main/target/${{ matrix.TARGET }}/release/relayer.exe
rust/main/target/${{ matrix.TARGET }}/release/scraper
rust/main/target/${{ matrix.TARGET }}/release/scraper.exe
rust/main/target/${{ matrix.TARGET }}/release/validator
rust/main/target/${{ matrix.TARGET }}/release/validator.exe
if-no-files-found: error

@@ -21,15 +21,13 @@ jobs:
- name: Checkout the repository
uses: actions/checkout@v4
- name: pip cache
uses: actions/cache@v4
- name: Setup python
uses: actions/setup-python@v5
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
restore-keys: ${{ runner.os }}-pip-
python-version: '3.x'
- name: Install prerequisites
run: sudo pip install -r ./.codespell/requirements.txt
- name: Install codespell requirements
run: pip install -r ./.codespell/requirements.txt
- name: Spell check
run: codespell --config=./.codespell/.codespellrc
run: codespell --config=./.codespell/.codespellrc

@@ -29,7 +29,7 @@ jobs:
GCLOUD_SERVICE_KEY: ${{ secrets.GCLOUD_SERVICE_KEY }}
if: "${{ env.GCLOUD_SERVICE_KEY != '' }}"
# runs if GCLOUD_SERVICE_KEY is defined, so we set the output to true
run: echo "::set-output name=defined::true"
run: echo "defined=true" >> $GITHUB_OUTPUT
build-and-push-to-gcr:
runs-on: ubuntu-latest
@@ -77,7 +77,7 @@ jobs:
echo "REGISTRY_VERSION=$REGISTRY_VERSION" >> $GITHUB_ENV
- name: Build and push
uses: docker/build-push-action@v5
uses: docker/build-push-action@v6
with:
context: ./
file: ./Dockerfile

@@ -24,10 +24,10 @@ jobs:
GCLOUD_SERVICE_KEY: ${{ secrets.GCLOUD_SERVICE_KEY }}
if: "${{ env.GCLOUD_SERVICE_KEY != '' }}"
# runs if GCLOUD_SERVICE_KEY is defined, so we set the output to true
run: echo "::set-output name=defined::true"
run: echo "defined=true" >> $GITHUB_OUTPUT
build-and-push-to-gcr:
runs-on: ubuntu-latest
runs-on: buildjet-8vcpu-ubuntu-2204
# uses check-env to determine if secrets.GCLOUD_SERVICE_KEY is defined
needs: [check-env]
@@ -63,12 +63,10 @@ jobs:
username: _json_key
password: ${{ secrets.GCLOUD_SERVICE_KEY }}
- name: Build and push
uses: docker/build-push-action@v5
uses: docker/build-push-action@v6
with:
context: .
file: ./rust/Dockerfile
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max

@@ -4,16 +4,18 @@ name: rust
on:
push:
branches: [main]
paths-ignore:
- 'rust/**'
- .github/workflows/rust.yml
paths:
- '**/*'
- '!rust/main/**'
- '!rust/sealevel/**'
- '!.github/workflows/rust.yml'
pull_request:
branches: [main]
paths-ignore:
- 'rust/**'
- .github/workflows/rust.yml
# Support for merge queues
merge_group:
paths:
- '**/*'
- '!rust/main/**'
- '!rust/sealevel/**'
- '!.github/workflows/rust.yml'
env:
CARGO_TERM_COLOR: always

@@ -1,19 +1,23 @@
name: rust
on:
# Triggers the workflow on pushes to main branch
# only if rust/** or .github/workflows/rust.yml is changed
push:
branches: [main]
paths:
- 'rust/**'
- 'rust/main/**'
- 'rust/sealevel/**'
- .github/workflows/rust.yml
# Triggers the workflow on pull requests
# only if rust/** or .github/workflows/rust.yml is changed
pull_request:
branches: [main]
paths:
- 'rust/**'
- 'rust/main/**'
- 'rust/sealevel/**'
- .github/workflows/rust.yml
# Support for merge queues
merge_group:
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
concurrency:
@@ -24,71 +28,84 @@ env:
CARGO_TERM_COLOR: always
RUST_BACKTRACE: full
defaults:
run:
working-directory: ./rust
jobs:
test-rs:
runs-on: larger-runner
runs-on: buildjet-8vcpu-ubuntu-2204
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha || github.sha }}
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
- uses: dtolnay/rust-toolchain@stable
- name: rust cache
uses: Swatinem/rust-cache@v2
with:
prefix-key: "v3-rust"
prefix-key: 'v3'
shared-key: 'rust'
cache-provider: 'buildjet'
save-if: ${{ !startsWith(github.ref, 'refs/heads/gh-readonly-queue') }}
workspaces: |
./rust
./rust/main
./rust/sealevel
- name: Free disk space
run: |
# Based on https://github.com/actions/runner-images/issues/2840#issuecomment-790492173
sudo rm -rf /usr/share/dotnet
sudo rm -rf /opt/ghc
sudo rm -rf "/usr/local/share/boost"
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
- name: Run tests
- name: Run tests for main workspace
run: cargo test
working-directory: ./rust/main
- name: Run tests for sealevel workspace
run: cargo test
working-directory: ./rust/sealevel
lint-rs:
runs-on: larger-runner
runs-on: buildjet-8vcpu-ubuntu-2204
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha || github.sha }}
- uses: actions-rs/toolchain@v1
- uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
profile: minimal
components: rustfmt, clippy
target: wasm32-unknown-unknown
- name: rust cache
uses: Swatinem/rust-cache@v2
with:
prefix-key: "v3-rust"
prefix-key: 'v3'
shared-key: 'rust'
cache-provider: 'buildjet'
save-if: ${{ !startsWith(github.ref, 'refs/heads/gh-readonly-queue') }}
workspaces: |
./rust
./rust/main
./rust/sealevel
- name: Free disk space
run: |
# Based on https://github.com/actions/runner-images/issues/2840#issuecomment-790492173
sudo rm -rf /usr/share/dotnet
sudo rm -rf /opt/ghc
sudo rm -rf "/usr/local/share/boost"
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
- name: Check
- name: Check for main workspace
run: cargo check --release --all-features --all-targets
working-directory: ./rust/main
- name: Check for sealevel workspace
run: cargo check --release --all-features --all-targets
- name: Rustfmt
working-directory: ./rust/sealevel
- name: Rustfmt for main workspace
run: cargo fmt --all -- --check
- name: Clippy
working-directory: ./rust/main
- name: Rustfmt for sealevel workspace
run: cargo fmt --all --check
working-directory: ./rust/sealevel
- name: Clippy for main workspace
run: cargo clippy -- -D warnings
working-directory: ./rust/main
- name: Clippy for sealevel workspace
run: cargo clippy -- -D warnings
- name: Setup WASM
working-directory: ./rust/sealevel
- name: Setup WASM for main workspace
run: rustup target add wasm32-unknown-unknown
- name: Check WASM
working-directory: ./rust/main
- name: Check WASM for hyperlane-core
run: cargo check --release -p hyperlane-core --features=strum,test-utils --target wasm32-unknown-unknown
working-directory: ./rust/main

@@ -24,12 +24,14 @@ jobs:
submodules: recursive
- name: yarn-cache
uses: actions/cache@v4
uses: buildjet/cache@v4
with:
path: |
**/node_modules
.yarn
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-cache-
- name: yarn-install
run: yarn install
@@ -54,3 +56,18 @@ jobs:
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: ${{ steps.slither.outputs.sarif }}
category: "slither"
- name: Olympix Integrated Security
uses: olympix/integrated-security@main
env:
OLYMPIX_API_TOKEN: ${{ secrets.OLYMPIX_API_TOKEN }}
OLYMPIX_CLI_LOG_LEVEL: 0
with:
args: -p ./solidity/contracts --output-format sarif --output-path ./
- name: Upload result to GitHub Code Scanning
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: olympix.sarif
category: "olympix"

@@ -24,12 +24,14 @@ jobs:
node-version: 18
- name: yarn-cache
uses: actions/cache@v4
uses: buildjet/cache@v4
with:
path: |
**/node_modules
.yarn
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-cache-
- name: yarn-install
run: yarn install

@@ -1,106 +0,0 @@
name: test
on:
push:
branches: [main]
paths:
- '*.md'
- '!**/*'
pull_request:
branches:
- '*'
paths:
- '*.md'
- '!**/*'
merge_group:
concurrency:
group: e2e-${{ github.ref }}
cancel-in-progress: ${{ github.ref_name != 'main' }}
jobs:
yarn-install:
runs-on: ubuntu-latest
steps:
- name: Instant pass
run: echo "yarn-install job passed"
yarn-build:
runs-on: ubuntu-latest
steps:
- name: Instant pass
run: echo "yarn-build job passed"
lint-prettier:
runs-on: ubuntu-latest
steps:
- name: Instant pass
run: echo "lint-prettier job passed"
yarn-test:
runs-on: ubuntu-latest
steps:
- name: Instant pass
run: echo "yarn-test job passed"
agent-configs:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
environment: [mainnet3, testnet4]
steps:
- name: Instant pass
run: echo "agent-configs job passed"
e2e-matrix:
runs-on: ubuntu-latest
if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.base_ref == 'main') || github.event_name == 'merge_group'
strategy:
matrix:
e2e-type: [cosmwasm, non-cosmwasm]
steps:
- name: Instant pass
run: echo "e2e-matrix job passed"
e2e:
runs-on: ubuntu-latest
if: always()
steps:
- name: Instant pass
run: echo "e2e job passed"
cli-advanced-e2e:
runs-on: ubuntu-latest
if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.base_ref == 'main') || github.event_name == 'merge_group'
strategy:
matrix:
include:
- test-type: preset_hook_enabled
- test-type: configure_hook_enabled
- test-type: pi_with_core_chain
steps:
- name: Instant pass
run: echo "cli-advanced-e2e job passed"
env-test:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
environment: [mainnet3]
chain: [ethereum, arbitrum, optimism, inevm, viction]
module: [core, igp]
include:
- environment: testnet4
chain: sepolia
module: core
steps:
- name: Instant pass
run: echo "env-test job passed"
coverage:
runs-on: ubuntu-latest
steps:
- name: Instant pass
run: echo "coverage job passed"

@@ -1,16 +1,14 @@
name: test
on:
# Triggers the workflow on pushes to main branch
# Triggers the workflow on pushes to main branch, ignoring md files
push:
branches: [main]
# Triggers on pull requests ignoring md files
# Triggers on pull requests, ignoring md files
pull_request:
branches:
- '*' # run against all branches
# Support for merge queues
- '*'
merge_group:
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
concurrency:
@@ -37,12 +35,14 @@ jobs:
submodules: recursive
- name: yarn-cache
uses: actions/cache@v4
uses: buildjet/cache@v4
with:
path: |
**/node_modules
.yarn
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-cache-
- name: yarn-install
run: |
@@ -54,35 +54,6 @@ jobs:
exit 1
fi
yarn-build:
runs-on: ubuntu-latest
needs: [yarn-install]
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules: recursive
fetch-depth: 0
- name: yarn-cache
uses: actions/cache@v4
with:
path: |
**/node_modules
.yarn
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
- name: build-cache
uses: actions/cache@v4
with:
path: |
./*
!./rust
key: ${{ github.event.pull_request.head.sha || github.sha }}
- name: build
run: yarn build
lint-prettier:
runs-on: ubuntu-latest
needs: [yarn-install]
@@ -94,12 +65,13 @@ jobs:
fetch-depth: 0
- name: yarn-cache
uses: actions/cache@v4
uses: buildjet/cache@v4
with:
path: |
**/node_modules
.yarn
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
fail-on-cache-miss: true
- name: lint
run: yarn lint
@@ -115,7 +87,7 @@ jobs:
yarn-test:
runs-on: ubuntu-latest
needs: [yarn-build]
needs: [yarn-install]
steps:
- uses: actions/checkout@v4
with:
@@ -126,13 +98,10 @@
- name: foundry-install
uses: foundry-rs/foundry-toolchain@v1
- name: build-cache
uses: actions/cache@v4
- name: yarn-build
uses: ./.github/actions/yarn-build-with-cache
with:
path: |
./*
!./rust
key: ${{ github.event.pull_request.head.sha || github.sha }}
ref: ${{ github.event.pull_request.head.sha || github.sha }}
- name: Checkout registry
uses: ./.github/actions/checkout-registry
@@ -140,9 +109,33 @@
- name: Unit Tests
run: yarn test:ci
cli-e2e:
runs-on: ubuntu-latest
needs: [yarn-install]
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules: recursive
fetch-depth: 0
- name: foundry-install
uses: foundry-rs/foundry-toolchain@v1
- name: yarn-build
uses: ./.github/actions/yarn-build-with-cache
with:
ref: ${{ github.event.pull_request.head.sha || github.sha }}
- name: Checkout registry
uses: ./.github/actions/checkout-registry
- name: CLI e2e tests
run: yarn --cwd typescript/cli test:e2e
agent-configs:
runs-on: ubuntu-latest
needs: [yarn-build]
needs: [yarn-install]
strategy:
fail-fast: false
matrix:
@@ -153,21 +146,10 @@
ref: ${{ github.event.pull_request.head.sha || github.sha }}
fetch-depth: 0
- name: yarn-cache
uses: actions/cache@v4
- name: yarn-build
uses: ./.github/actions/yarn-build-with-cache
with:
path: |
**/node_modules
.yarn
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
- name: build-cache
uses: actions/cache@v4
with:
path: |
./*
!./rust
key: ${{ github.event.pull_request.head.sha || github.sha }}
ref: ${{ github.event.pull_request.head.sha || github.sha }}
- name: Checkout registry
uses: ./.github/actions/checkout-registry
@@ -183,9 +165,9 @@
fi
e2e-matrix:
runs-on: larger-runner
runs-on: buildjet-8vcpu-ubuntu-2204
if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.base_ref == 'main' || github.base_ref == 'cli-2.0') || github.event_name == 'merge_group'
needs: [yarn-build]
needs: [yarn-install]
strategy:
fail-fast: false
matrix:
@@ -199,23 +181,24 @@
with:
ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules: recursive
fetch-depth: 0
- name: foundry-install
uses: foundry-rs/foundry-toolchain@v1
- name: setup rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
uses: dtolnay/rust-toolchain@stable
- name: rust cache
uses: Swatinem/rust-cache@v2
with:
prefix-key: "v1-${{ runner.os }}-rust-cache"
prefix-key: 'v2-rust-e2e'
shared-key: ${{ matrix.e2e-type }}
cache-provider: 'buildjet'
save-if: ${{ !startsWith(github.ref, 'refs/heads/gh-readonly-queue') }}
workspaces: |
./rust
./rust/main
${{ matrix.e2e-type == 'non-cosmwasm' && './rust/sealevel' || '' }}
- name: Free disk space
run: |
@@ -231,21 +214,10 @@
mold-version: 2.0.0
make-default: true
- name: yarn-cache
uses: actions/cache@v4
- name: yarn-build
uses: ./.github/actions/yarn-build-with-cache
with:
path: |
**/node_modules
.yarn
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
- name: build-cache
uses: actions/cache@v4
with:
path: |
./*
!./rust
key: ${{ github.event.pull_request.head.sha || github.sha }}
ref: ${{ github.event.pull_request.head.sha || github.sha }}
- name: Install system dependencies
run: |
@@ -258,151 +230,30 @@
- name: agent tests (CosmWasm)
run: cargo test --release --package run-locally --bin run-locally --features cosmos test-utils -- cosmos::test --nocapture
if: matrix.e2e-type == 'cosmwasm'
working-directory: ./rust
working-directory: ./rust/main
env:
RUST_BACKTRACE: 'full'
- name: Check for Rust file changes
id: check-rust-changes
run: |
if [[ -n "$(git diff ${{ github.event.pull_request.head.sha || github.sha }} ${{ github.event.pull_request.base.sha }} -- ./rust)" ]]; then
echo "rust_changes=true" >> $GITHUB_OUTPUT
echo "$(git diff ${{ github.event.pull_request.head.sha || github.sha }} ${{ github.event.pull_request.base.sha }} -- ./rust)"
else
echo "rust_changes=false" >> $GITHUB_OUTPUT
fi
- name: agent tests (EVM and Sealevel)
run: cargo run --release --bin run-locally --features test-utils
if: matrix.e2e-type == 'non-cosmwasm'
working-directory: ./rust
working-directory: ./rust/main
env:
E2E_CI_MODE: 'true'
E2E_CI_TIMEOUT_SEC: '600'
E2E_KATHY_MESSAGES: '20'
RUST_BACKTRACE: 'full'
e2e:
runs-on: ubuntu-latest
needs: [e2e-matrix]
if: always() # This ensures that the job runs even if the e2e jobs fail
steps:
- name: Report Matrix Result
run: |
echo "All e2e-matrix jobs have completed."
# You can add additional commands here to report the result as needed
prebuild-cli-e2e:
runs-on: larger-runner
if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.base_ref == 'main' || github.base_ref == 'cli-2.0') || github.event_name == 'merge_group'
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules: recursive
- name: setup rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
- name: rust cache
uses: Swatinem/rust-cache@v2
with:
prefix-key: "v1-${{ runner.os }}-rust-cache"
shared-key: "cli-e2e"
workspaces: |
./rust
- name: Free disk space
run: |
# Based on https://github.com/actions/runner-images/issues/2840#issuecomment-790492173
sudo rm -rf /usr/share/dotnet
sudo rm -rf /opt/ghc
sudo rm -rf "/usr/local/share/boost"
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
- name: Install mold linker
uses: rui314/setup-mold@v1
with:
mold-version: 2.0.0
make-default: true
- name: Build validator
run: cargo build --bin validator --features test-utils
working-directory: ./rust
env:
RUST_BACKTRACE: 'full'
- name: Build relayer
run: cargo build --bin relayer --features test-utils
working-directory: ./rust
env:
RUST_BACKTRACE: 'full'
cli-advanced-e2e:
runs-on: larger-runner
if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.base_ref == 'main' || github.base_ref == 'cli-2.0') || github.event_name == 'merge_group'
needs: [yarn-build, prebuild-cli-e2e]
strategy:
matrix:
include:
- test-type: preset_hook_enabled
- test-type: configure_hook_enabled
- test-type: pi_with_core_chain
steps:
- uses: actions/setup-node@v4
with:
node-version: 18
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules: recursive
- name: foundry-install
uses: foundry-rs/foundry-toolchain@v1
- name: setup rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
- name: rust cache
uses: Swatinem/rust-cache@v2
with:
prefix-key: "v1-${{ runner.os }}-rust-cache"
shared-key: "cli-e2e"
workspaces: |
./rust
- name: Free disk space
run: |
# Based on https://github.com/actions/runner-images/issues/2840#issuecomment-790492173
sudo rm -rf /usr/share/dotnet
sudo rm -rf /opt/ghc
sudo rm -rf "/usr/local/share/boost"
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
- name: Install mold linker
uses: rui314/setup-mold@v1
with:
mold-version: 2.0.0
make-default: true
- name: yarn-cache
uses: actions/cache@v4
with:
path: |
**/node_modules
.yarn
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
- name: build-cache
uses: actions/cache@v4
with:
path: |
./*
!./rust
key: ${{ github.event.pull_request.head.sha || github.sha }}
- name: Checkout registry
uses: ./.github/actions/checkout-registry
- name: cli e2e tests
run: ./typescript/cli/ci-advanced-test.sh ${{ matrix.test-type }}
SEALEVEL_ENABLED: ${{ steps.check-rust-changes.outputs.rust_changes }}
env-test:
runs-on: ubuntu-latest
@@ -411,7 +262,7 @@ jobs:
MAINNET3_OPTIMISM_RPC_URLS: ${{ secrets.MAINNET3_OPTIMISM_RPC_URLS }}
timeout-minutes: 10
needs: [yarn-build]
needs: [yarn-install]
strategy:
fail-fast: false
matrix:
@@ -431,13 +282,10 @@
- name: foundry-install
uses: foundry-rs/foundry-toolchain@v1
- name: build-cache
uses: actions/cache@v4
- name: yarn-build
uses: ./.github/actions/yarn-build-with-cache
with:
path: |
./*
!./rust
key: ${{ github.event.pull_request.head.sha || github.sha }}
ref: ${{ github.event.pull_request.head.sha || github.sha }}
- name: Checkout registry
uses: ./.github/actions/checkout-registry
@@ -447,29 +295,17 @@
coverage:
runs-on: ubuntu-latest
needs: [yarn-test]
needs: [yarn-install]
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha || github.sha }}
fetch-depth: 0
- name: yarn-cache
uses: actions/cache@v4
with:
path: |
**/node_modules
.yarn
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
- name: build-cache
uses: actions/cache@v4
- name: yarn-build
uses: ./.github/actions/yarn-build-with-cache
with:
path: |
./*
!./rust
key: ${{ github.event.pull_request.head.sha || github.sha }}
ref: ${{ github.event.pull_request.head.sha || github.sha }}
- name: foundry-install
uses: foundry-rs/foundry-toolchain@v1

.gitignore vendored (1)

@@ -26,6 +26,5 @@ yarn-error.log
.idea
**/*.ignore
.vscode
tsconfig.editor.json

@@ -7,6 +7,9 @@ echo "📝 If you haven't yet, please add a changeset for your changes via 'yarn
# if any *.rs files have changed
if git diff --staged --exit-code --name-only | grep -q -E ".*\.rs$"; then
echo "Running cargo fmt pre-commit hook"
cargo fmt --all --check --manifest-path rust/Cargo.toml
echo "Running cargo fmt pre-commit hook for rust/main"
cargo fmt --all --check --manifest-path rust/main/Cargo.toml
echo "Running cargo fmt pre-commit hook for rust/sealevel"
cargo fmt --all --check --manifest-path rust/sealevel/Cargo.toml
fi

@@ -1 +1 @@
1d43e33fc84f486d0edf20a9e573f914e53fe94c
302be4817c063629cec70c0b02322b250df71122

@@ -0,0 +1,6 @@
{
"rust-analyzer.linkedProjects": [
"./rust/main/Cargo.toml",
"./rust/sealevel/Cargo.toml",
],
}

@@ -18,6 +18,7 @@ COPY typescript/cli/package.json ./typescript/cli/
COPY typescript/infra/package.json ./typescript/infra/
COPY typescript/ccip-server/package.json ./typescript/ccip-server/
COPY typescript/widgets/package.json ./typescript/widgets/
COPY typescript/github-proxy/package.json ./typescript/github-proxy/
COPY solidity/package.json ./solidity/
RUN yarn install && yarn cache clean

@@ -20,6 +20,10 @@
"cSpell.words": [
"hyperlane"
],
"rust-analyzer.linkedProjects": [
"./rust/main/Cargo.toml",
"./rust/sealevel/Cargo.toml",
],
},
"folders": [
{
@@ -33,7 +37,10 @@
"path": "./solidity"
},
{
"path": "./rust"
"path": "./rust/main"
},
{
"path": "./rust/sealevel"
}
],
"extensions": {
@@ -66,6 +73,8 @@
"yoavbls.pretty-ts-errors",
// Yaml language support
"redhat.vscode-yaml",
// Rust language support
"rust-lang.rust-analyzer"
],
// List of extensions recommended by VS Code that should not be recommended for users of this workspace.
"unwantedRecommendations": []

@@ -20,6 +20,7 @@
"packageManager": "yarn@4.0.2",
"private": true,
"scripts": {
"agent-configs": "yarn --cwd typescript/infra/ update-agent-config:mainnet3 && yarn --cwd typescript/infra/ update-agent-config:testnet4 && yarn prettier",
"build": "yarn workspaces foreach --all --parallel --topological run build",
"clean": "yarn workspaces foreach --all --parallel run clean",
"prettier": "yarn workspaces foreach --since --parallel run prettier",

@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:experimental
FROM rust:1.72.1 as builder
FROM rust:1.80.1 as builder
WORKDIR /usr/src
# 1a: Prepare for static linking
@@ -9,35 +9,34 @@ RUN apt-get update && \
apt-get install -y musl-tools clang && \
rustup target add x86_64-unknown-linux-musl
RUN mkdir rust
RUN mkdir -p rust/main
RUN mkdir -p rust/sealevel
# Add workspace to workdir
COPY rust/agents rust/agents
COPY rust/chains rust/chains
COPY rust/hyperlane-base rust/hyperlane-base
COPY rust/hyperlane-core rust/hyperlane-core
COPY rust/hyperlane-test rust/hyperlane-test
COPY rust/ethers-prometheus rust/ethers-prometheus
COPY rust/utils rust/utils
COPY rust/main/agents rust/main/agents
COPY rust/main/chains rust/main/chains
COPY rust/main/hyperlane-base rust/main/hyperlane-base
COPY rust/main/hyperlane-core rust/main/hyperlane-core
COPY rust/main/hyperlane-test rust/main/hyperlane-test
COPY rust/main/ethers-prometheus rust/main/ethers-prometheus
COPY rust/main/utils rust/main/utils
COPY rust/sealevel rust/sealevel
COPY rust/Cargo.toml rust/.
COPY rust/Cargo.lock rust/.
COPY rust/main/Cargo.toml rust/main/.
COPY rust/main/Cargo.lock rust/main/.
# Required for VERGEN_GIT_SHA to be populated
COPY .git .git
WORKDIR /usr/src/rust
WORKDIR /usr/src/rust/main
# Build binaries
RUN \
--mount=id=cargo,type=cache,sharing=locked,target=/usr/src/target \
--mount=id=cargo-home-registry,type=cache,sharing=locked,target=/usr/local/cargo/registry \
--mount=id=cargo-home-git,type=cache,sharing=locked,target=/usr/local/cargo/git \
RUSTFLAGS="--cfg tokio_unstable" cargo build --release --bin validator --bin relayer --bin scraper && \
mkdir -p /release && \
cp /usr/src/rust/target/release/validator /release && \
cp /usr/src/rust/target/release/relayer /release && \
cp /usr/src/rust/target/release/scraper /release
cp /usr/src/rust/main/target/release/validator /release && \
cp /usr/src/rust/main/target/release/relayer /release && \
cp /usr/src/rust/main/target/release/scraper /release
## 2: Copy the binaries to release image
FROM ubuntu:22.04
@@ -49,7 +48,8 @@ RUN apt-get update && \
rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY rust/config ./config
RUN mkdir -p /app/config
COPY rust/main/config /app/config
COPY --from=builder /release/* .
RUN chmod 777 /app && \

@@ -90,7 +90,7 @@ env $(cat ./config/validator.fuji.env | grep -v "#" | xargs) ./target/debug/vali
Clone `hyperlane-registry` repo next to `hyperlane-monorepo` repo.
To perform an automated e2e test of the agents locally, from within the `hyperlane-monorepo/rust` directory, run:
To perform an automated e2e test of the agents locally, from within the `hyperlane-monorepo/rust/main` directory, run:
```bash
cargo run --release --bin run-locally
@@ -117,7 +117,7 @@ cd rust
### Deploy Procedure
The contract addresses of each deploy can be found in `rust/config`. The agents will
The contract addresses of each deploy can be found in `rust/main/config`. The agents will
automatically pull in all configs in this directory.
When agents are deployed to point at a new environment, they cease to point at

@@ -1,170 +0,0 @@
use axum::{
extract::{Query, State},
routing, Router,
};
use derive_new::new;
use hyperlane_core::{ChainCommunicationError, QueueOperation, H256};
use serde::Deserialize;
use std::str::FromStr;
use tokio::sync::broadcast::Sender;
const MESSAGE_RETRY_API_BASE: &str = "/message_retry";
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum MessageRetryRequest {
MessageId(H256),
DestinationDomain(u32),
}
impl PartialEq<QueueOperation> for &MessageRetryRequest {
fn eq(&self, other: &QueueOperation) -> bool {
match self {
MessageRetryRequest::MessageId(message_id) => message_id == &other.id(),
MessageRetryRequest::DestinationDomain(destination_domain) => {
destination_domain == &other.destination_domain().id()
}
}
}
}
#[derive(new, Clone)]
pub struct MessageRetryApi {
tx: Sender<MessageRetryRequest>,
}
#[derive(Deserialize)]
struct RawMessageRetryRequest {
message_id: Option<String>,
destination_domain: Option<u32>,
}
impl TryFrom<RawMessageRetryRequest> for Vec<MessageRetryRequest> {
type Error = ChainCommunicationError;
fn try_from(request: RawMessageRetryRequest) -> Result<Self, Self::Error> {
let mut retry_requests = Vec::new();
if let Some(message_id) = request.message_id {
retry_requests.push(MessageRetryRequest::MessageId(H256::from_str(&message_id)?));
}
if let Some(destination_domain) = request.destination_domain {
retry_requests.push(MessageRetryRequest::DestinationDomain(destination_domain));
}
Ok(retry_requests)
}
}
async fn retry_message(
State(tx): State<Sender<MessageRetryRequest>>,
Query(request): Query<RawMessageRetryRequest>,
) -> String {
let retry_requests: Vec<MessageRetryRequest> = match request.try_into() {
Ok(retry_requests) => retry_requests,
// Technically it's bad practice to print the error message to the user, but
// this endpoint is for debugging purposes only.
Err(err) => {
return format!("Failed to parse retry request: {}", err);
}
};
if retry_requests.is_empty() {
return "No retry requests found. Please provide either a message_id or destination_domain.".to_string();
}
if let Err(err) = retry_requests
.into_iter()
.map(|req| tx.send(req))
.collect::<Result<Vec<_>, _>>()
{
return format!("Failed to send retry request to the queue: {}", err);
}
"Moved message(s) to the front of the queue".to_string()
}
impl MessageRetryApi {
pub fn router(&self) -> Router {
Router::new()
.route("/", routing::get(retry_message))
.with_state(self.tx.clone())
}
pub fn get_route(&self) -> (&'static str, Router) {
(MESSAGE_RETRY_API_BASE, self.router())
}
}
#[cfg(test)]
mod tests {
use crate::server::ENDPOINT_MESSAGES_QUEUE_SIZE;
use super::*;
use axum::http::StatusCode;
use ethers::utils::hex::ToHex;
use std::net::SocketAddr;
use tokio::sync::broadcast::{Receiver, Sender};
fn setup_test_server() -> (SocketAddr, Receiver<MessageRetryRequest>) {
let broadcast_tx = Sender::<MessageRetryRequest>::new(ENDPOINT_MESSAGES_QUEUE_SIZE);
let message_retry_api = MessageRetryApi::new(broadcast_tx.clone());
let (path, retry_router) = message_retry_api.get_route();
let app = Router::new().nest(path, retry_router);
// Running the app in the background using a test server
let server =
axum::Server::bind(&"127.0.0.1:0".parse().unwrap()).serve(app.into_make_service());
let addr = server.local_addr();
tokio::spawn(server);
(addr, broadcast_tx.subscribe())
}
#[tokio::test]
async fn test_message_id_retry() {
let (addr, mut rx) = setup_test_server();
// Create a random message ID
let message_id = H256::random();
// Send a GET request to the server
let response = reqwest::get(format!(
"http://{}{}?message_id={}",
addr,
MESSAGE_RETRY_API_BASE,
message_id.encode_hex::<String>()
))
.await
.unwrap();
// Check that the response status code is OK
assert_eq!(response.status(), StatusCode::OK);
assert_eq!(
rx.try_recv().unwrap(),
MessageRetryRequest::MessageId(message_id)
);
}
#[tokio::test]
async fn test_destination_domain_retry() {
let (addr, mut rx) = setup_test_server();
// Create a random destination domain
let destination_domain = 42;
// Send a GET request to the server
let response = reqwest::get(format!(
"http://{}{}?destination_domain={}",
addr, MESSAGE_RETRY_API_BASE, destination_domain
))
.await
.unwrap();
// Check that the response status code is OK
assert_eq!(response.status(), StatusCode::OK);
assert_eq!(
rx.try_recv().unwrap(),
MessageRetryRequest::DestinationDomain(destination_domain)
);
}
}
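Note that the retry endpoint accepts both query parameters in a single request; the `TryFrom<RawMessageRetryRequest>` impl above then yields one `MessageRetryRequest` per supplied parameter. A minimal client sketch (hypothetical, not part of this diff), reusing the `reqwest` dependency the tests already rely on:

```rust
// Hypothetical helper (not part of this diff): retry by message id and
// destination domain in one request; the server broadcasts one
// MessageRetryRequest for each parameter that is present.
async fn retry_both(addr: &str, message_id: &str, domain: u32) -> reqwest::Result<String> {
    reqwest::get(format!(
        "http://{addr}/message_retry?message_id={message_id}&destination_domain={domain}"
    ))
    .await?
    .text()
    .await
}
```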

@@ -1,39 +0,0 @@
use num_bigint::{BigInt, Sign};
use sea_orm::prelude::BigDecimal;
use hyperlane_core::{H256, U256};
// Creates a big-endian hex representation of the address
pub fn address_to_bytes(data: &H256) -> Vec<u8> {
if hex::is_h160(data.as_fixed_bytes()) {
// take the last 20 bytes
data.as_fixed_bytes()[12..32].into()
} else {
h256_to_bytes(data)
}
}
// Creates a big-endian hex representation of the address
pub fn bytes_to_address(data: Vec<u8>) -> eyre::Result<H256> {
if (data.len() != 20) && (data.len() != 32) {
return Err(eyre::eyre!("Invalid address length"));
}
if data.len() == 20 {
let mut prefix = vec![0; 12];
prefix.extend(data);
Ok(H256::from_slice(&prefix[..]))
} else {
Ok(H256::from_slice(&data[..]))
}
}
// Creates a big-endian hex representation of the address hash
pub fn h256_to_bytes(data: &H256) -> Vec<u8> {
data.as_fixed_bytes().as_slice().into()
}
pub fn u256_to_decimal(v: U256) -> BigDecimal {
let mut buf = [0u8; 32];
v.to_little_endian(&mut buf);
BigDecimal::from(BigInt::from_bytes_le(Sign::Plus, &buf as &[u8]))
}
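These helpers are intentionally asymmetric: `address_to_bytes` drops the zero 12-byte prefix of an H160-style address, while `bytes_to_address` left-pads 20-byte input back to 32 bytes. A round-trip test sketch (hypothetical, not part of this diff), assuming it sits in the same module as the helpers above:

```rust
#[cfg(test)]
mod tests {
    use super::*;
    use hyperlane_core::{H256, U256};
    use sea_orm::prelude::BigDecimal;

    // Hypothetical test (not part of this diff): an H256 with a zero 12-byte
    // prefix is shortened to its trailing 20 bytes, and bytes_to_address
    // left-pads those bytes back into the original 32-byte value.
    #[test]
    fn address_round_trip() {
        let evm_style = H256::from_low_u64_be(0xdead_beef);
        let bytes = address_to_bytes(&evm_style);
        assert_eq!(bytes.len(), 20);
        assert_eq!(bytes_to_address(bytes).unwrap(), evm_style);
    }

    // Hypothetical test: u256_to_decimal preserves small values exactly.
    #[test]
    fn u256_small_value() {
        assert_eq!(u256_to_decimal(U256::from(42u64)), BigDecimal::from(42));
    }
}
```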

@@ -1,314 +0,0 @@
use std::num::NonZeroU64;
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::vec;
use hyperlane_core::rpc_clients::call_and_retry_indefinitely;
use hyperlane_core::{ChainResult, MerkleTreeHook};
use prometheus::IntGauge;
use tokio::time::sleep;
use tracing::{debug, error, info};
use hyperlane_base::{db::HyperlaneRocksDB, CheckpointSyncer, CoreMetrics};
use hyperlane_core::{
accumulator::incremental::IncrementalMerkle, Checkpoint, CheckpointWithMessageId,
HyperlaneChain, HyperlaneContract, HyperlaneDomain, HyperlaneSignerExt,
};
use hyperlane_ethereum::SingletonSignerHandle;
#[derive(Clone)]
pub(crate) struct ValidatorSubmitter {
interval: Duration,
reorg_period: Option<NonZeroU64>,
signer: SingletonSignerHandle,
merkle_tree_hook: Arc<dyn MerkleTreeHook>,
checkpoint_syncer: Arc<dyn CheckpointSyncer>,
message_db: HyperlaneRocksDB,
metrics: ValidatorSubmitterMetrics,
}
impl ValidatorSubmitter {
pub(crate) fn new(
interval: Duration,
reorg_period: u64,
merkle_tree_hook: Arc<dyn MerkleTreeHook>,
signer: SingletonSignerHandle,
checkpoint_syncer: Arc<dyn CheckpointSyncer>,
message_db: HyperlaneRocksDB,
metrics: ValidatorSubmitterMetrics,
) -> Self {
Self {
reorg_period: NonZeroU64::new(reorg_period),
interval,
merkle_tree_hook,
signer,
checkpoint_syncer,
message_db,
metrics,
}
}
pub(crate) fn checkpoint(&self, tree: &IncrementalMerkle) -> Checkpoint {
Checkpoint {
root: tree.root(),
index: tree.index(),
merkle_tree_hook_address: self.merkle_tree_hook.address(),
mailbox_domain: self.merkle_tree_hook.domain().id(),
}
}
/// Submits signed checkpoints from index 0 until the target checkpoint (inclusive).
/// Runs idly forever once the target checkpoint is reached to avoid exiting the task.
pub(crate) async fn backfill_checkpoint_submitter(self, target_checkpoint: Checkpoint) {
let mut tree = IncrementalMerkle::default();
self.submit_checkpoints_until_correctness_checkpoint(&mut tree, &target_checkpoint)
.await;
info!(
?target_checkpoint,
"Backfill checkpoint submitter successfully reached target checkpoint"
);
}
/// Submits signed checkpoints indefinitely, starting from the `tree`.
pub(crate) async fn checkpoint_submitter(self, mut tree: IncrementalMerkle) {
// How often to log checkpoint info - once every minute
let checkpoint_info_log_period = Duration::from_secs(60);
// The instant in which we last logged checkpoint info, if at all
let mut latest_checkpoint_info_log: Option<Instant> = None;
// Returns whether checkpoint info should be logged based off the
// checkpoint_info_log_period having elapsed since the last log.
// Sets latest_checkpoint_info_log to the current instant if true.
let mut should_log_checkpoint_info = || {
if let Some(instant) = latest_checkpoint_info_log {
if instant.elapsed() < checkpoint_info_log_period {
return false;
}
}
latest_checkpoint_info_log = Some(Instant::now());
true
};
loop {
// Lag by reorg period because this is our correctness checkpoint.
let latest_checkpoint = call_and_retry_indefinitely(|| {
let merkle_tree_hook = self.merkle_tree_hook.clone();
Box::pin(async move { merkle_tree_hook.latest_checkpoint(self.reorg_period).await })
})
.await;
self.metrics
.latest_checkpoint_observed
.set(latest_checkpoint.index as i64);
if should_log_checkpoint_info() {
info!(
?latest_checkpoint,
tree_count = tree.count(),
"Latest checkpoint"
);
}
// This may occur e.g. if RPC providers are unreliable and make calls against
// inconsistent block tips.
//
// In this case, we just sleep a bit until we fetch a new latest checkpoint
// that at least meets the tree.
if tree_exceeds_checkpoint(&latest_checkpoint, &tree) {
debug!(
?latest_checkpoint,
tree_count = tree.count(),
"Latest checkpoint is behind tree, sleeping briefly"
);
sleep(self.interval).await;
continue;
}
self.submit_checkpoints_until_correctness_checkpoint(&mut tree, &latest_checkpoint)
.await;
self.metrics
.latest_checkpoint_processed
.set(latest_checkpoint.index as i64);
sleep(self.interval).await;
}
}
/// Submits signed checkpoints relating to the given tree until the correctness checkpoint (inclusive).
/// Only submits the signed checkpoints once the correctness checkpoint is reached.
async fn submit_checkpoints_until_correctness_checkpoint(
&self,
tree: &mut IncrementalMerkle,
correctness_checkpoint: &Checkpoint,
) {
// This should never be called with a tree that is ahead of the correctness checkpoint.
assert!(
!tree_exceeds_checkpoint(correctness_checkpoint, tree),
"tree (count: {}) is ahead of correctness checkpoint {:?}",
tree.count(),
correctness_checkpoint,
);
// All intermediate checkpoints will be stored here and signed once the correctness
// checkpoint is reached.
let mut checkpoint_queue = vec![];
// If the correctness checkpoint is ahead of the tree, we need to ingest more messages.
//
// tree.index() will panic if the tree is empty, so we use tree.count() instead
// and convert the correctness_checkpoint.index to a count by adding 1.
while tree.count() as u32 <= correctness_checkpoint.index {
if let Some(insertion) = self
.message_db
.retrieve_merkle_tree_insertion_by_leaf_index(&(tree.count() as u32))
.unwrap_or_else(|err| {
panic!(
"Error fetching merkle tree insertion for leaf index {}: {}",
tree.count(),
err
)
})
{
debug!(
index = insertion.index(),
queue_length = checkpoint_queue.len(),
"Ingesting leaf to tree"
);
let message_id = insertion.message_id();
tree.ingest(message_id);
let checkpoint = self.checkpoint(tree);
checkpoint_queue.push(CheckpointWithMessageId {
checkpoint,
message_id,
});
} else {
// If we haven't yet indexed the next merkle tree insertion but know that
// it will soon exist (because we know the correctness checkpoint), wait a bit and
// try again.
sleep(Duration::from_millis(100)).await
}
}
// At this point we know that correctness_checkpoint.index == tree.index().
assert_eq!(
correctness_checkpoint.index,
tree.index(),
"correctness checkpoint index {} != tree index {}",
correctness_checkpoint.index,
tree.index(),
);
let checkpoint = self.checkpoint(tree);
// If the tree's checkpoint doesn't match the correctness checkpoint, something went wrong
// and we bail loudly.
if checkpoint != *correctness_checkpoint {
error!(
?checkpoint,
?correctness_checkpoint,
"Incorrect tree root, something went wrong"
);
panic!("Incorrect tree root, something went wrong");
}
if !checkpoint_queue.is_empty() {
info!(
index = checkpoint.index,
queue_len = checkpoint_queue.len(),
"Reached tree consistency"
);
self.sign_and_submit_checkpoints(checkpoint_queue).await;
info!(
index = checkpoint.index,
"Signed all queued checkpoints until index"
);
}
}
async fn sign_and_submit_checkpoint(
&self,
checkpoint: CheckpointWithMessageId,
) -> ChainResult<()> {
let existing = self
.checkpoint_syncer
.fetch_checkpoint(checkpoint.index)
.await?;
if existing.is_some() {
debug!(index = checkpoint.index, "Checkpoint already submitted");
return Ok(());
}
let signed_checkpoint = self.signer.sign(checkpoint).await?;
self.checkpoint_syncer
.write_checkpoint(&signed_checkpoint)
.await?;
debug!(index = checkpoint.index, "Signed and submitted checkpoint");
// TODO: move these into S3 implementations
// small sleep before signing next checkpoint to avoid rate limiting
sleep(Duration::from_millis(100)).await;
Ok(())
}
/// Signs and submits any previously unsubmitted checkpoints.
async fn sign_and_submit_checkpoints(&self, checkpoints: Vec<CheckpointWithMessageId>) {
let last_checkpoint = checkpoints.as_slice()[checkpoints.len() - 1];
// Submits checkpoints to the store in reverse order. This speeds up processing historic checkpoints (those before the validator is spun up),
// since those are the most likely to make messages become processable.
// A side effect is that new checkpoints will also be submitted in reverse order.
for queued_checkpoint in checkpoints.into_iter().rev() {
// certain checkpoint stores rate limit very aggressively, so we retry indefinitely
call_and_retry_indefinitely(|| {
let self_clone = self.clone();
Box::pin(async move {
self_clone
.sign_and_submit_checkpoint(queued_checkpoint)
.await?;
Ok(())
})
})
.await;
}
call_and_retry_indefinitely(|| {
let self_clone = self.clone();
Box::pin(async move {
self_clone
.checkpoint_syncer
.update_latest_index(last_checkpoint.index)
.await?;
Ok(())
})
})
.await;
}
}
/// Returns whether the tree exceeds the checkpoint.
fn tree_exceeds_checkpoint(checkpoint: &Checkpoint, tree: &IncrementalMerkle) -> bool {
// tree.index() will panic if the tree is empty, so we use tree.count() instead
// and convert the correctness_checkpoint.index to a count by adding 1.
checkpoint.index + 1 < tree.count() as u32
}
#[derive(Clone)]
pub(crate) struct ValidatorSubmitterMetrics {
latest_checkpoint_observed: IntGauge,
latest_checkpoint_processed: IntGauge,
}
impl ValidatorSubmitterMetrics {
pub fn new(metrics: &CoreMetrics, mailbox_chain: &HyperlaneDomain) -> Self {
let chain_name = mailbox_chain.name();
Self {
latest_checkpoint_observed: metrics
.latest_checkpoint()
.with_label_values(&["validator_observed", chain_name]),
latest_checkpoint_processed: metrics
.latest_checkpoint()
.with_label_values(&["validator_processed", chain_name]),
}
}
}
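The off-by-one in `tree_exceeds_checkpoint` above is easy to misread: `tree.count()` is the number of ingested leaves, while `checkpoint.index` is the index of the latest leaf, so the tree is ahead exactly when `checkpoint.index + 1 < tree.count()`. A sketch of the predicate in action (hypothetical, not part of this diff):

```rust
// Hypothetical sketch (not part of this diff): a tree holding 5 leaves has
// count == 5 and latest leaf index == 4, so it is ahead of a correctness
// checkpoint at index 3 (3 + 1 < 5); checkpoint_submitter would sleep and
// refetch rather than submit against it.
fn example(merkle_tree_hook_address: hyperlane_core::H256) {
    use hyperlane_core::accumulator::incremental::IncrementalMerkle;
    use hyperlane_core::{Checkpoint, H256};

    let mut tree = IncrementalMerkle::default();
    for _ in 0..5 {
        tree.ingest(H256::random());
    }
    // Only the index matters to the predicate; the other fields are filler.
    let checkpoint = Checkpoint {
        root: tree.root(),
        index: 3,
        merkle_tree_hook_address,
        mailbox_domain: 1,
    };
    assert!(tree_exceeds_checkpoint(&checkpoint, &tree));
}
```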

@@ -1,57 +0,0 @@
use cosmrs::{crypto::PublicKey, AccountId};
use tendermint::account::Id as TendermintAccountId;
use tendermint::public_key::PublicKey as TendermintPublicKey;
use hyperlane_core::Error::Overflow;
use hyperlane_core::{ChainCommunicationError, ChainResult, H256};
use crate::HyperlaneCosmosError;
pub(crate) struct CosmosAccountId<'a> {
account_id: &'a AccountId,
}
impl<'a> CosmosAccountId<'a> {
pub fn new(account_id: &'a AccountId) -> Self {
Self { account_id }
}
pub fn account_id_from_pubkey(pub_key: PublicKey, prefix: &str) -> ChainResult<AccountId> {
// Get the inner type
let tendermint_pub_key = TendermintPublicKey::from(pub_key);
// Get the RIPEMD160(SHA256(pub_key))
let tendermint_id = TendermintAccountId::from(tendermint_pub_key);
// Bech32 encoding
let account_id = AccountId::new(prefix, tendermint_id.as_bytes())
.map_err(Into::<HyperlaneCosmosError>::into)?;
Ok(account_id)
}
}
impl TryFrom<&CosmosAccountId<'_>> for H256 {
type Error = ChainCommunicationError;
/// Builds a H256 digest from a cosmos AccountId (Bech32 encoding)
fn try_from(account_id: &CosmosAccountId) -> Result<Self, Self::Error> {
let bytes = account_id.account_id.to_bytes();
let h256_len = H256::len_bytes();
let Some(start_point) = h256_len.checked_sub(bytes.len()) else {
// input is too large to fit in a H256
return Err(Overflow.into());
};
let mut empty_hash = H256::default();
let result = empty_hash.as_bytes_mut();
result[start_point..].copy_from_slice(bytes.as_slice());
Ok(H256::from_slice(result))
}
}
impl TryFrom<CosmosAccountId<'_>> for H256 {
type Error = ChainCommunicationError;
/// Builds a H256 digest from a cosmos AccountId (Bech32 encoding)
fn try_from(account_id: CosmosAccountId) -> Result<Self, Self::Error> {
(&account_id).try_into()
}
}
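Both `TryFrom` impls left-pad the raw bech32 account bytes into the tail of an `H256`, so a 20-byte Cosmos account occupies the same digest shape as an EVM address. A sketch (hypothetical, not part of this diff; the `"neutron"` prefix and `0x42` byte pattern are illustrative only):

```rust
// Hypothetical sketch (not part of this diff): a 20-byte account id is
// left-padded with 12 zero bytes when converted into an H256 digest.
fn example() -> hyperlane_core::ChainResult<()> {
    use cosmrs::AccountId;
    use hyperlane_core::H256;

    let account_id = AccountId::new("neutron", &[0x42; 20])
        .expect("valid bech32 prefix and account bytes");
    let digest = H256::try_from(CosmosAccountId::new(&account_id))?;
    assert_eq!(&digest.as_bytes()[..12], &[0u8; 12]);
    assert_eq!(&digest.as_bytes()[12..], &[0x42; 20]);
    Ok(())
}
```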

@@ -1,287 +0,0 @@
use async_trait::async_trait;
use cosmrs::cosmwasm::MsgExecuteContract;
use cosmrs::crypto::PublicKey;
use cosmrs::tx::{MessageExt, SequenceNumber, SignerInfo};
use cosmrs::{AccountId, Tx};
use itertools::Itertools;
use tendermint::hash::Algorithm;
use tendermint::Hash;
use tendermint_rpc::{client::CompatMode, Client, HttpClient};
use time::OffsetDateTime;
use hyperlane_core::{
BlockInfo, ChainCommunicationError, ChainInfo, ChainResult, ContractLocator, HyperlaneChain,
HyperlaneDomain, HyperlaneProvider, TxnInfo, TxnReceiptInfo, H256, U256,
};
use crate::address::CosmosAddress;
use crate::grpc::WasmProvider;
use crate::libs::account::CosmosAccountId;
use crate::{ConnectionConf, CosmosAmount, HyperlaneCosmosError, Signer};
use self::grpc::WasmGrpcProvider;
/// cosmos grpc provider
pub mod grpc;
/// cosmos rpc provider
pub mod rpc;
/// Abstraction over a connection to a Cosmos chain
#[derive(Debug, Clone)]
pub struct CosmosProvider {
domain: HyperlaneDomain,
connection_conf: ConnectionConf,
grpc_client: WasmGrpcProvider,
rpc_client: HttpClient,
}
impl CosmosProvider {
/// Create a reference to a Cosmos chain
pub fn new(
domain: HyperlaneDomain,
conf: ConnectionConf,
locator: Option<ContractLocator>,
signer: Option<Signer>,
) -> ChainResult<Self> {
let gas_price = CosmosAmount::try_from(conf.get_minimum_gas_price().clone())?;
let grpc_client = WasmGrpcProvider::new(
domain.clone(),
conf.clone(),
gas_price.clone(),
locator,
signer,
)?;
let rpc_client = HttpClient::builder(
conf.get_rpc_url()
.parse()
.map_err(Into::<HyperlaneCosmosError>::into)?,
)
// Consider supporting different compatibility modes.
.compat_mode(CompatMode::latest())
.build()
.map_err(Into::<HyperlaneCosmosError>::into)?;
Ok(Self {
domain,
connection_conf: conf,
rpc_client,
grpc_client,
})
}
/// Get a grpc client
pub fn grpc(&self) -> &WasmGrpcProvider {
&self.grpc_client
}
/// Get an rpc client
pub fn rpc(&self) -> &HttpClient {
&self.rpc_client
}
fn search_payer_in_signer_infos(
&self,
signer_infos: &[SignerInfo],
payer: &AccountId,
) -> ChainResult<(AccountId, SequenceNumber)> {
signer_infos
.iter()
.map(|si| self.convert_signer_info_into_account_id_and_nonce(si))
// After the following we have a single Ok entry and, possibly, many Err entries
.filter_ok(|(a, _)| payer == a)
// If we have Ok entry, use it since it is the payer, if not, use the first entry with error
.find_or_first(|r| match r {
Ok((a, s)) => payer == a,
Err(e) => false,
})
// If there were not any signer info with non-empty public key or no signers for the transaction,
// we get None here
.unwrap_or_else(|| Err(ChainCommunicationError::from_other_str("no signer info")))
}
fn convert_signer_info_into_account_id_and_nonce(
&self,
signer_info: &SignerInfo,
) -> ChainResult<(AccountId, SequenceNumber)> {
let signer_public_key = signer_info.public_key.clone().ok_or_else(|| {
HyperlaneCosmosError::PublicKeyError("no public key for default signer".to_owned())
})?;
let public_key = PublicKey::try_from(signer_public_key)?;
let account_id = CosmosAccountId::account_id_from_pubkey(
public_key,
&self.connection_conf.get_bech32_prefix(),
)?;
Ok((account_id, signer_info.sequence))
}
/// Calculates the sender and the nonce for the transaction.
/// We use the fee `payer` as the sender of the transaction and search the signer
/// infos for `payer` to find the nonce.
/// If `payer` is not specified, we use the first account which signed the
/// transaction as the sender.
fn sender_and_nonce(&self, tx: &Tx) -> ChainResult<(H256, SequenceNumber)> {
let (sender, nonce) = tx
.auth_info
.fee
.payer
.as_ref()
.map(|payer| self.search_payer_in_signer_infos(&tx.auth_info.signer_infos, payer))
.map_or_else(
|| {
let signer_info = tx.auth_info.signer_infos.first().ok_or_else(|| {
HyperlaneCosmosError::SignerInfoError(
"no signer info in default signer".to_owned(),
)
})?;
self.convert_signer_info_into_account_id_and_nonce(signer_info)
},
|p| p,
)
.map(|(a, n)| CosmosAddress::from_account_id(a).map(|a| (a.digest(), n)))??;
Ok((sender, nonce))
}
/// Extract contract address from transaction.
/// Assumes that there is only one `MsgExecuteContract` message in the transaction
fn contract(tx: &Tx) -> ChainResult<H256> {
use cosmrs::proto::cosmwasm::wasm::v1::MsgExecuteContract as ProtoMsgExecuteContract;
let any = tx
.body
.messages
.iter()
.find(|a| a.type_url == "/cosmwasm.wasm.v1.MsgExecuteContract")
.ok_or_else(|| {
ChainCommunicationError::from_other_str("could not find contract execution message")
})?;
let proto =
ProtoMsgExecuteContract::from_any(any).map_err(Into::<HyperlaneCosmosError>::into)?;
let msg = MsgExecuteContract::try_from(proto)?;
let contract = H256::try_from(CosmosAccountId::new(&msg.contract))?;
Ok(contract)
}
}
impl HyperlaneChain for CosmosProvider {
fn domain(&self) -> &HyperlaneDomain {
&self.domain
}
fn provider(&self) -> Box<dyn HyperlaneProvider> {
Box::new(self.clone())
}
}
#[async_trait]
impl HyperlaneProvider for CosmosProvider {
async fn get_block_by_hash(&self, hash: &H256) -> ChainResult<BlockInfo> {
let tendermint_hash = Hash::from_bytes(Algorithm::Sha256, hash.as_bytes())
.expect("block hash should be of correct size");
let response = self
.rpc_client
.block_by_hash(tendermint_hash)
.await
.map_err(ChainCommunicationError::from_other)?;
let received_hash = H256::from_slice(response.block_id.hash.as_bytes());
if &received_hash != hash {
return Err(ChainCommunicationError::from_other_str(
&format!("received incorrect block, expected hash: {hash:?}, received hash: {received_hash:?}")
));
}
let block = response.block.ok_or_else(|| {
ChainCommunicationError::from_other_str(&format!(
"empty block info for block: {:?}",
hash
))
})?;
let time: OffsetDateTime = block.header.time.into();
let block_info = BlockInfo {
hash: hash.to_owned(),
timestamp: time.unix_timestamp() as u64,
number: block.header.height.value(),
};
Ok(block_info)
}
async fn get_txn_by_hash(&self, hash: &H256) -> ChainResult<TxnInfo> {
let tendermint_hash = Hash::from_bytes(Algorithm::Sha256, hash.as_bytes())
.expect("transaction hash should be of correct size");
let response = self
.rpc_client
.tx(tendermint_hash, false)
.await
.map_err(Into::<HyperlaneCosmosError>::into)?;
let received_hash = H256::from_slice(response.hash.as_bytes());
if &received_hash != hash {
return Err(ChainCommunicationError::from_other_str(&format!(
"received incorrect transaction, expected hash: {:?}, received hash: {:?}",
hash, received_hash,
)));
}
let tx = Tx::from_bytes(&response.tx)?;
let contract = Self::contract(&tx)?;
let (sender, nonce) = self.sender_and_nonce(&tx)?;
// TODO: support multiple denominations for the amount
let gas_limit = U256::from(tx.auth_info.fee.gas_limit);
let fee = tx
.auth_info
.fee
.amount
.iter()
.fold(U256::zero(), |acc, a| acc + a.amount);
let gas_price = fee / gas_limit;
let tx_info = TxnInfo {
hash: hash.to_owned(),
gas_limit: U256::from(response.tx_result.gas_wanted),
max_priority_fee_per_gas: None,
max_fee_per_gas: None,
gas_price: Some(gas_price),
nonce,
sender,
recipient: Some(contract),
receipt: Some(TxnReceiptInfo {
gas_used: U256::from(response.tx_result.gas_used),
cumulative_gas_used: U256::from(response.tx_result.gas_used),
effective_gas_price: Some(gas_price),
}),
};
Ok(tx_info)
}
async fn is_contract(&self, _address: &H256) -> ChainResult<bool> {
match self.grpc_client.wasm_contract_info().await {
Ok(_) => Ok(true),
Err(_) => Ok(false),
}
}
async fn get_balance(&self, address: String) -> ChainResult<U256> {
Ok(self
.grpc_client
.get_balance(address, self.connection_conf.get_canonical_asset())
.await?)
}
async fn get_chain_metrics(&self) -> ChainResult<Option<ChainInfo>> {
Ok(None)
}
}

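A worked sketch of the gas price computation in get_txn_by_hash above, with made-up numbers (not from this diff). The fee coins are summed and divided by the gas limit; since U256 division is integer division, the result truncates:

// Sketch: a 400_000 fee (in the chain's base denom) and a 200_000 gas limit.
let fee = U256::from(400_000u64);
let gas_limit = U256::from(200_000u64);
let gas_price = fee / gas_limit; // == 2; a 100_000 fee would truncate to 0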
@ -1,54 +0,0 @@
use hyperlane_core::{config::OperationBatchConfig, U256};
use url::Url;
/// Ethereum RPC connection configuration
#[derive(Debug, Clone)]
pub enum RpcConnectionConf {
/// An HTTP-only quorum.
HttpQuorum {
/// List of urls to connect to
urls: Vec<Url>,
},
/// An HTTP-only fallback set.
HttpFallback {
/// List of urls to connect to in order of priority
urls: Vec<Url>,
},
/// HTTP connection details
Http {
/// Url to connect to
url: Url,
},
/// Websocket connection details
Ws {
/// Url to connect to
url: Url,
},
}
/// Ethereum connection configuration
#[derive(Debug, Clone)]
pub struct ConnectionConf {
/// RPC connection configuration
pub rpc_connection: RpcConnectionConf,
/// Transaction overrides to use when sending transactions.
pub transaction_overrides: TransactionOverrides,
/// Operation batching configuration
pub operation_batch: OperationBatchConfig,
}
/// Ethereum transaction overrides.
#[derive(Debug, Clone, Default)]
pub struct TransactionOverrides {
/// Gas price to use for transactions, in wei.
/// If specified, non-1559 transactions will be used with this gas price.
pub gas_price: Option<U256>,
/// Gas limit to use for transactions.
/// If unspecified, the gas limit will be estimated.
/// If specified, transactions will use `max(estimated_gas, gas_limit)`
pub gas_limit: Option<U256>,
/// Max fee per gas to use for EIP-1559 transactions.
pub max_fee_per_gas: Option<U256>,
/// Max priority fee per gas to use for EIP-1559 transactions.
pub max_priority_fee_per_gas: Option<U256>,
}

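The gas_limit override combines with estimation rather than replacing it. A minimal illustrative helper for the rule documented above (this function is an assumption, not part of the agent):

fn effective_gas_limit(estimated: U256, overrides: &TransactionOverrides) -> U256 {
// `max(estimated_gas, gas_limit)`, per the doc comment on `gas_limit`.
match overrides.gas_limit {
Some(limit) => estimated.max(limit),
None => estimated,
}
}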
@ -1,530 +0,0 @@
{
"types": [
{
"typeId": 0,
"type": "()",
"components": [],
"typeParameters": null
},
{
"typeId": 1,
"type": "(_, _)",
"components": [
{
"name": "__tuple_element",
"type": 2,
"typeArguments": null
},
{
"name": "__tuple_element",
"type": 20,
"typeArguments": null
}
],
"typeParameters": null
},
{
"typeId": 2,
"type": "b256",
"components": null,
"typeParameters": null
},
{
"typeId": 3,
"type": "bool",
"components": null,
"typeParameters": null
},
{
"typeId": 4,
"type": "enum Identity",
"components": [
{
"name": "Address",
"type": 14,
"typeArguments": null
},
{
"name": "ContractId",
"type": 15,
"typeArguments": null
}
],
"typeParameters": null
},
{
"typeId": 5,
"type": "enum Option",
"components": [
{
"name": "None",
"type": 0,
"typeArguments": null
},
{
"name": "Some",
"type": 6,
"typeArguments": null
}
],
"typeParameters": [
6
]
},
{
"typeId": 6,
"type": "generic T",
"components": null,
"typeParameters": null
},
{
"typeId": 7,
"type": "raw untyped ptr",
"components": null,
"typeParameters": null
},
{
"typeId": 8,
"type": "str[12]",
"components": null,
"typeParameters": null
},
{
"typeId": 9,
"type": "str[16]",
"components": null,
"typeParameters": null
},
{
"typeId": 10,
"type": "str[6]",
"components": null,
"typeParameters": null
},
{
"typeId": 11,
"type": "str[7]",
"components": null,
"typeParameters": null
},
{
"typeId": 12,
"type": "str[8]",
"components": null,
"typeParameters": null
},
{
"typeId": 13,
"type": "str[9]",
"components": null,
"typeParameters": null
},
{
"typeId": 14,
"type": "struct Address",
"components": [
{
"name": "value",
"type": 2,
"typeArguments": null
}
],
"typeParameters": null
},
{
"typeId": 15,
"type": "struct ContractId",
"components": [
{
"name": "value",
"type": 2,
"typeArguments": null
}
],
"typeParameters": null
},
{
"typeId": 16,
"type": "struct Message",
"components": [
{
"name": "version",
"type": 22,
"typeArguments": null
},
{
"name": "nonce",
"type": 20,
"typeArguments": null
},
{
"name": "origin",
"type": 20,
"typeArguments": null
},
{
"name": "sender",
"type": 2,
"typeArguments": null
},
{
"name": "destination",
"type": 20,
"typeArguments": null
},
{
"name": "recipient",
"type": 2,
"typeArguments": null
},
{
"name": "body",
"type": 19,
"typeArguments": [
{
"name": "",
"type": 22,
"typeArguments": null
}
]
}
],
"typeParameters": null
},
{
"typeId": 17,
"type": "struct OwnershipTransferredEvent",
"components": [
{
"name": "previous_owner",
"type": 5,
"typeArguments": [
{
"name": "",
"type": 4,
"typeArguments": null
}
]
},
{
"name": "new_owner",
"type": 5,
"typeArguments": [
{
"name": "",
"type": 4,
"typeArguments": null
}
]
}
],
"typeParameters": null
},
{
"typeId": 18,
"type": "struct RawVec",
"components": [
{
"name": "ptr",
"type": 7,
"typeArguments": null
},
{
"name": "cap",
"type": 21,
"typeArguments": null
}
],
"typeParameters": [
6
]
},
{
"typeId": 19,
"type": "struct Vec",
"components": [
{
"name": "buf",
"type": 18,
"typeArguments": [
{
"name": "",
"type": 6,
"typeArguments": null
}
]
},
{
"name": "len",
"type": 21,
"typeArguments": null
}
],
"typeParameters": [
6
]
},
{
"typeId": 20,
"type": "u32",
"components": null,
"typeParameters": null
},
{
"typeId": 21,
"type": "u64",
"components": null,
"typeParameters": null
},
{
"typeId": 22,
"type": "u8",
"components": null,
"typeParameters": null
}
],
"functions": [
{
"inputs": [],
"name": "count",
"output": {
"name": "",
"type": 20,
"typeArguments": null
}
},
{
"inputs": [
{
"name": "message_id",
"type": 2,
"typeArguments": null
}
],
"name": "delivered",
"output": {
"name": "",
"type": 3,
"typeArguments": null
}
},
{
"inputs": [
{
"name": "destination_domain",
"type": 20,
"typeArguments": null
},
{
"name": "recipient",
"type": 2,
"typeArguments": null
},
{
"name": "message_body",
"type": 19,
"typeArguments": [
{
"name": "",
"type": 22,
"typeArguments": null
}
]
}
],
"name": "dispatch",
"output": {
"name": "",
"type": 2,
"typeArguments": null
}
},
{
"inputs": [],
"name": "get_default_ism",
"output": {
"name": "",
"type": 15,
"typeArguments": null
}
},
{
"inputs": [],
"name": "latest_checkpoint",
"output": {
"name": "",
"type": 1,
"typeArguments": null
}
},
{
"inputs": [
{
"name": "metadata",
"type": 19,
"typeArguments": [
{
"name": "",
"type": 22,
"typeArguments": null
}
]
},
{
"name": "_message",
"type": 16,
"typeArguments": null
}
],
"name": "process",
"output": {
"name": "",
"type": 0,
"typeArguments": null
}
},
{
"inputs": [],
"name": "root",
"output": {
"name": "",
"type": 2,
"typeArguments": null
}
},
{
"inputs": [
{
"name": "module",
"type": 15,
"typeArguments": null
}
],
"name": "set_default_ism",
"output": {
"name": "",
"type": 0,
"typeArguments": null
}
},
{
"inputs": [],
"name": "owner",
"output": {
"name": "",
"type": 5,
"typeArguments": [
{
"name": "",
"type": 4,
"typeArguments": null
}
]
}
},
{
"inputs": [
{
"name": "new_owner",
"type": 5,
"typeArguments": [
{
"name": "",
"type": 4,
"typeArguments": null
}
]
}
],
"name": "transfer_ownership",
"output": {
"name": "",
"type": 0,
"typeArguments": null
}
}
],
"loggedTypes": [
{
"logId": 0,
"loggedType": {
"name": "",
"type": 8,
"typeArguments": null
}
},
{
"logId": 1,
"loggedType": {
"name": "",
"type": 9,
"typeArguments": null
}
},
{
"logId": 2,
"loggedType": {
"name": "",
"type": 12,
"typeArguments": null
}
},
{
"logId": 3,
"loggedType": {
"name": "",
"type": 8,
"typeArguments": null
}
},
{
"logId": 4,
"loggedType": {
"name": "",
"type": 13,
"typeArguments": null
}
},
{
"logId": 5,
"loggedType": {
"name": "",
"type": 11,
"typeArguments": null
}
},
{
"logId": 6,
"loggedType": {
"name": "",
"type": 2,
"typeArguments": null
}
},
{
"logId": 7,
"loggedType": {
"name": "",
"type": 10,
"typeArguments": null
}
},
{
"logId": 8,
"loggedType": {
"name": "",
"type": 10,
"typeArguments": null
}
},
{
"logId": 9,
"loggedType": {
"name": "",
"type": 17,
"typeArguments": []
}
}
],
"messagesTypes": []
}

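This ABI is the source for the FuelMailboxInner bindings used in the mailbox implementation below. With fuels-rs, such bindings are typically generated with the abigen! macro; a hedged sketch (the path and contract name here are assumptions):

// Sketch only: assumes fuels 0.65 and an illustrative ABI path.
use fuels::prelude::*;
abigen!(Contract(
name = "Mailbox",
abi = "chains/hyperlane-fuel/abis/Mailbox.abi.json"
));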
@ -1,164 +0,0 @@
use std::collections::HashMap;
use std::fmt::{Debug, Formatter};
use std::num::NonZeroU64;
use std::ops::RangeInclusive;
use async_trait::async_trait;
use fuels::prelude::{Bech32ContractId, WalletUnlocked};
use hyperlane_core::Indexed;
use tracing::instrument;
use hyperlane_core::{
utils::bytes_to_hex, ChainCommunicationError, ChainResult, ContractLocator, HyperlaneAbi,
HyperlaneChain, HyperlaneContract, HyperlaneDomain, HyperlaneMessage, HyperlaneProvider,
Indexer, LogMeta, Mailbox, TxCostEstimate, TxOutcome, H256, U256,
};
use crate::{
contracts::mailbox::Mailbox as FuelMailboxInner, conversions::*, make_provider, ConnectionConf,
};
/// A reference to a Mailbox contract on some Fuel chain
pub struct FuelMailbox {
contract: FuelMailboxInner,
domain: HyperlaneDomain,
}
impl FuelMailbox {
/// Create a new fuel mailbox
pub fn new(
conf: &ConnectionConf,
locator: ContractLocator,
mut wallet: WalletUnlocked,
) -> ChainResult<Self> {
let provider = make_provider(conf)?;
wallet.set_provider(provider);
let address = Bech32ContractId::from_h256(&locator.address);
Ok(FuelMailbox {
contract: FuelMailboxInner::new(address, wallet),
domain: locator.domain.clone(),
})
}
}
impl HyperlaneContract for FuelMailbox {
fn address(&self) -> H256 {
self.contract.contract_id().into_h256()
}
}
impl HyperlaneChain for FuelMailbox {
fn domain(&self) -> &HyperlaneDomain {
&self.domain
}
fn provider(&self) -> Box<dyn HyperlaneProvider> {
todo!()
}
}
impl Debug for FuelMailbox {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self as &dyn HyperlaneContract)
}
}
#[async_trait]
impl Mailbox for FuelMailbox {
#[instrument(level = "debug", err, ret, skip(self))]
async fn count(&self, lag: Option<NonZeroU64>) -> ChainResult<u32> {
assert!(
lag.is_none(),
"Fuel does not support querying point-in-time"
);
self.contract
.methods()
.count()
.simulate()
.await
.map(|r| r.value)
.map_err(ChainCommunicationError::from_other)
}
#[instrument(level = "debug", err, ret, skip(self))]
async fn delivered(&self, id: H256) -> ChainResult<bool> {
todo!()
}
#[instrument(err, ret, skip(self))]
async fn default_ism(&self) -> ChainResult<H256> {
todo!()
}
#[instrument(err, ret, skip(self))]
async fn recipient_ism(&self, recipient: H256) -> ChainResult<H256> {
todo!()
}
#[instrument(err, ret, skip(self))]
async fn process(
&self,
message: &HyperlaneMessage,
metadata: &[u8],
tx_gas_limit: Option<U256>,
) -> ChainResult<TxOutcome> {
todo!()
}
#[instrument(err, ret, skip(self), fields(msg=%message, metadata=%bytes_to_hex(metadata)))]
async fn process_estimate_costs(
&self,
message: &HyperlaneMessage,
metadata: &[u8],
) -> ChainResult<TxCostEstimate> {
todo!()
}
fn process_calldata(&self, message: &HyperlaneMessage, metadata: &[u8]) -> Vec<u8> {
todo!()
}
}
/// Struct that retrieves event data for a Fuel Mailbox contract
#[derive(Debug)]
pub struct FuelMailboxIndexer {}
#[async_trait]
impl Indexer<HyperlaneMessage> for FuelMailboxIndexer {
async fn fetch_logs_in_range(
&self,
range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<HyperlaneMessage>, LogMeta)>> {
todo!()
}
async fn get_finalized_block_number(&self) -> ChainResult<u32> {
todo!()
}
}
#[async_trait]
impl Indexer<H256> for FuelMailboxIndexer {
async fn fetch_logs_in_range(
&self,
range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<H256>, LogMeta)>> {
todo!()
}
async fn get_finalized_block_number(&self) -> ChainResult<u32> {
todo!()
}
}
struct FuelMailboxAbi;
impl HyperlaneAbi for FuelMailboxAbi {
const SELECTOR_SIZE_BYTES: usize = 8;
fn fn_map() -> HashMap<Vec<u8>, &'static str> {
// Can't support this without Fuels exporting it in the generated code
todo!()
}
}

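Most methods above are still todo!(); once implemented they would plausibly follow the same simulate-and-map pattern as count. A hedged sketch for delivered (the Bits256 conversion is an assumption):

// Sketch only: mirrors the `count` implementation above.
self.contract
.methods()
.delivered(fuels::types::Bits256(id.to_fixed_bytes()))
.simulate()
.await
.map(|r| r.value)
.map_err(ChainCommunicationError::from_other)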
@ -1,43 +0,0 @@
use async_trait::async_trait;
use hyperlane_core::{
BlockInfo, ChainInfo, ChainResult, HyperlaneChain, HyperlaneDomain, HyperlaneProvider, TxnInfo,
H256, U256,
};
/// A wrapper around a Fuel provider to get generic blockchain information.
#[derive(Debug)]
pub struct FuelProvider {}
impl HyperlaneChain for FuelProvider {
fn domain(&self) -> &HyperlaneDomain {
todo!()
}
fn provider(&self) -> Box<dyn HyperlaneProvider> {
todo!()
}
}
#[async_trait]
impl HyperlaneProvider for FuelProvider {
async fn get_block_by_hash(&self, hash: &H256) -> ChainResult<BlockInfo> {
todo!()
}
async fn get_txn_by_hash(&self, hash: &H256) -> ChainResult<TxnInfo> {
todo!()
}
async fn is_contract(&self, address: &H256) -> ChainResult<bool> {
todo!()
}
async fn get_balance(&self, address: String) -> ChainResult<U256> {
todo!()
}
async fn get_chain_metrics(&self) -> ChainResult<Option<ChainInfo>> {
Ok(None)
}
}

@ -1,35 +0,0 @@
cargo-features = ["workspace-inheritance"]
[package]
name = "hyperlane-sealevel"
version = "0.1.0"
edition = "2021"
[dependencies]
anyhow.workspace = true
async-trait.workspace = true
base64.workspace = true
borsh.workspace = true
derive-new.workspace = true
jsonrpc-core.workspace = true
num-traits.workspace = true
serde.workspace = true
solana-account-decoder.workspace = true
solana-client.workspace = true
solana-sdk.workspace = true
solana-transaction-status.workspace = true
thiserror.workspace = true
tracing-futures.workspace = true
tracing.workspace = true
url.workspace = true
account-utils = { path = "../../sealevel/libraries/account-utils" }
hyperlane-core = { path = "../../hyperlane-core", features = ["solana", "async"] }
hyperlane-sealevel-interchain-security-module-interface = { path = "../../sealevel/libraries/interchain-security-module-interface" }
hyperlane-sealevel-mailbox = { path = "../../sealevel/programs/mailbox", features = ["no-entrypoint"] }
hyperlane-sealevel-igp = { path = "../../sealevel/programs/hyperlane-sealevel-igp", features = ["no-entrypoint"] }
hyperlane-sealevel-message-recipient-interface = { path = "../../sealevel/libraries/message-recipient-interface" }
hyperlane-sealevel-multisig-ism-message-id = { path = "../../sealevel/programs/ism/multisig-ism-message-id", features = ["no-entrypoint"] }
hyperlane-sealevel-validator-announce = { path = "../../sealevel/programs/validator-announce", features = ["no-entrypoint"] }
multisig-ism = { path = "../../sealevel/libraries/multisig-ism" }
serializable-account-meta = { path = "../../sealevel/libraries/serializable-account-meta" }

@ -1,29 +0,0 @@
use solana_client::nonblocking::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;
/// Kludge to implement Debug for RpcClient.
pub struct RpcClientWithDebug(RpcClient);
impl RpcClientWithDebug {
pub fn new(rpc_endpoint: String) -> Self {
Self(RpcClient::new(rpc_endpoint))
}
pub fn new_with_commitment(rpc_endpoint: String, commitment: CommitmentConfig) -> Self {
Self(RpcClient::new_with_commitment(rpc_endpoint, commitment))
}
}
impl std::fmt::Debug for RpcClientWithDebug {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("RpcClient { ... }")
}
}
impl std::ops::Deref for RpcClientWithDebug {
type Target = RpcClient;
fn deref(&self) -> &Self::Target {
&self.0
}
}

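Because of the Deref impl, the wrapper is transparent at call sites: any RpcClient method can be invoked directly on it. A usage sketch (the endpoint is a placeholder and an async context is assumed):

// Sketch: `get_block_height` resolves through Deref to the inner RpcClient.
let client = RpcClientWithDebug::new("http://localhost:8899".to_string());
let height = client.get_block_height().await?;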
@ -1,23 +0,0 @@
use hyperlane_core::ChainCommunicationError;
use solana_client::client_error::ClientError;
use solana_sdk::pubkey::ParsePubkeyError;
/// Errors specific to the hyperlane-sealevel implementation.
/// This error can be converted into the broader error type
/// in hyperlane-core using the `From` trait impl below.
#[derive(Debug, thiserror::Error)]
pub enum HyperlaneSealevelError {
/// ParsePubkeyError error
#[error("{0}")]
ParsePubkeyError(#[from] ParsePubkeyError),
/// ClientError error
#[error("{0}")]
ClientError(#[from] ClientError),
}
impl From<HyperlaneSealevelError> for ChainCommunicationError {
fn from(value: HyperlaneSealevelError) -> Self {
ChainCommunicationError::from_other(value)
}
}

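The #[from] attributes plus the From impl above let call sites bubble either underlying error up to a ChainCommunicationError with a double conversion, as the provider below does. A minimal sketch (imports mirror provider.rs):

// Sketch: ParsePubkeyError -> HyperlaneSealevelError -> ChainCommunicationError.
fn parse_pubkey(address: &str) -> ChainResult<Pubkey> {
Pubkey::from_str(address)
.map_err(Into::<HyperlaneSealevelError>::into)
.map_err(Into::into)
}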
@ -1,84 +0,0 @@
use std::{str::FromStr, sync::Arc};
use async_trait::async_trait;
use hyperlane_core::{
BlockInfo, ChainInfo, ChainResult, HyperlaneChain, HyperlaneDomain, HyperlaneProvider, TxnInfo,
H256, U256,
};
use solana_sdk::{commitment_config::CommitmentConfig, pubkey::Pubkey};
use crate::{client::RpcClientWithDebug, error::HyperlaneSealevelError, ConnectionConf};
/// A wrapper around a Sealevel provider to get generic blockchain information.
#[derive(Debug)]
pub struct SealevelProvider {
domain: HyperlaneDomain,
rpc_client: Arc<RpcClientWithDebug>,
}
impl SealevelProvider {
/// Create a new Sealevel provider.
pub fn new(domain: HyperlaneDomain, conf: &ConnectionConf) -> Self {
// Set the `processed` commitment at the RPC level
let rpc_client = Arc::new(RpcClientWithDebug::new_with_commitment(
conf.url.to_string(),
CommitmentConfig::processed(),
));
SealevelProvider { domain, rpc_client }
}
/// Get an rpc client
pub fn rpc(&self) -> &RpcClientWithDebug {
&self.rpc_client
}
/// Get the balance of an address
pub async fn get_balance(&self, address: String) -> ChainResult<U256> {
let pubkey = Pubkey::from_str(&address).map_err(Into::<HyperlaneSealevelError>::into)?;
let balance = self
.rpc_client
.get_balance(&pubkey)
.await
.map_err(Into::<HyperlaneSealevelError>::into)?;
Ok(balance.into())
}
}
impl HyperlaneChain for SealevelProvider {
fn domain(&self) -> &HyperlaneDomain {
&self.domain
}
fn provider(&self) -> Box<dyn HyperlaneProvider> {
Box::new(SealevelProvider {
domain: self.domain.clone(),
rpc_client: self.rpc_client.clone(),
})
}
}
#[async_trait]
impl HyperlaneProvider for SealevelProvider {
async fn get_block_by_hash(&self, _hash: &H256) -> ChainResult<BlockInfo> {
todo!() // FIXME
}
async fn get_txn_by_hash(&self, _hash: &H256) -> ChainResult<TxnInfo> {
todo!() // FIXME
}
async fn is_contract(&self, _address: &H256) -> ChainResult<bool> {
// FIXME
Ok(true)
}
async fn get_balance(&self, address: String) -> ChainResult<U256> {
self.get_balance(address).await
}
async fn get_chain_metrics(&self) -> ChainResult<Option<ChainInfo>> {
Ok(None)
}
}

@ -1,93 +0,0 @@
use base64::Engine;
use borsh::{BorshDeserialize, BorshSerialize};
use hyperlane_core::{ChainCommunicationError, ChainResult};
use serializable_account_meta::{SerializableAccountMeta, SimulationReturnData};
use solana_client::nonblocking::rpc_client::RpcClient;
use solana_sdk::{
commitment_config::CommitmentConfig,
instruction::{AccountMeta, Instruction},
message::Message,
signature::{Keypair, Signer},
transaction::Transaction,
};
use solana_transaction_status::UiReturnDataEncoding;
use crate::client::RpcClientWithDebug;
/// Simulates an instruction and attempts to deserialize its return data into a `T`.
/// If no return data at all was returned, returns `Ok(None)`.
/// If return data was present but could not be deserialized into a `T`,
/// an `Err` is returned.
pub async fn simulate_instruction<T: BorshDeserialize + BorshSerialize>(
rpc_client: &RpcClient,
payer: &Keypair,
instruction: Instruction,
) -> ChainResult<Option<T>> {
let commitment = CommitmentConfig::finalized();
let (recent_blockhash, _) = rpc_client
.get_latest_blockhash_with_commitment(commitment)
.await
.map_err(ChainCommunicationError::from_other)?;
let return_data = rpc_client
.simulate_transaction(&Transaction::new_unsigned(Message::new_with_blockhash(
&[instruction],
Some(&payer.pubkey()),
&recent_blockhash,
)))
.await
.map_err(ChainCommunicationError::from_other)?
.value
.return_data;
if let Some(return_data) = return_data {
let bytes = match return_data.data.1 {
UiReturnDataEncoding::Base64 => base64::engine::general_purpose::STANDARD
.decode(return_data.data.0)
.map_err(ChainCommunicationError::from_other)?,
};
let decoded_data =
T::try_from_slice(bytes.as_slice()).map_err(ChainCommunicationError::from_other)?;
return Ok(Some(decoded_data));
}
Ok(None)
}
/// Simulates an Instruction that will return a list of AccountMetas.
pub async fn get_account_metas(
rpc_client: &RpcClient,
payer: &Keypair,
instruction: Instruction,
) -> ChainResult<Vec<AccountMeta>> {
// If there's no data at all, default to an empty vec.
let account_metas = simulate_instruction::<SimulationReturnData<Vec<SerializableAccountMeta>>>(
rpc_client,
payer,
instruction,
)
.await?
.map(|serializable_account_metas| {
serializable_account_metas
.return_data
.into_iter()
.map(|serializable_account_meta| serializable_account_meta.into())
.collect()
})
.unwrap_or_else(Vec::new);
Ok(account_metas)
}
pub async fn get_finalized_block_number(rpc_client: &RpcClientWithDebug) -> ChainResult<u32> {
let height = rpc_client
.get_block_height()
.await
.map_err(ChainCommunicationError::from_other)?
.try_into()
// FIXME solana block height is u64...
.expect("sealevel block height exceeds u32::MAX");
Ok(height)
}

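A usage sketch for get_account_metas (the surrounding instruction construction is elided; rpc_client and payer are assumed to be in scope):

// Sketch: fetch the extra accounts a program reports it needs, then
// attach them to the real instruction before submission.
let metas = get_account_metas(&rpc_client, &payer, query_instruction).await?;
instruction.accounts.extend(metas);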
File diff suppressed because it is too large

@ -1,2 +0,0 @@
pub use rocks::*;
mod rocks;

File diff suppressed because it is too large

@ -0,0 +1,329 @@
[workspace]
members = [
"agents/relayer",
"agents/scraper",
"agents/validator",
"chains/hyperlane-cosmos",
"chains/hyperlane-ethereum",
"chains/hyperlane-fuel",
"chains/hyperlane-sealevel",
"ethers-prometheus",
"hyperlane-base",
"hyperlane-core",
"hyperlane-test",
"utils/abigen",
"utils/backtrace-oneline",
"utils/crypto",
"utils/hex",
"utils/run-locally",
]
[workspace.package]
documentation = "https://docs.hyperlane.xyz"
edition = "2021"
homepage = "https://hyperlane.xyz"
license-file = "../LICENSE.md"
publish = false
version = "0.1.0"
[workspace.dependencies]
Inflector = "0.11.4"
anyhow = "1.0"
async-trait = "0.1"
async-rwlock = "1.3"
auto_impl = "1.0"
axum = "0.6.1"
backtrace = "0.3"
base64 = "0.21.2"
bigdecimal = "0.4.2"
bincode = "1.3"
borsh = "0.9"
bs58 = "0.5.0"
bytes = "1"
clap = "4"
chrono = "*"
color-eyre = "0.6"
config = "0.13.3"
console-subscriber = "0.2.0"
convert_case = "0.6"
cosmrs = { version = "0.14", default-features = false, features = [
"cosmwasm",
"rpc",
"tokio",
"grpc",
] }
cosmwasm-std = "*"
crunchy = "0.2"
ctrlc = "3.2"
curve25519-dalek = { version = "~3.2", features = ["serde"] }
derive-new = "0.5"
derive_builder = "0.12"
derive_more = "0.99"
dhat = "0.3.3"
ed25519-dalek = "~1.0"
eyre = "=0.6.8"
fixed-hash = "0.8.0"
fuels = "0.65.0"
fuels-code-gen = "0.65.0"
futures = "0.3"
futures-util = "0.3"
generic-array = { version = "0.14", features = ["serde", "more_lengths"] }
bech32 = "0.9.1"
elliptic-curve = "0.13.8"
# Required for WASM support https://docs.rs/getrandom/latest/getrandom/#webassembly-support
getrandom = { version = "0.2", features = ["js"] }
hex = "0.4.3"
http = "0.2.12"
hyper = "0.14"
hyper-tls = "0.5.0"
hyperlane-cosmwasm-interface = "=0.0.6-rc6"
injective-protobuf = "0.2.2"
injective-std = "=0.1.5"
itertools = "*"
jobserver = "=0.1.26"
jsonrpc-core = "18.0"
k256 = { version = "0.13.4", features = ["arithmetic", "std", "ecdsa"] }
log = "0.4"
macro_rules_attribute = "0.2"
maplit = "1.0"
mockall = "0.11"
nix = { version = "0.26", default-features = false }
num = "0.4"
num-bigint = "0.4"
num-derive = "0.4.0"
num-traits = "0.2"
once_cell = "1.18.0"
parking_lot = "0.12"
paste = "1.0"
pretty_env_logger = "0.5.0"
primitive-types = "=0.12.1"
prometheus = "0.13"
protobuf = "*"
rand = "0.8.5"
regex = "1.5"
reqwest = "0.11"
ripemd = "0.1.3"
rlp = "=0.5.2"
rocksdb = "0.21.0"
sea-orm = { version = "0.11.1", features = [
"sqlx-postgres",
"runtime-tokio-native-tls",
"with-bigdecimal",
"with-time",
"macros",
] }
sea-orm-migration = { version = "0.11.1", features = [
"sqlx-postgres",
"runtime-tokio-native-tls",
] }
semver = "1.0"
serde = { version = "1.0", features = ["derive"] }
serde_bytes = "0.11"
serde_derive = "1.0"
serde_json = "1.0"
sha2 = { version = "0.10.6", default-features = false }
sha256 = "1.1.4"
sha3 = "0.10"
solana-account-decoder = "=1.14.13"
solana-banks-client = "=1.14.13"
solana-banks-interface = "=1.14.13"
solana-banks-server = "=1.14.13"
solana-clap-utils = "=1.14.13"
solana-cli-config = "=1.14.13"
solana-client = "=1.14.13"
solana-program = "=1.14.13"
solana-program-test = "=1.14.13"
solana-sdk = "=1.14.13"
solana-transaction-status = "=1.14.13"
solana-zk-token-sdk = "=1.14.13"
spl-associated-token-account = { version = "=1.1.2", features = [
"no-entrypoint",
] }
spl-noop = { version = "=0.1.3", features = ["no-entrypoint"] }
spl-token = { version = "=3.5.0", features = ["no-entrypoint"] }
spl-token-2022 = { version = "=0.5.0", features = ["no-entrypoint"] }
spl-type-length-value = "=0.1.0"
static_assertions = "1.1"
strum = "0.26.2"
strum_macros = "0.26.2"
tempfile = "3.3"
tendermint = "0.32.2"
tendermint-rpc = { version = "0.32.0", features = ["http-client", "tokio"] }
thiserror = "1.0"
time = "0.3"
tiny-keccak = "2.0.2"
tokio = { version = "1.4", features = ["parking_lot", "tracing"] }
tokio-metrics = { version = "0.3.1", default-features = false }
tokio-test = "0.4"
toml_edit = "0.19.14"
tonic = "0.9.2"
tracing = { version = "0.1" }
tracing-error = "0.2"
tracing-futures = "0.2"
tracing-subscriber = { version = "0.3", default-features = false }
tracing-test = "0.2.2"
typetag = "0.2"
uint = "0.9.5"
ureq = { version = "2.4", default-features = false }
url = "2.3"
walkdir = "2"
warp = "0.3"
which = "4.3"
ya-gcp = { version = "0.11.3", features = ["storage"] }
## TODO: remove this
cosmwasm-schema = "1.2.7"
[profile.release.package.access-control]
overflow-checks = true
[profile.release.package.account-utils]
overflow-checks = true
[profile.release.package.ecdsa-signature]
overflow-checks = true
[profile.release.package.hyperlane-sealevel-interchain-security-module-interface]
overflow-checks = true
[profile.release.package.hyperlane-sealevel-message-recipient-interface]
overflow-checks = true
[profile.release.package.multisig-ism]
overflow-checks = true
[profile.release.package.serializable-account-meta]
overflow-checks = true
[profile.release.package.hyperlane-sealevel-mailbox]
overflow-checks = true
[profile.release.package.hyperlane-sealevel-igp]
overflow-checks = true
[profile.release.package.hyperlane-sealevel-multisig-ism-message-id]
overflow-checks = true
[profile.release.package.hyperlane-sealevel-validator-announce]
overflow-checks = true
[workspace.dependencies.ethers]
features = []
git = "https://github.com/hyperlane-xyz/ethers-rs"
tag = "2024-04-25"
[workspace.dependencies.ethers-contract]
features = ["legacy"]
git = "https://github.com/hyperlane-xyz/ethers-rs"
tag = "2024-04-25"
[workspace.dependencies.ethers-core]
features = []
git = "https://github.com/hyperlane-xyz/ethers-rs"
tag = "2024-04-25"
[workspace.dependencies.ethers-providers]
features = []
git = "https://github.com/hyperlane-xyz/ethers-rs"
tag = "2024-04-25"
[workspace.dependencies.ethers-signers]
features = ["aws"]
git = "https://github.com/hyperlane-xyz/ethers-rs"
tag = "2024-04-25"
[patch.crates-io.curve25519-dalek]
branch = "v3.2.2-relax-zeroize"
git = "https://github.com/Eclipse-Laboratories-Inc/curve25519-dalek"
version = "3.2.2"
[patch.crates-io.ed25519-dalek]
branch = "main"
git = "https://github.com/Eclipse-Laboratories-Inc/ed25519-dalek"
version = "1.0.1"
[patch.crates-io.primitive-types]
branch = "hyperlane"
git = "https://github.com/hyperlane-xyz/parity-common.git"
version = "=0.12.1"
[patch.crates-io.rlp]
branch = "hyperlane"
git = "https://github.com/hyperlane-xyz/parity-common.git"
version = "=0.5.2"
[patch.crates-io.solana-account-decoder]
git = "https://github.com/hyperlane-xyz/solana.git"
tag = "hyperlane-1.14.13-2023-07-04"
version = "=1.14.13"
[patch.crates-io.solana-clap-utils]
git = "https://github.com/hyperlane-xyz/solana.git"
tag = "hyperlane-1.14.13-2023-07-04"
version = "=1.14.13"
[patch.crates-io.solana-cli-config]
git = "https://github.com/hyperlane-xyz/solana.git"
tag = "hyperlane-1.14.13-2023-07-04"
version = "=1.14.13"
[patch.crates-io.solana-client]
git = "https://github.com/hyperlane-xyz/solana.git"
tag = "hyperlane-1.14.13-2023-07-04"
version = "=1.14.13"
[patch.crates-io.solana-program]
git = "https://github.com/hyperlane-xyz/solana.git"
tag = "hyperlane-1.14.13-2023-07-04"
version = "=1.14.13"
[patch.crates-io.solana-sdk]
git = "https://github.com/hyperlane-xyz/solana.git"
tag = "hyperlane-1.14.13-2023-07-04"
version = "=1.14.13"
[patch.crates-io.solana-transaction-status]
git = "https://github.com/hyperlane-xyz/solana.git"
tag = "hyperlane-1.14.13-2023-07-04"
version = "=1.14.13"
[patch.crates-io.solana-zk-token-sdk]
git = "https://github.com/hyperlane-xyz/solana.git"
tag = "hyperlane-1.14.13-2023-07-04"
version = "=1.14.13"
[patch.crates-io.spl-associated-token-account]
branch = "hyperlane"
git = "https://github.com/hyperlane-xyz/solana-program-library.git"
version = "=1.1.2"
[patch.crates-io.spl-noop]
branch = "hyperlane"
git = "https://github.com/hyperlane-xyz/solana-program-library.git"
version = "=0.1.3"
[patch.crates-io.spl-token]
branch = "hyperlane"
git = "https://github.com/hyperlane-xyz/solana-program-library.git"
version = "=3.5.0"
[patch.crates-io.spl-token-2022]
branch = "hyperlane"
git = "https://github.com/hyperlane-xyz/solana-program-library.git"
version = "=0.5.0"
[patch.crates-io.spl-type-length-value]
version = "=0.1.0"
git = "https://github.com/hyperlane-xyz/solana-program-library.git"
branch = "hyperlane"
[patch.crates-io.tendermint]
branch = "trevor/0.32.2-fork"
git = "https://github.com/hyperlane-xyz/tendermint-rs.git"
version = "=0.32.2"
[patch.crates-io.tendermint-rpc]
branch = "trevor/0.32.2-fork"
git = "https://github.com/hyperlane-xyz/tendermint-rs.git"
version = "=0.32.2"

@ -1,4 +1,3 @@
cargo-features = ["workspace-inheritance"]
[package]
name = "relayer"
@ -35,13 +34,21 @@ serde.workspace = true
serde_json.workspace = true
strum.workspace = true
thiserror.workspace = true
tokio = { workspace = true, features = ["rt", "macros", "parking_lot", "rt-multi-thread"] }
tokio = { workspace = true, features = [
"rt",
"macros",
"parking_lot",
"rt-multi-thread",
] }
tokio-metrics.workspace = true
tracing-futures.workspace = true
tracing.workspace = true
typetag.workspace = true
hyperlane-core = { path = "../../hyperlane-core", features = ["agent", "async"] }
hyperlane-core = { path = "../../hyperlane-core", features = [
"agent",
"async",
] }
hyperlane-base = { path = "../../hyperlane-base", features = ["test-utils"] }
hyperlane-ethereum = { path = "../../chains/hyperlane-ethereum" }
@ -51,6 +58,7 @@ mockall.workspace = true
tokio-test.workspace = true
hyperlane-test = { path = "../../hyperlane-test" }
hyperlane-base = { path = "../../hyperlane-base", features = ["test-utils"] }
hyperlane-core = { path = "../../hyperlane-core", features = ["agent", "async", "test-utils"] }
[features]
default = ["color-eyre", "oneline-errors"]

@ -7,7 +7,7 @@ use std::{
use async_trait::async_trait;
use derive_new::new;
use eyre::Result;
use hyperlane_base::db::HyperlaneRocksDB;
use hyperlane_base::db::{HyperlaneDb, HyperlaneRocksDB};
use hyperlane_core::{HyperlaneDomain, MerkleTreeInsertion};
use prometheus::IntGauge;
use tokio::sync::RwLock;

@ -105,7 +105,7 @@ impl GasPaymentEnforcer {
for (policy, whitelist) in &self.policies {
if !whitelist.msg_matches(message, true) {
trace!(
msg=%message,
hyp_message=%message,
?policy,
?whitelist,
"Message did not match whitelist for policy"
@ -114,16 +114,17 @@ impl GasPaymentEnforcer {
}
trace!(
msg=%message,
hyp_message=%message,
?policy,
?whitelist,
"Message matched whitelist for policy"
);
debug!(
msg=%message,
hyp_message=%message,
?policy,
?current_payment,
?current_expenditure,
?tx_cost_estimate,
"Evaluating if message meets gas payment requirement",
);
return policy
@ -148,7 +149,7 @@ impl GasPaymentEnforcer {
}
error!(
msg=%message,
hyp_message=%message,
policies=?self.policies,
"No gas payment policy matched for message; consider adding a default policy to the end of the policies array which uses a wildcard whitelist."
);
@ -158,7 +159,7 @@ impl GasPaymentEnforcer {
pub fn record_tx_outcome(&self, message: &HyperlaneMessage, outcome: TxOutcome) -> Result<()> {
// This log is required in E2E, hence the use of a `const`
debug!(
msg=%message,
hyp_message=%message,
?outcome,
"{}",
GAS_EXPENDITURE_LOG_MESSAGE,

@ -118,6 +118,7 @@ impl AggregationIsmMetadataBuilder {
#[async_trait]
impl MetadataBuilder for AggregationIsmMetadataBuilder {
#[instrument(err, skip(self), ret)]
#[allow(clippy::blocks_in_conditions)] // TODO: `rustc` 1.80.1 clippy issue
async fn build(
&self,
ism_address: H256,

@ -1,3 +1,6 @@
#![allow(clippy::blocks_in_conditions)] // TODO: `rustc` 1.80.1 clippy issue
#![allow(clippy::unnecessary_get_then_check)] // TODO: `rustc` 1.80.1 clippy issue
use std::{
collections::HashMap,
fmt::Debug,
@ -19,7 +22,7 @@ use crate::{
use async_trait::async_trait;
use derive_new::new;
use eyre::{Context, Result};
use hyperlane_base::db::HyperlaneRocksDB;
use hyperlane_base::db::{HyperlaneDb, HyperlaneRocksDB};
use hyperlane_base::{
settings::{ChainConf, CheckpointSyncerConf},
CheckpointSyncer, CoreMetrics, MultisigCheckpointSyncer,
@ -397,7 +400,7 @@ impl BaseMetadataBuilder {
continue;
}
match config.build(None).await {
match config.build_and_validate(None).await {
Ok(checkpoint_syncer) => {
// found the syncer for this validator
checkpoint_syncers.insert(validator.into(), checkpoint_syncer.into());

@ -1,3 +1,5 @@
#![allow(clippy::blocks_in_conditions)] // TODO: `rustc` 1.80.1 clippy issue
use async_trait::async_trait;
use derive_more::Deref;
use derive_new::new;

@ -126,11 +126,11 @@ impl<T: MultisigIsmMetadataBuilder> MetadataBuilder for T {
.await
.context(CTX)?
{
debug!(?message, ?metadata.checkpoint, "Found checkpoint with quorum");
debug!(hyp_message=?message, ?metadata.checkpoint, "Found checkpoint with quorum");
Ok(Some(self.format_metadata(metadata)?))
} else {
info!(
?message, ?validators, threshold, ism=%multisig_ism.address(),
hyp_message=?message, ?validators, threshold, ism=%multisig_ism.address(),
"Could not fetch metadata: Unable to reach quorum"
);
Ok(None)

@ -45,7 +45,7 @@ impl MultisigIsmMetadataBuilder for MerkleRootMultisigMetadataBuilder {
.await
.context(CTX)?,
debug!(
?message,
hyp_message=?message,
"No merkle leaf found for message id, must have not been enqueued in the tree"
)
);

@ -42,7 +42,7 @@ impl MultisigIsmMetadataBuilder for MessageIdMultisigMetadataBuilder {
.await
.context(CTX)?,
debug!(
?message,
hyp_message=?message,
"No merkle leaf found for message id, must have not been enqueued in the tree"
)
);

@ -2,6 +2,7 @@ mod base;
mod merkle_root_multisig;
mod message_id_multisig;
#[allow(unused_imports)] // TODO: `rustc` 1.80.1 clippy issue
pub use base::{MetadataToken, MultisigIsmMetadataBuilder, MultisigMetadata};
pub use merkle_root_multisig::MerkleRootMultisigMetadataBuilder;

@ -10,6 +10,7 @@ pub struct NullMetadataBuilder {}
#[async_trait]
impl MetadataBuilder for NullMetadataBuilder {
#[allow(clippy::blocks_in_conditions)] // TODO: `rustc` 1.80.1 clippy issue
#[instrument(err, skip(self))]
async fn build(
&self,

@ -15,6 +15,7 @@ pub struct RoutingIsmMetadataBuilder {
#[async_trait]
impl MetadataBuilder for RoutingIsmMetadataBuilder {
#[instrument(err, skip(self), ret)]
#[allow(clippy::blocks_in_conditions)] // TODO: `rustc` 1.80.1 clippy issue
async fn build(
&self,
ism_address: H256,

@ -1,3 +1,5 @@
#![allow(clippy::doc_lazy_continuation)] // TODO: `rustc` 1.80.1 clippy issue
//! Processor scans DB for new messages and wraps relevant messages as a
//! `PendingOperation` and then sends it over a channel to a submitter for
//! delivery.

@ -6,7 +6,7 @@ use prometheus::{IntGauge, IntGaugeVec};
use tokio::sync::{broadcast::Receiver, Mutex};
use tracing::{debug, info, instrument};
use crate::server::MessageRetryRequest;
use crate::settings::matching_list::MatchingList;
pub type OperationPriorityQueue = Arc<Mutex<BinaryHeap<Reverse<QueueOperation>>>>;
@ -16,7 +16,7 @@ pub type OperationPriorityQueue = Arc<Mutex<BinaryHeap<Reverse<QueueOperation>>>
pub struct OpQueue {
metrics: IntGaugeVec,
queue_metrics_label: String,
retry_rx: Arc<Mutex<Receiver<MessageRetryRequest>>>,
retry_rx: Arc<Mutex<Receiver<MatchingList>>>,
#[new(default)]
pub queue: OperationPriorityQueue,
}
@ -29,11 +29,10 @@ impl OpQueue {
/// it's very likely that its status has just changed, so this forces the caller to consider the new status
#[instrument(skip(self), ret, fields(queue_label=%self.queue_metrics_label), level = "trace")]
pub async fn push(&self, mut op: QueueOperation, new_status: Option<PendingOperationStatus>) {
if let Some(new_status) = new_status {
op.set_status(new_status);
}
// increment the metric before pushing onto the queue, because we lose ownership afterwards
self.get_operation_metric(op.as_ref()).inc();
op.set_status_and_update_metrics(
new_status,
Arc::new(self.get_operation_metric(op.as_ref())),
);
self.queue.lock().await.push(Reverse(op));
}
@ -52,9 +51,6 @@ impl OpQueue {
let mut queue = self.queue.lock().await;
let mut popped = vec![];
while let Some(Reverse(op)) = queue.pop() {
// even if the metric is decremented here, the operation may fail to process and be re-added to the queue.
// in those cases, the queue length will look like it has spikes whose sizes are at most `limit`
self.get_operation_metric(op.as_ref()).dec();
popped.push(op);
if popped.len() >= limit {
break;
@ -88,9 +84,7 @@ impl OpQueue {
let mut reprioritized_queue: BinaryHeap<_> = queue
.drain()
.map(|Reverse(mut op)| {
// Can check for equality here because of the PartialEq implementation for MessageRetryRequest,
// but can't use `contains` because the types are different
if message_retry_requests.iter().any(|r| r == op) {
if message_retry_requests.iter().any(|r| r.op_matches(&op)) {
info!(
operation = %op,
queue_label = %self.queue_metrics_label,
@ -120,12 +114,14 @@ impl OpQueue {
pub mod test {
use super::*;
use hyperlane_core::{
HyperlaneDomain, HyperlaneMessage, KnownHyperlaneDomain, PendingOperationResult,
HyperlaneDomain, HyperlaneDomainProtocol, HyperlaneDomainTechnicalStack,
HyperlaneDomainType, HyperlaneMessage, KnownHyperlaneDomain, PendingOperationResult,
TryBatchAs, TxOutcome, H256, U256,
};
use serde::Serialize;
use std::{
collections::VecDeque,
str::FromStr,
time::{Duration, Instant},
};
use tokio::sync;
@ -133,6 +129,10 @@ pub mod test {
#[derive(Debug, Clone, Serialize)]
pub struct MockPendingOperation {
id: H256,
sender_address: H256,
origin_domain_id: u32,
destination_domain_id: u32,
recipient_address: H256,
seconds_to_next_attempt: u64,
destination_domain: HyperlaneDomain,
}
@ -142,7 +142,50 @@ pub mod test {
Self {
id: H256::random(),
seconds_to_next_attempt,
destination_domain_id: destination_domain.id(),
destination_domain,
sender_address: H256::random(),
recipient_address: H256::random(),
origin_domain_id: 0,
}
}
pub fn with_message_data(message: HyperlaneMessage) -> Self {
Self {
id: message.id(),
sender_address: message.sender,
recipient_address: message.recipient,
origin_domain_id: message.origin,
destination_domain_id: message.destination,
seconds_to_next_attempt: 0,
destination_domain: HyperlaneDomain::Unknown {
domain_id: message.destination,
domain_name: "test".to_string(),
domain_type: HyperlaneDomainType::Unknown,
domain_protocol: HyperlaneDomainProtocol::Ethereum,
domain_technical_stack: HyperlaneDomainTechnicalStack::Other,
},
}
}
pub fn with_id(self, id: &str) -> Self {
Self {
id: H256::from_str(id).unwrap(),
..self
}
}
pub fn with_sender_address(self, sender_address: &str) -> Self {
Self {
sender_address: H256::from_str(sender_address).unwrap(),
..self
}
}
pub fn with_recipient_address(self, recipient_address: &str) -> Self {
Self {
recipient_address: H256::from_str(recipient_address).unwrap(),
..self
}
}
}
@ -166,6 +209,20 @@ pub mod test {
self.seconds_to_next_attempt = 0;
}
fn sender_address(&self) -> &H256 {
&self.sender_address
}
fn recipient_address(&self) -> &H256 {
&self.recipient_address
}
fn get_metric(&self) -> Option<Arc<IntGauge>> {
None
}
fn set_metric(&mut self, _metric: Arc<IntGauge>) {}
fn priority(&self) -> u32 {
todo!()
}
@ -179,7 +236,7 @@ pub mod test {
}
fn origin_domain_id(&self) -> u32 {
todo!()
self.origin_domain_id
}
fn destination_domain(&self) -> &HyperlaneDomain {
@ -196,7 +253,7 @@ pub mod test {
/// Submit this operation to the blockchain and report if it was successful
/// or not.
async fn submit(&mut self) {
async fn submit(&mut self) -> PendingOperationResult {
todo!()
}
@ -306,10 +363,10 @@ pub mod test {
// Retry by message ids
broadcaster
.send(MessageRetryRequest::MessageId(op_ids[1]))
.send(MatchingList::with_message_id(op_ids[1]))
.unwrap();
broadcaster
.send(MessageRetryRequest::MessageId(op_ids[2]))
.send(MatchingList::with_message_id(op_ids[2]))
.unwrap();
// Pop elements from queue 1
@ -367,7 +424,7 @@ pub mod test {
// Retry by domain
broadcaster
.send(MessageRetryRequest::DestinationDomain(
.send(MatchingList::with_destination_domain(
destination_domain_2.id(),
))
.unwrap();

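The retry channel now carries a full MatchingList instead of the old MessageRetryRequest enum, so retry requests can match on more than a message id or a destination domain. A sketch of the new request flow, using the constructors visible in the test diff above:

// Sketch: retry one specific message, then everything headed to domain 42.
broadcaster.send(MatchingList::with_message_id(some_op_id)).unwrap();
broadcaster.send(MatchingList::with_destination_domain(42)).unwrap();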
@ -1,3 +1,6 @@
#![allow(clippy::doc_markdown)] // TODO: `rustc` 1.80.1 clippy issue
#![allow(clippy::doc_lazy_continuation)] // TODO: `rustc` 1.80.1 clippy issue
use std::sync::Arc;
use std::time::Duration;
@ -9,6 +12,7 @@ use hyperlane_core::BatchResult;
use hyperlane_core::ConfirmReason::*;
use hyperlane_core::PendingOperation;
use hyperlane_core::PendingOperationStatus;
use hyperlane_core::ReprepareReason;
use itertools::Either;
use itertools::Itertools;
use prometheus::{IntCounter, IntGaugeVec};
@ -28,7 +32,7 @@ use hyperlane_core::{
};
use crate::msg::pending_message::CONFIRM_DELAY;
use crate::server::MessageRetryRequest;
use crate::settings::matching_list::MatchingList;
use super::op_queue::OpQueue;
use super::op_queue::OperationPriorityQueue;
@ -101,7 +105,7 @@ impl SerialSubmitter {
pub fn new(
domain: HyperlaneDomain,
rx: mpsc::UnboundedReceiver<QueueOperation>,
retry_op_transmitter: Sender<MessageRetryRequest>,
retry_op_transmitter: Sender<MatchingList>,
metrics: SerialSubmitterMetrics,
max_batch_size: u32,
task_monitor: TaskMonitor,
@ -179,6 +183,7 @@ impl SerialSubmitter {
&task_monitor,
submit_task(
domain.clone(),
prepare_queue.clone(),
submit_queue,
confirm_queue.clone(),
max_batch_size,
@ -288,6 +293,7 @@ async fn prepare_task(
}
PendingOperationResult::Drop => {
metrics.ops_dropped.inc();
op.decrement_metric_if_exists();
}
PendingOperationResult::Confirm(reason) => {
debug!(?op, "Pushing operation to confirm queue");
@ -307,6 +313,7 @@ async fn prepare_task(
#[instrument(skip_all, fields(%domain))]
async fn submit_task(
domain: HyperlaneDomain,
mut prepare_queue: OpQueue,
mut submit_queue: OpQueue,
mut confirm_queue: OpQueue,
max_batch_size: u32,
@ -324,25 +331,59 @@ async fn submit_task(
}
std::cmp::Ordering::Equal => {
let op = batch.pop().unwrap();
submit_single_operation(op, &mut confirm_queue, &metrics).await;
submit_single_operation(op, &mut prepare_queue, &mut confirm_queue, &metrics).await;
}
std::cmp::Ordering::Greater => {
OperationBatch::new(batch, domain.clone())
.submit(&mut confirm_queue, &metrics)
.submit(&mut prepare_queue, &mut confirm_queue, &metrics)
.await;
}
}
}
}
#[instrument(skip(confirm_queue, metrics), ret, level = "debug")]
#[instrument(skip(prepare_queue, confirm_queue, metrics), ret, level = "debug")]
async fn submit_single_operation(
mut op: QueueOperation,
prepare_queue: &mut OpQueue,
confirm_queue: &mut OpQueue,
metrics: &SerialSubmitterMetrics,
) {
let status = op.submit().await;
match status {
PendingOperationResult::Reprepare(reprepare_reason) => {
prepare_queue
.push(op, Some(PendingOperationStatus::Retry(reprepare_reason)))
.await;
}
PendingOperationResult::NotReady => {
// This `match` arm isn't expected to be hit, but it's here for completeness,
// hence the hardcoded `ReprepareReason`
prepare_queue
.push(
op,
Some(PendingOperationStatus::Retry(
ReprepareReason::ErrorSubmitting,
)),
)
.await;
}
PendingOperationResult::Drop => {
// Not expected to hit this case in `submit`, but it's here for completeness
op.decrement_metric_if_exists();
}
PendingOperationResult::Success | PendingOperationResult::Confirm(_) => {
confirm_op(op, confirm_queue, metrics).await
}
}
}
async fn confirm_op(
mut op: QueueOperation,
confirm_queue: &mut OpQueue,
metrics: &SerialSubmitterMetrics,
) {
let destination = op.destination_domain().clone();
op.submit().await;
debug!(?op, "Operation submitted");
op.set_next_attempt_after(CONFIRM_DELAY);
confirm_queue
@ -418,6 +459,7 @@ async fn confirm_operation(
PendingOperationResult::Success => {
debug!(?op, "Operation confirmed");
metrics.ops_confirmed.inc();
op.decrement_metric_if_exists();
}
PendingOperationResult::NotReady => {
confirm_queue.push(op, None).await;
@ -436,6 +478,7 @@ async fn confirm_operation(
}
PendingOperationResult::Drop => {
metrics.ops_dropped.inc();
op.decrement_metric_if_exists();
}
}
operation_result
@ -483,7 +526,12 @@ struct OperationBatch {
}
impl OperationBatch {
async fn submit(self, confirm_queue: &mut OpQueue, metrics: &SerialSubmitterMetrics) {
async fn submit(
self,
prepare_queue: &mut OpQueue,
confirm_queue: &mut OpQueue,
metrics: &SerialSubmitterMetrics,
) {
let excluded_ops = match self.try_submit_as_batch(metrics).await {
Ok(batch_result) => {
Self::handle_batch_result(self.operations, batch_result, confirm_queue).await
@ -497,7 +545,7 @@ impl OperationBatch {
if !excluded_ops.is_empty() {
warn!(excluded_ops=?excluded_ops, "Either the batch tx would revert, or the operations would revert in the batch. Falling back to serial submission.");
OperationBatch::new(excluded_ops, self.domain)
.submit_serially(confirm_queue, metrics)
.submit_serially(prepare_queue, confirm_queue, metrics)
.await;
}
}
@ -562,9 +610,14 @@ impl OperationBatch {
}
}
async fn submit_serially(self, confirm_queue: &mut OpQueue, metrics: &SerialSubmitterMetrics) {
async fn submit_serially(
self,
prepare_queue: &mut OpQueue,
confirm_queue: &mut OpQueue,
metrics: &SerialSubmitterMetrics,
) {
for op in self.operations.into_iter() {
submit_single_operation(op, confirm_queue, metrics).await;
submit_single_operation(op, prepare_queue, confirm_queue, metrics).await;
}
}
}

@ -1,3 +1,5 @@
#![allow(clippy::clone_on_ref_ptr)] // TODO: `rustc` 1.80.1 clippy issue
use std::{
fmt::{Debug, Formatter},
sync::Arc,
@ -7,7 +9,10 @@ use std::{
use async_trait::async_trait;
use derive_new::new;
use eyre::Result;
use hyperlane_base::{db::HyperlaneRocksDB, CoreMetrics};
use hyperlane_base::{
db::{HyperlaneDb, HyperlaneRocksDB},
CoreMetrics,
};
use hyperlane_core::{
gas_used_by_operation, BatchItem, ChainCommunicationError, ChainResult, ConfirmReason,
HyperlaneChain, HyperlaneDomain, HyperlaneMessage, Mailbox, MessageSubmissionData,
@ -28,7 +33,7 @@ pub const CONFIRM_DELAY: Duration = if cfg!(any(test, feature = "test-utils")) {
Duration::from_secs(5)
} else {
// Wait 10 min after submitting the message before confirming in normal/production mode
Duration::from_secs(60)
Duration::from_secs(60 * 10)
};
/// The message context contains the links needed to submit a message. Each
@ -74,6 +79,12 @@ pub struct PendingMessage {
#[new(default)]
#[serde(skip_serializing)]
submission_outcome: Option<TxOutcome>,
#[new(default)]
#[serde(skip_serializing)]
metadata: Option<Vec<u8>>,
#[new(default)]
#[serde(skip_serializing)]
metric: Option<Arc<IntGauge>>,
}
impl Debug for PendingMessage {
@ -156,6 +167,14 @@ impl PendingOperation for PendingMessage {
self.ctx.destination_mailbox.domain()
}
fn sender_address(&self) -> &H256 {
&self.message.sender
}
fn recipient_address(&self) -> &H256 {
&self.message.recipient
}
fn retrieve_status_from_db(&self) -> Option<PendingOperationStatus> {
match self.ctx.origin_db.retrieve_status_by_message_id(&self.id()) {
Ok(status) => status,
@ -252,6 +271,7 @@ impl PendingOperation for PendingMessage {
return self.on_reprepare(Some(err), ReprepareReason::ErrorBuildingMetadata);
}
};
self.metadata = metadata.clone();
let Some(metadata) = metadata else {
return self.on_reprepare::<String>(None, ReprepareReason::CouldNotFetchMetadata);
@ -267,7 +287,7 @@ impl PendingOperation for PendingMessage {
.process_estimate_costs(&self.message, &metadata)
.await
{
Ok(metadata) => metadata,
Ok(tx_cost_estimate) => tx_cost_estimate,
Err(err) => {
return self.on_reprepare(Some(err), ReprepareReason::ErrorEstimatingGas);
}
@ -319,10 +339,10 @@ impl PendingOperation for PendingMessage {
}
#[instrument]
async fn submit(&mut self) {
async fn submit(&mut self) -> PendingOperationResult {
if self.submitted {
// this message has already been submitted, possibly not by us
return;
return PendingOperationResult::Success;
}
let state = self
@ -330,6 +350,19 @@ impl PendingOperation for PendingMessage {
.clone()
.expect("Pending message must be prepared before it can be submitted");
// To avoid spending gas on a tx that will revert, dry-run just before submitting.
if let Some(metadata) = self.metadata.as_ref() {
if self
.ctx
.destination_mailbox
.process_estimate_costs(&self.message, metadata)
.await
.is_err()
{
return self.on_reprepare::<String>(None, ReprepareReason::ErrorEstimatingGas);
}
}
// We use the estimated gas limit from the prior call to
// `process_estimate_costs` to avoid a second gas estimation.
let tx_outcome = self
@ -340,9 +373,11 @@ impl PendingOperation for PendingMessage {
match tx_outcome {
Ok(outcome) => {
self.set_operation_outcome(outcome, state.gas_limit);
PendingOperationResult::Confirm(ConfirmReason::SubmittedBySelf)
}
Err(e) => {
error!(error=?e, "Error when processing message");
return PendingOperationResult::Reprepare(ReprepareReason::ErrorSubmitting);
}
}
}
@ -457,6 +492,14 @@ impl PendingOperation for PendingMessage {
fn try_get_mailbox(&self) -> Option<Arc<dyn Mailbox>> {
Some(self.ctx.destination_mailbox.clone())
}
fn get_metric(&self) -> Option<Arc<IntGauge>> {
self.metric.clone()
}
fn set_metric(&mut self, metric: Arc<IntGauge>) {
self.metric = Some(metric);
}
}
impl PendingMessage {
@ -510,9 +553,9 @@ impl PendingMessage {
fn on_reconfirm<E: Debug>(&mut self, err: Option<E>, reason: &str) -> PendingOperationResult {
self.inc_attempts();
if let Some(e) = err {
warn!(error = ?e, id = ?self.id(), "Reconfirming message: {}", reason.clone());
warn!(error = ?e, id = ?self.id(), "Reconfirming message: {}", reason);
} else {
warn!(id = ?self.id(), "Reconfirming message: {}", reason.clone());
warn!(id = ?self.id(), "Reconfirming message: {}", reason);
}
PendingOperationResult::NotReady
}

@ -11,7 +11,7 @@ use derive_new::new;
use ethers::utils::hex;
use eyre::Result;
use hyperlane_base::{
db::{HyperlaneRocksDB, ProcessMessage},
db::{HyperlaneDb, HyperlaneRocksDB},
CoreMetrics,
};
use hyperlane_core::{HyperlaneDomain, HyperlaneMessage, QueueOperation};
@ -52,7 +52,7 @@ struct ForwardBackwardIterator {
impl ForwardBackwardIterator {
#[instrument(skip(db), ret)]
fn new(db: Arc<dyn ProcessMessage>) -> Self {
fn new(db: Arc<dyn HyperlaneDb>) -> Self {
let high_nonce = db.retrieve_highest_seen_message_nonce().ok().flatten();
let domain = db.domain().name().to_owned();
let high_nonce_iter = DirectionalNonceIterator::new(
@ -125,7 +125,7 @@ enum NonceDirection {
struct DirectionalNonceIterator {
nonce: Option<u32>,
direction: NonceDirection,
db: Arc<dyn ProcessMessage>,
db: Arc<dyn HyperlaneDb>,
domain_name: String,
}
@ -163,7 +163,7 @@ impl DirectionalNonceIterator {
if let Some(message) = self.indexed_message_with_nonce()? {
Self::update_max_nonce_gauge(&message, metrics);
if !self.is_message_processed()? {
debug!(?message, iterator=?self, "Found processable message");
debug!(hyp_message=?message, iterator=?self, "Found processable message");
return Ok(MessageStatus::Processable(message));
} else {
return Ok(MessageStatus::Processed);
@ -196,7 +196,10 @@ impl DirectionalNonceIterator {
let Some(nonce) = self.nonce else {
return Ok(false);
};
let processed = self.db.retrieve_processed_by_nonce(nonce)?.unwrap_or(false);
let processed = self
.db
.retrieve_processed_by_nonce(&nonce)?
.unwrap_or(false);
if processed {
trace!(
nonce,
@ -258,8 +261,8 @@ impl ProcessorExt for MessageProcessor {
}
// Skip if the message is blacklisted
if self.message_whitelist.msg_matches(&msg, false) {
debug!(?msg, blacklist=?self.message_whitelist, "Message blacklisted, skipping");
if self.message_blacklist.msg_matches(&msg, false) {
debug!(?msg, blacklist=?self.message_blacklist, "Message blacklisted, skipping");
return Ok(());
}
@ -326,7 +329,7 @@ impl MessageProcessor {
send_channels,
destination_ctxs,
metric_app_contexts,
nonce_iterator: ForwardBackwardIterator::new(Arc::new(db) as Arc<dyn ProcessMessage>),
nonce_iterator: ForwardBackwardIterator::new(Arc::new(db) as Arc<dyn HyperlaneDb>),
}
}
@@ -394,9 +397,16 @@ mod test {
use super::*;
use hyperlane_base::{
db::{test_utils, DbResult, HyperlaneRocksDB},
db::{
test_utils, DbResult, HyperlaneRocksDB, InterchainGasExpenditureData,
InterchainGasPaymentData,
},
settings::{ChainConf, ChainConnectionConf, Settings},
};
use hyperlane_core::{
test_utils::dummy_domain, GasPaymentKey, InterchainGasPayment, InterchainGasPaymentMeta,
MerkleTreeInsertion, PendingOperationStatus, H256,
};
use hyperlane_test::mocks::{MockMailboxContract, MockValidatorAnnounceContract};
use prometheus::{IntCounter, Registry};
use tokio::{
@@ -527,17 +537,6 @@ mod test {
}
}
fn dummy_domain(domain_id: u32, name: &str) -> HyperlaneDomain {
let test_domain = HyperlaneDomain::new_test_domain(name);
HyperlaneDomain::Unknown {
domain_id,
domain_name: name.to_owned(),
domain_type: test_domain.domain_type(),
domain_protocol: test_domain.domain_protocol(),
domain_technical_stack: test_domain.domain_technical_stack(),
}
}
/// Only adds database entries to the pending message prefix if the message's
/// retry count is greater than zero
fn persist_retried_messages(
@@ -591,11 +590,153 @@ mod test {
fn fmt<'a>(&self, f: &mut std::fmt::Formatter<'a>) -> std::fmt::Result;
}
impl ProcessMessage for Db {
impl HyperlaneDb for Db {
/// Retrieve the nonce of the highest processed message we're aware of
fn retrieve_highest_seen_message_nonce(&self) -> DbResult<Option<u32>>;
/// Retrieve a message by its nonce
fn retrieve_message_by_nonce(&self, nonce: u32) -> DbResult<Option<HyperlaneMessage>>;
fn retrieve_processed_by_nonce(&self, nonce: u32) -> DbResult<Option<bool>>;
/// Retrieve whether a message has been processed
fn retrieve_processed_by_nonce(&self, nonce: &u32) -> DbResult<Option<bool>>;
/// Get the origin domain of the database
fn domain(&self) -> &HyperlaneDomain;
fn store_message_id_by_nonce(&self, nonce: &u32, id: &H256) -> DbResult<()>;
fn retrieve_message_id_by_nonce(&self, nonce: &u32) -> DbResult<Option<H256>>;
fn store_message_by_id(&self, id: &H256, message: &HyperlaneMessage) -> DbResult<()>;
fn retrieve_message_by_id(&self, id: &H256) -> DbResult<Option<HyperlaneMessage>>;
fn store_dispatched_block_number_by_nonce(
&self,
nonce: &u32,
block_number: &u64,
) -> DbResult<()>;
fn retrieve_dispatched_block_number_by_nonce(&self, nonce: &u32) -> DbResult<Option<u64>>;
/// Store whether a message was processed by its nonce
fn store_processed_by_nonce(&self, nonce: &u32, processed: &bool) -> DbResult<()>;
fn store_processed_by_gas_payment_meta(
&self,
meta: &InterchainGasPaymentMeta,
processed: &bool,
) -> DbResult<()>;
fn retrieve_processed_by_gas_payment_meta(
&self,
meta: &InterchainGasPaymentMeta,
) -> DbResult<Option<bool>>;
fn store_interchain_gas_expenditure_data_by_message_id(
&self,
message_id: &H256,
data: &InterchainGasExpenditureData,
) -> DbResult<()>;
fn retrieve_interchain_gas_expenditure_data_by_message_id(
&self,
message_id: &H256,
) -> DbResult<Option<InterchainGasExpenditureData>>;
/// Store the status of an operation by its message id
fn store_status_by_message_id(
&self,
message_id: &H256,
status: &PendingOperationStatus,
) -> DbResult<()>;
/// Retrieve the status of an operation by its message id
fn retrieve_status_by_message_id(
&self,
message_id: &H256,
) -> DbResult<Option<PendingOperationStatus>>;
fn store_interchain_gas_payment_data_by_gas_payment_key(
&self,
key: &GasPaymentKey,
data: &InterchainGasPaymentData,
) -> DbResult<()>;
fn retrieve_interchain_gas_payment_data_by_gas_payment_key(
&self,
key: &GasPaymentKey,
) -> DbResult<Option<InterchainGasPaymentData>>;
fn store_gas_payment_by_sequence(
&self,
sequence: &u32,
payment: &InterchainGasPayment,
) -> DbResult<()>;
fn retrieve_gas_payment_by_sequence(
&self,
sequence: &u32,
) -> DbResult<Option<InterchainGasPayment>>;
fn store_gas_payment_block_by_sequence(
&self,
sequence: &u32,
block_number: &u64,
) -> DbResult<()>;
fn retrieve_gas_payment_block_by_sequence(&self, sequence: &u32) -> DbResult<Option<u64>>;
/// Store the retry count for a pending message by its message id
fn store_pending_message_retry_count_by_message_id(
&self,
message_id: &H256,
count: &u32,
) -> DbResult<()>;
/// Retrieve the retry count for a pending message by its message id
fn retrieve_pending_message_retry_count_by_message_id(
&self,
message_id: &H256,
) -> DbResult<Option<u32>>;
fn store_merkle_tree_insertion_by_leaf_index(
&self,
leaf_index: &u32,
insertion: &MerkleTreeInsertion,
) -> DbResult<()>;
/// Retrieve the merkle tree insertion event by its leaf index
fn retrieve_merkle_tree_insertion_by_leaf_index(
&self,
leaf_index: &u32,
) -> DbResult<Option<MerkleTreeInsertion>>;
fn store_merkle_leaf_index_by_message_id(
&self,
message_id: &H256,
leaf_index: &u32,
) -> DbResult<()>;
/// Retrieve the merkle leaf index of a message in the merkle tree
fn retrieve_merkle_leaf_index_by_message_id(&self, message_id: &H256) -> DbResult<Option<u32>>;
fn store_merkle_tree_insertion_block_number_by_leaf_index(
&self,
leaf_index: &u32,
block_number: &u64,
) -> DbResult<()>;
fn retrieve_merkle_tree_insertion_block_number_by_leaf_index(
&self,
leaf_index: &u32,
) -> DbResult<Option<u64>>;
fn store_highest_seen_message_nonce_number(&self, nonce: &u32) -> DbResult<()>;
/// Retrieve the nonce of the highest processed message we're aware of
fn retrieve_highest_seen_message_nonce_number(&self) -> DbResult<Option<u32>>;
}
}
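The block above appears to be a mockall `mock!` invocation generating a `Db` test double for the expanded `HyperlaneDb` trait. A minimal self-contained illustration of that pattern, assuming the `mockall` crate; the trait and names here are hypothetical:

```rust
use mockall::mock;

// A small trait plus a generated mock, mirroring how the test above mocks
// `HyperlaneDb` for `Db`; `NonceStore` is illustrative only.
trait NonceStore {
    fn highest_nonce(&self) -> Option<u32>;
}

mock! {
    pub Store {}
    impl NonceStore for Store {
        fn highest_nonce(&self) -> Option<u32>;
    }
}

#[test]
fn returns_stubbed_nonce() {
    let mut store = MockStore::new();
    // Stub the trait method; mockall generates `expect_highest_nonce`.
    store.expect_highest_nonce().returning(|| Some(42));
    assert_eq!(store.highest_nonce(), Some(42));
}
```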

@@ -41,7 +41,7 @@ use crate::{
pending_message::{MessageContext, MessageSubmissionMetrics},
processor::{MessageProcessor, MessageProcessorMetrics},
},
server::{self as relayer_server, MessageRetryRequest},
server::{self as relayer_server},
settings::{matching_list::MatchingList, RelayerSettings},
};
use crate::{
@@ -311,7 +311,7 @@ impl BaseAgent for Relayer {
}));
tasks.push(console_server.instrument(info_span!("Tokio console server")));
}
let sender = BroadcastSender::<MessageRetryRequest>::new(ENDPOINT_MESSAGES_QUEUE_SIZE);
let sender = BroadcastSender::<MatchingList>::new(ENDPOINT_MESSAGES_QUEUE_SIZE);
// send channels by destination chain
let mut send_channels = HashMap::with_capacity(self.destination_chains.len());
let mut prep_queues = HashMap::with_capacity(self.destination_chains.len());
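The retry endpoint now broadcasts a `MatchingList` instead of a `MessageRetryRequest`. Assuming `BroadcastSender` aliases `tokio::sync::broadcast::Sender` (not confirmed by this diff alone), a minimal sketch of the one-sender, many-subscriber fan-out pattern, with `String` standing in for the payload type:

```rust
use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    // One sender, one subscriber per destination chain.
    let (tx, _rx) = broadcast::channel::<String>(32);

    let mut sub_a = tx.subscribe();
    let mut sub_b = tx.subscribe();

    tx.send("retry: messageid 0x1234".to_string()).unwrap();

    // Every subscriber sees every broadcast value.
    assert_eq!(sub_a.recv().await.unwrap(), sub_b.recv().await.unwrap());
}
```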

@@ -3,8 +3,8 @@ use axum::{
routing, Router,
};
use derive_new::new;
use hyperlane_core::QueueOperation;
use serde::Deserialize;
use hyperlane_core::{QueueOperation, H256};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
@@ -39,12 +39,27 @@ async fn list_operations(
format_queue(op_queue.clone()).await
}
#[derive(Debug, Serialize)]
struct OperationWithId<'a> {
id: H256,
operation: &'a QueueOperation,
}
impl<'a> OperationWithId<'a> {
fn new(operation: &'a QueueOperation) -> Self {
Self {
id: operation.id(),
operation,
}
}
}
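For reference, `OperationWithId` borrows the operation and serializes an extra `id` field next to it. A minimal stand-alone sketch of that wrap-a-borrow pattern with serde; `Op` and `WithId` are illustrative types, not the repo's:

```rust
use serde::Serialize;
use serde_json::json;

#[derive(Serialize)]
struct Op {
    payload: String,
}

// Borrowing keeps the wrapper cheap: no clone of the operation is needed
// just to attach an id during serialization.
#[derive(Serialize)]
struct WithId<'a> {
    id: u64,
    operation: &'a Op,
}

fn main() {
    let op = Op { payload: "hello".into() };
    let wrapped = WithId { id: 7, operation: &op };
    assert_eq!(
        serde_json::to_value(&wrapped).unwrap(),
        json!({ "id": 7, "operation": { "payload": "hello" } })
    );
}
```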
pub async fn format_queue(queue: OperationPriorityQueue) -> String {
let res: Result<Vec<Value>, _> = queue
.lock()
.await
.iter()
.map(|reverse| serde_json::to_value(&reverse.0))
.map(|reverse| serde_json::to_value(OperationWithId::new(&reverse.0)))
.collect();
match res.and_then(|v| serde_json::to_string_pretty(&v)) {
Ok(s) => s,
@@ -64,8 +79,6 @@ impl ListOperationsApi {
}
}
// TODO: there's some duplication between the setup for these tests and the one in `message_retry.rs`,
// which should be refactored into a common test setup.
#[cfg(test)]
mod tests {
use crate::msg::op_queue::{
@@ -94,6 +107,7 @@ mod tests {
let list_operations_api = ListOperationsApi::new(op_queues_map);
let (path, router) = list_operations_api.get_route();
let app = Router::new().nest(path, router);
// Running the app in the background using a test server
@@ -108,15 +122,58 @@ mod tests {
#[tokio::test]
async fn test_message_id_retry() {
let (addr, op_queue) = setup_test_server();
let dummy_operation_1 =
Box::new(MockPendingOperation::new(1, DUMMY_DOMAIN.into())) as QueueOperation;
let dummy_operation_2 =
Box::new(MockPendingOperation::new(2, DUMMY_DOMAIN.into())) as QueueOperation;
let v = vec![
serde_json::to_value(&dummy_operation_1).unwrap(),
serde_json::to_value(&dummy_operation_2).unwrap(),
];
let expected_response = serde_json::to_string_pretty(&v).unwrap();
let id_1 = "0x1acbee9798118b11ebef0d94b0a2936eafd58e3bfab91b05da875825c4a1c39b";
let id_2 = "0x51e7be221ce90a49dee46ca0d0270c48d338a7b9d85c2a89d83fac0816571914";
let sender_address = "0x586d41b02fb35df0f84ecb2b73e076b40c929ee3e1ceeada9a078aa7b46d3b08";
let recipient_address =
"0x586d41b02fb35df0f84ecb2b73e076b40c929ee3e1ceeada9a078aa7b46d3b08";
let dummy_operation_1 = Box::new(
MockPendingOperation::new(1, DUMMY_DOMAIN.into())
.with_id(id_1)
.with_sender_address(sender_address)
.with_recipient_address(recipient_address),
) as QueueOperation;
let dummy_operation_2 = Box::new(
MockPendingOperation::new(2, DUMMY_DOMAIN.into())
.with_id(id_2)
.with_sender_address(sender_address)
.with_recipient_address(recipient_address),
) as QueueOperation;
// The reason there is already an `id` inside `operation` here is that it is a field on `MockPendingOperation`.
// That field is missing on `PendingMessage` because it is derived, hence the need to serialize the id
// explicitly alongside the operation.
let expected_response = r#"[
{
"id": "0x1acbee9798118b11ebef0d94b0a2936eafd58e3bfab91b05da875825c4a1c39b",
"operation": {
"destination_domain": {
"Known": "Arbitrum"
},
"destination_domain_id": 42161,
"id": "0x1acbee9798118b11ebef0d94b0a2936eafd58e3bfab91b05da875825c4a1c39b",
"origin_domain_id": 0,
"recipient_address": "0x586d41b02fb35df0f84ecb2b73e076b40c929ee3e1ceeada9a078aa7b46d3b08",
"seconds_to_next_attempt": 1,
"sender_address": "0x586d41b02fb35df0f84ecb2b73e076b40c929ee3e1ceeada9a078aa7b46d3b08",
"type": "MockPendingOperation"
}
},
{
"id": "0x51e7be221ce90a49dee46ca0d0270c48d338a7b9d85c2a89d83fac0816571914",
"operation": {
"destination_domain": {
"Known": "Arbitrum"
},
"destination_domain_id": 42161,
"id": "0x51e7be221ce90a49dee46ca0d0270c48d338a7b9d85c2a89d83fac0816571914",
"origin_domain_id": 0,
"recipient_address": "0x586d41b02fb35df0f84ecb2b73e076b40c929ee3e1ceeada9a078aa7b46d3b08",
"seconds_to_next_attempt": 2,
"sender_address": "0x586d41b02fb35df0f84ecb2b73e076b40c929ee3e1ceeada9a078aa7b46d3b08",
"type": "MockPendingOperation"
}
}
]"#;
op_queue.lock().await.push(Reverse(dummy_operation_1));
op_queue.lock().await.push(Reverse(dummy_operation_2));
@@ -130,6 +187,8 @@ mod tests {
// Check that the response status code is OK
assert_eq!(response.status(), StatusCode::OK);
assert_eq!(response.text().await.unwrap(), expected_response);
let response_text = response.text().await.unwrap();
assert_eq!(response_text, expected_response);
}
}
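These tests spin the axum `Router` up on an ephemeral local port in a background task and query it over HTTP. A minimal sketch of that pattern, assuming axum 0.7-style `axum::serve` and the `reqwest` crate; the repo's actual test setup may differ:

```rust
use axum::{routing::get, Router};
use tokio::net::TcpListener;

#[tokio::test]
async fn serves_in_background() {
    // Bind to an ephemeral port, serve in the background, then hit it.
    let app = Router::new().route("/list_operations", get(|| async { "[]" }));
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let addr = listener.local_addr().unwrap();
    tokio::spawn(async move {
        axum::serve(listener, app).await.unwrap();
    });

    let body = reqwest::get(format!("http://{addr}/list_operations"))
        .await
        .unwrap()
        .text()
        .await
        .unwrap();
    assert_eq!(body, "[]");
}
```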
