Merge branch 'main' into cli-2.0

noah/no-warp
Connor McEwen 5 months ago
commit 70c4c28c2f
  1. 6
      .changeset/mean-impalas-leave.md
  2. 5
      .changeset/olive-geckos-behave.md
  3. 20
      .changeset/pre.json
  4. 6
      .changeset/sixty-ducks-brush.md
  5. 4
      .github/workflows/agent-release-artifacts.yml
  6. 10
      .github/workflows/monorepo-docker.yml
  7. 4
      .github/workflows/release.yml
  8. 10
      .github/workflows/rust-docker.yml
  9. 2
      .github/workflows/rust-skipped.yml
  10. 13
      .github/workflows/rust.yml
  11. 10
      .github/workflows/static-analysis.yml
  12. 8
      .github/workflows/storage-analysis.yml
  13. 109
      .github/workflows/test-skipped.yml
  14. 86
      .github/workflows/test.yml
  15. 6
      CODE_OF_CONDUCT.md
  16. 6
      README.md
  17. 5
      funding.json
  18. 2
      package.json
  19. 2
      rust/.vscode/extensions.json
  20. 2
      rust/Cargo.lock
  21. 2
      rust/agents/relayer/Cargo.toml
  22. 10
      rust/agents/relayer/src/lib.rs
  23. 10
      rust/agents/relayer/src/main.rs
  24. 9
      rust/agents/relayer/src/msg/gas_payment/mod.rs
  25. 3
      rust/agents/relayer/src/msg/mod.rs
  26. 65
      rust/agents/relayer/src/msg/op_queue.rs
  27. 27
      rust/agents/relayer/src/msg/op_submitter.rs
  28. 68
      rust/agents/relayer/src/msg/pending_message.rs
  29. 16
      rust/agents/relayer/src/msg/processor.rs
  30. 53
      rust/agents/relayer/src/relayer.rs
  31. 18
      rust/agents/relayer/src/server.rs
  32. 40
      rust/agents/scraper/src/agent.rs
  33. 5
      rust/agents/validator/src/validator.rs
  34. 2
      rust/chains/hyperlane-cosmos/src/interchain_gas.rs
  35. 4
      rust/chains/hyperlane-cosmos/src/mailbox.rs
  36. 2
      rust/chains/hyperlane-cosmos/src/merkle_tree_hook.rs
  37. 34
      rust/chains/hyperlane-ethereum/src/contracts/interchain_gas.rs
  38. 28
      rust/chains/hyperlane-ethereum/src/contracts/mailbox.rs
  39. 31
      rust/chains/hyperlane-ethereum/src/contracts/merkle_tree_hook.rs
  40. 5
      rust/chains/hyperlane-ethereum/src/contracts/mod.rs
  41. 48
      rust/chains/hyperlane-ethereum/src/contracts/utils.rs
  42. 2
      rust/chains/hyperlane-fuel/src/interchain_gas.rs
  43. 4
      rust/chains/hyperlane-fuel/src/mailbox.rs
  44. 2
      rust/chains/hyperlane-sealevel/src/interchain_gas.rs
  45. 4
      rust/chains/hyperlane-sealevel/src/mailbox.rs
  46. 4
      rust/chains/hyperlane-sealevel/src/merkle_tree_hook.rs
  47. 15
      rust/hyperlane-base/src/contract_sync/cursors/mod.rs
  48. 12
      rust/hyperlane-base/src/contract_sync/cursors/rate_limited.rs
  49. 20
      rust/hyperlane-base/src/contract_sync/cursors/sequence_aware/backward.rs
  50. 14
      rust/hyperlane-base/src/contract_sync/cursors/sequence_aware/forward.rs
  51. 1
      rust/hyperlane-base/src/contract_sync/cursors/sequence_aware/mod.rs
  52. 203
      rust/hyperlane-base/src/contract_sync/mod.rs
  53. 6
      rust/hyperlane-base/src/db/rocks/hyperlane_db.rs
  54. 4
      rust/hyperlane-base/src/settings/base.rs
  55. 2
      rust/hyperlane-core/Cargo.toml
  56. 30
      rust/hyperlane-core/src/chain.rs
  57. 8
      rust/hyperlane-core/src/traits/cursor.rs
  58. 12
      rust/hyperlane-core/src/traits/indexer.rs
  59. 2
      rust/hyperlane-core/src/traits/mod.rs
  60. 56
      rust/hyperlane-core/src/traits/pending_operation.rs
  61. 50
      rust/hyperlane-core/src/types/channel.rs
  62. 4
      rust/hyperlane-core/src/types/mod.rs
  63. 27
      rust/hyperlane-core/src/types/primitive_types.rs
  64. 2
      rust/utils/backtrace-oneline/src/lib.rs
  65. 2
      rust/utils/run-locally/Cargo.toml
  66. 4
      rust/utils/run-locally/src/config.rs
  67. 2
      rust/utils/run-locally/src/cosmos/cli.rs
  68. 4
      rust/utils/run-locally/src/cosmos/mod.rs
  69. 2
      rust/utils/run-locally/src/ethereum/mod.rs
  70. 35
      rust/utils/run-locally/src/invariants.rs
  71. 153
      rust/utils/run-locally/src/main.rs
  72. 55
      rust/utils/run-locally/src/program.rs
  73. 2
      rust/utils/run-locally/src/solana.rs
  74. 18
      rust/utils/run-locally/src/utils.rs
  75. 22
      solidity/CHANGELOG.md
  76. 38
      solidity/contracts/client/GasRouter.sol
  77. 81
      solidity/contracts/client/MailboxClient.sol
  78. 65
      solidity/contracts/client/Router.sol
  79. 60
      solidity/contracts/test/ERC20Test.sol
  80. 2
      solidity/contracts/test/TestGasRouter.sol
  81. 16
      solidity/contracts/token/HypNative.sol
  82. 4
      solidity/contracts/token/README.md
  83. 12
      solidity/contracts/token/extensions/HypNativeScaled.sol
  84. 4
      solidity/contracts/token/extensions/HypXERC20.sol
  85. 28
      solidity/contracts/token/extensions/HypXERC20Lockbox.sol
  86. 18
      solidity/contracts/token/interfaces/IXERC20.sol
  87. 6
      solidity/contracts/token/libs/FastTokenRouter.sol
  88. 67
      solidity/contracts/token/libs/TokenRouter.sol
  89. 2
      solidity/coverage.sh
  90. 6
      solidity/foundry.toml
  91. 7
      solidity/package.json
  92. 40
      solidity/script/avs/eigenlayer_addresses.json
  93. 4
      solidity/script/xerc20/.env.blast
  94. 5
      solidity/script/xerc20/.env.ethereum
  95. 50
      solidity/script/xerc20/ApproveLockbox.s.sol
  96. 37
      solidity/script/xerc20/GrantLimits.s.sol
  97. 127
      solidity/script/xerc20/ezETH.s.sol
  98. 107
      solidity/test/AnvilRPC.sol
  99. 7
      solidity/test/InterchainAccountRouter.t.sol
  100. 110
      solidity/test/token/HypERC20.t.sol
  101. Some files were not shown because too many files have changed in this diff Show More

@ -0,0 +1,6 @@
---
'@hyperlane-xyz/core': patch
'@hyperlane-xyz/helloworld': patch
---
fix: `TokenRouter.transferRemote` with hook overrides

@ -0,0 +1,5 @@
---
'@hyperlane-xyz/sdk': patch
---
Do not consider xERC20 a collateral standard to fix fungibility checking logic while maintaining mint limit checking

@ -1,20 +0,0 @@
{
"mode": "pre",
"tag": "alpha",
"initialVersions": {
"@hyperlane-xyz/core": "4.0.0-alpha.0",
"@hyperlane-xyz/ccip-server": "4.0.0-alpha.0",
"@hyperlane-xyz/cli": "4.0.0-alpha.0",
"@hyperlane-xyz/helloworld": "4.0.0-alpha.0",
"@hyperlane-xyz/infra": "4.0.0-alpha.0",
"@hyperlane-xyz/sdk": "4.0.0-alpha.0",
"@hyperlane-xyz/utils": "4.0.0-alpha.0"
},
"changesets": [
"bright-emus-double",
"five-baboons-smoke",
"late-rings-attack",
"sharp-geckos-wash",
"slimy-toys-argue"
]
}

@ -0,0 +1,6 @@
---
'@hyperlane-xyz/cli': patch
'@hyperlane-xyz/sdk': patch
---
Support priorityFee fetching from RPC and some better logging

@ -43,7 +43,7 @@ jobs:
runs-on: ${{ matrix.OS }} runs-on: ${{ matrix.OS }}
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: ubuntu setup - name: ubuntu setup
if: ${{ matrix.OS == 'larger-runner' }} if: ${{ matrix.OS == 'larger-runner' }}
run: | run: |
@ -74,7 +74,7 @@ jobs:
run: chmod ug+x,-w relayer scraper validator run: chmod ug+x,-w relayer scraper validator
working-directory: rust/target/${{ matrix.TARGET }}/release working-directory: rust/target/${{ matrix.TARGET }}/release
- name: upload binaries - name: upload binaries
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v4
with: with:
name: ${{ matrix.TARGET }}-${{ needs.prepare.outputs.tag_sha }}-${{ needs.prepare.outputs.tag_date }} name: ${{ matrix.TARGET }}-${{ needs.prepare.outputs.tag_sha }}-${{ needs.prepare.outputs.tag_date }}
path: | path: |

@ -36,7 +36,7 @@ jobs:
if: needs.check-env.outputs.gcloud-service-key == 'true' if: needs.check-env.outputs.gcloud-service-key == 'true'
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules: recursive submodules: recursive
@ -48,7 +48,7 @@ jobs:
echo "TAG_SHA=$(echo '${{ github.sha }}' | cut -b 1-7)" >> $GITHUB_OUTPUT echo "TAG_SHA=$(echo '${{ github.sha }}' | cut -b 1-7)" >> $GITHUB_OUTPUT
- name: Docker meta - name: Docker meta
id: meta id: meta
uses: docker/metadata-action@v3 uses: docker/metadata-action@v5
with: with:
# list of Docker images to use as base name for tags # list of Docker images to use as base name for tags
images: | images: |
@ -59,15 +59,15 @@ jobs:
type=ref,event=pr type=ref,event=pr
type=raw,value=${{ steps.taggen.outputs.TAG_SHA }}-${{ steps.taggen.outputs.TAG_DATE }} type=raw,value=${{ steps.taggen.outputs.TAG_SHA }}-${{ steps.taggen.outputs.TAG_DATE }}
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2 uses: docker/setup-buildx-action@v3
- name: Login to GCR - name: Login to GCR
uses: docker/login-action@v2 uses: docker/login-action@v3
with: with:
registry: gcr.io registry: gcr.io
username: _json_key username: _json_key
password: ${{ secrets.GCLOUD_SERVICE_KEY }} password: ${{ secrets.GCLOUD_SERVICE_KEY }}
- name: Build and push - name: Build and push
uses: docker/build-push-action@v3 uses: docker/build-push-action@v5
with: with:
context: ./ context: ./
file: ./Dockerfile file: ./Dockerfile

@ -19,14 +19,14 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout Repo - name: Checkout Repo
uses: actions/checkout@v3 uses: actions/checkout@v4
with: with:
# check out full history # check out full history
fetch-depth: 0 fetch-depth: 0
submodules: recursive submodules: recursive
- name: Setup Node.js 18.x - name: Setup Node.js 18.x
uses: actions/setup-node@v3 uses: actions/setup-node@v4
with: with:
node-version: 18.x node-version: 18.x

@ -33,7 +33,7 @@ jobs:
if: needs.check-env.outputs.gcloud-service-key == 'true' if: needs.check-env.outputs.gcloud-service-key == 'true'
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
- name: Generate tag data - name: Generate tag data
@ -43,7 +43,7 @@ jobs:
echo "TAG_SHA=$(echo '${{ github.sha }}' | cut -b 1-7)" >> $GITHUB_OUTPUT echo "TAG_SHA=$(echo '${{ github.sha }}' | cut -b 1-7)" >> $GITHUB_OUTPUT
- name: Docker meta - name: Docker meta
id: meta id: meta
uses: docker/metadata-action@v3 uses: docker/metadata-action@v5
with: with:
# list of Docker images to use as base name for tags # list of Docker images to use as base name for tags
images: | images: |
@ -54,15 +54,15 @@ jobs:
type=ref,event=pr type=ref,event=pr
type=raw,value=${{ steps.taggen.outputs.TAG_SHA }}-${{ steps.taggen.outputs.TAG_DATE }} type=raw,value=${{ steps.taggen.outputs.TAG_SHA }}-${{ steps.taggen.outputs.TAG_DATE }}
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2 uses: docker/setup-buildx-action@v3
- name: Login to GCR - name: Login to GCR
uses: docker/login-action@v2 uses: docker/login-action@v3
with: with:
registry: gcr.io registry: gcr.io
username: _json_key username: _json_key
password: ${{ secrets.GCLOUD_SERVICE_KEY }} password: ${{ secrets.GCLOUD_SERVICE_KEY }}
- name: Build and push - name: Build and push
uses: docker/build-push-action@v3 uses: docker/build-push-action@v5
with: with:
context: ./rust context: ./rust
file: ./rust/Dockerfile file: ./rust/Dockerfile

@ -18,12 +18,10 @@ env:
jobs: jobs:
test-rs: test-rs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- run: 'echo "No test required" ' - run: 'echo "No test required" '
lint-rs: lint-rs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- run: 'echo "No lint required" ' - run: 'echo "No lint required" '

@ -6,6 +6,7 @@ on:
paths: paths:
- 'rust/**' - 'rust/**'
- .github/workflows/rust.yml - .github/workflows/rust.yml
- '!*.md'
# Support for merge queues # Support for merge queues
merge_group: merge_group:
# Allows you to run this workflow manually from the Actions tab # Allows you to run this workflow manually from the Actions tab
@ -28,7 +29,7 @@ jobs:
runs-on: larger-runner runs-on: larger-runner
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
@ -38,8 +39,8 @@ jobs:
- name: rust cache - name: rust cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with: with:
prefix-key: "v2-rust" prefix-key: 'v2-rust'
shared-key: "test" shared-key: 'test'
workspaces: | workspaces: |
./rust ./rust
- name: Free disk space - name: Free disk space
@ -56,7 +57,7 @@ jobs:
runs-on: larger-runner runs-on: larger-runner
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
@ -68,8 +69,8 @@ jobs:
- name: rust cache - name: rust cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with: with:
prefix-key: "v2-rust" prefix-key: 'v2-rust'
shared-key: "lint" shared-key: 'lint'
workspaces: | workspaces: |
./rust ./rust
- name: Free disk space - name: Free disk space

@ -18,13 +18,13 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules: recursive submodules: recursive
- name: yarn-cache - name: yarn-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
**/node_modules **/node_modules
@ -35,13 +35,13 @@ jobs:
run: yarn install run: yarn install
- name: foundry-install - name: foundry-install
uses: onbjerg/foundry-toolchain@v1 uses: foundry-rs/foundry-toolchain@v1
- name: forge-build - name: forge-build
run: cd solidity && forge build --build-info run: cd solidity && forge build --build-info
- name: Static analysis - name: Static analysis
uses: crytic/slither-action@v0.3.0 uses: crytic/slither-action@v0.4.0
id: slither id: slither
with: with:
target: 'solidity/' target: 'solidity/'
@ -51,6 +51,6 @@ jobs:
ignore-compile: true ignore-compile: true
- name: Upload SARIF file - name: Upload SARIF file
uses: github/codeql-action/upload-sarif@v2 uses: github/codeql-action/upload-sarif@v3
with: with:
sarif_file: ${{ steps.slither.outputs.sarif }} sarif_file: ${{ steps.slither.outputs.sarif }}

@ -14,17 +14,17 @@ jobs:
steps: steps:
# Checkout the PR branch # Checkout the PR branch
- name: Checkout PR branch - name: Checkout PR branch
uses: actions/checkout@v3 uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules: recursive submodules: recursive
- uses: actions/setup-node@v3 - uses: actions/setup-node@v4
with: with:
node-version: 18 node-version: 18
- name: yarn-cache - name: yarn-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
**/node_modules **/node_modules
@ -35,7 +35,7 @@ jobs:
run: yarn install run: yarn install
- name: foundry-install - name: foundry-install
uses: onbjerg/foundry-toolchain@v1 uses: foundry-rs/foundry-toolchain@v1
# Run the command on PR branch # Run the command on PR branch
- name: Run command on PR branch - name: Run command on PR branch

@ -0,0 +1,109 @@
name: test
on:
push:
branches: [main]
pull_request:
branches:
- '*'
paths:
- '*.md'
- '!**/*'
merge_group:
concurrency:
group: e2e-${{ github.ref }}
cancel-in-progress: ${{ github.ref_name != 'main' }}
jobs:
yarn-install:
runs-on: ubuntu-latest
steps:
- name: Instant pass
run: echo "yarn-install job passed"
yarn-build:
runs-on: ubuntu-latest
steps:
- name: Instant pass
run: echo "yarn-build job passed"
checkout-registry:
runs-on: ubuntu-latest
steps:
- name: Instant pass
run: echo "checkout-registry job passed"
lint-prettier:
runs-on: ubuntu-latest
steps:
- name: Instant pass
run: echo "lint-prettier job passed"
yarn-test:
runs-on: ubuntu-latest
steps:
- name: Instant pass
run: echo "yarn-test job passed"
agent-configs:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
environment: [mainnet3, testnet4]
steps:
- name: Instant pass
run: echo "agent-configs job passed"
e2e-matrix:
runs-on: ubuntu-latest
if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.base_ref == 'main') || github.event_name == 'merge_group'
strategy:
matrix:
e2e-type: [cosmwasm, non-cosmwasm]
steps:
- name: Instant pass
run: echo "e2e-matrix job passed"
e2e:
runs-on: ubuntu-latest
if: always()
steps:
- name: Instant pass
run: echo "e2e job passed"
cli-e2e:
runs-on: ubuntu-latest
if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.base_ref == 'main') || github.event_name == 'merge_group'
strategy:
matrix:
include:
- test-type: preset_hook_enabled
- test-type: configure_hook_enabled
- test-type: pi_with_core_chain
steps:
- name: Instant pass
run: echo "cli-e2e job passed"
env-test:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
environment: [mainnet3]
chain: [ethereum, arbitrum, optimism, inevm, viction]
module: [core, igp]
include:
- environment: testnet4
chain: sepolia
module: core
steps:
- name: Instant pass
run: echo "env-test job passed"
coverage:
runs-on: ubuntu-latest
steps:
- name: Instant pass
run: echo "coverage job passed"

@ -7,6 +7,8 @@ on:
pull_request: pull_request:
branches: branches:
- '*' # run against all branches - '*' # run against all branches
paths-ignore:
- '*.md'
# Support for merge queues # Support for merge queues
merge_group: merge_group:
# Allows you to run this workflow manually from the Actions tab # Allows you to run this workflow manually from the Actions tab
@ -28,17 +30,17 @@ jobs:
yarn-install: yarn-install:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/setup-node@v3 - uses: actions/setup-node@v4
with: with:
node-version: 18 node-version: 18
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules: recursive submodules: recursive
- name: yarn-cache - name: yarn-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
**/node_modules **/node_modules
@ -59,14 +61,14 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: [yarn-install] needs: [yarn-install]
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules: recursive submodules: recursive
fetch-depth: 0 fetch-depth: 0
- name: yarn-cache - name: yarn-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
**/node_modules **/node_modules
@ -74,7 +76,7 @@ jobs:
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }} key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
- name: build-cache - name: build-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
./* ./*
@ -87,7 +89,7 @@ jobs:
checkout-registry: checkout-registry:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
repository: hyperlane-xyz/hyperlane-registry repository: hyperlane-xyz/hyperlane-registry
ref: main ref: main
@ -103,7 +105,7 @@ jobs:
- run: echo "REGISTRY_URI_ABSOLUTE=$(realpath $REGISTRY_URI)" >> $GITHUB_ENV - run: echo "REGISTRY_URI_ABSOLUTE=$(realpath $REGISTRY_URI)" >> $GITHUB_ENV
- name: registry-cache - name: registry-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
${{ env.REGISTRY_URI_ABSOLUTE }} ${{ env.REGISTRY_URI_ABSOLUTE }}
@ -113,14 +115,14 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: [yarn-install] needs: [yarn-install]
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
# check out full history # check out full history
fetch-depth: 0 fetch-depth: 0
- name: yarn-cache - name: yarn-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
**/node_modules **/node_modules
@ -143,17 +145,17 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: [yarn-build, checkout-registry] needs: [yarn-build, checkout-registry]
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules: recursive submodules: recursive
fetch-depth: 0 fetch-depth: 0
- name: foundry-install - name: foundry-install
uses: onbjerg/foundry-toolchain@v1 uses: foundry-rs/foundry-toolchain@v1
- name: build-cache - name: build-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
./* ./*
@ -165,7 +167,7 @@ jobs:
- run: echo "REGISTRY_URI_ABSOLUTE=$(realpath $REGISTRY_URI)" >> $GITHUB_ENV - run: echo "REGISTRY_URI_ABSOLUTE=$(realpath $REGISTRY_URI)" >> $GITHUB_ENV
- name: registry-cache - name: registry-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
${{ env.REGISTRY_URI_ABSOLUTE }} ${{ env.REGISTRY_URI_ABSOLUTE }}
@ -182,13 +184,13 @@ jobs:
matrix: matrix:
environment: [mainnet3, testnet4] environment: [mainnet3, testnet4]
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
fetch-depth: 0 fetch-depth: 0
- name: yarn-cache - name: yarn-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
**/node_modules **/node_modules
@ -196,7 +198,7 @@ jobs:
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }} key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
- name: build-cache - name: build-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
./* ./*
@ -208,7 +210,7 @@ jobs:
- run: echo "REGISTRY_URI_ABSOLUTE=$(realpath $REGISTRY_URI)" >> $GITHUB_ENV - run: echo "REGISTRY_URI_ABSOLUTE=$(realpath $REGISTRY_URI)" >> $GITHUB_ENV
- name: registry-cache - name: registry-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
${{ env.REGISTRY_URI_ABSOLUTE }} ${{ env.REGISTRY_URI_ABSOLUTE }}
@ -232,17 +234,17 @@ jobs:
matrix: matrix:
e2e-type: [cosmwasm, non-cosmwasm] e2e-type: [cosmwasm, non-cosmwasm]
steps: steps:
- uses: actions/setup-node@v3 - uses: actions/setup-node@v4
with: with:
node-version: 18 node-version: 18
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules: recursive submodules: recursive
- name: foundry-install - name: foundry-install
uses: onbjerg/foundry-toolchain@v1 uses: foundry-rs/foundry-toolchain@v1
- name: setup rust - name: setup rust
uses: actions-rs/toolchain@v1 uses: actions-rs/toolchain@v1
@ -265,7 +267,7 @@ jobs:
make-default: true make-default: true
- name: yarn-cache - name: yarn-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
**/node_modules **/node_modules
@ -273,7 +275,7 @@ jobs:
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }} key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
- name: build-cache - name: build-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
./* ./*
@ -285,14 +287,14 @@ jobs:
- run: echo "REGISTRY_URI_ABSOLUTE=$(realpath $REGISTRY_URI)" >> $GITHUB_ENV - run: echo "REGISTRY_URI_ABSOLUTE=$(realpath $REGISTRY_URI)" >> $GITHUB_ENV
- name: registry-cache - name: registry-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
${{ env.REGISTRY_URI_ABSOLUTE }} ${{ env.REGISTRY_URI_ABSOLUTE }}
key: hyperlane-registry-${{ github.event.pull_request.head.sha || github.sha }} key: hyperlane-registry-${{ github.event.pull_request.head.sha || github.sha }}
- name: cargo-cache - name: cargo-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
~/.cargo ~/.cargo
@ -327,7 +329,7 @@ jobs:
cli-e2e: cli-e2e:
runs-on: larger-runner runs-on: larger-runner
if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.base_ref == 'main' || github.base_ref == 'cli-2.0') || github.event_name == 'merge_group' if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.base_ref == 'main') || github.event_name == 'merge_group'
needs: [yarn-build, checkout-registry] needs: [yarn-build, checkout-registry]
strategy: strategy:
matrix: matrix:
@ -336,17 +338,17 @@ jobs:
- test-type: configure_hook_enabled - test-type: configure_hook_enabled
- test-type: pi_with_core_chain - test-type: pi_with_core_chain
steps: steps:
- uses: actions/setup-node@v3 - uses: actions/setup-node@v4
with: with:
node-version: 18 node-version: 18
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
submodules: recursive submodules: recursive
- name: foundry-install - name: foundry-install
uses: onbjerg/foundry-toolchain@v1 uses: foundry-rs/foundry-toolchain@v1
- name: setup rust - name: setup rust
uses: actions-rs/toolchain@v1 uses: actions-rs/toolchain@v1
@ -369,7 +371,7 @@ jobs:
make-default: true make-default: true
- name: yarn-cache - name: yarn-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
**/node_modules **/node_modules
@ -377,7 +379,7 @@ jobs:
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }} key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
- name: build-cache - name: build-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
./* ./*
@ -389,14 +391,14 @@ jobs:
- run: echo "REGISTRY_URI_ABSOLUTE=$(realpath $REGISTRY_URI)" >> $GITHUB_ENV - run: echo "REGISTRY_URI_ABSOLUTE=$(realpath $REGISTRY_URI)" >> $GITHUB_ENV
- name: registry-cache - name: registry-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
${{ env.REGISTRY_URI_ABSOLUTE }} ${{ env.REGISTRY_URI_ABSOLUTE }}
key: hyperlane-registry-${{ github.event.pull_request.head.sha || github.sha }} key: hyperlane-registry-${{ github.event.pull_request.head.sha || github.sha }}
- name: cargo-cache - name: cargo-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
~/.cargo ~/.cargo
@ -420,15 +422,15 @@ jobs:
module: core module: core
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
- name: foundry-install - name: foundry-install
uses: onbjerg/foundry-toolchain@v1 uses: foundry-rs/foundry-toolchain@v1
- name: build-cache - name: build-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
./* ./*
@ -440,7 +442,7 @@ jobs:
- run: echo "REGISTRY_URI_ABSOLUTE=$(realpath $REGISTRY_URI)" >> $GITHUB_ENV - run: echo "REGISTRY_URI_ABSOLUTE=$(realpath $REGISTRY_URI)" >> $GITHUB_ENV
- name: registry-cache - name: registry-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
${{ env.REGISTRY_URI_ABSOLUTE }} ${{ env.REGISTRY_URI_ABSOLUTE }}
@ -454,13 +456,13 @@ jobs:
needs: [yarn-test] needs: [yarn-test]
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
ref: ${{ github.event.pull_request.head.sha || github.sha }} ref: ${{ github.event.pull_request.head.sha || github.sha }}
fetch-depth: 0 fetch-depth: 0
- name: yarn-cache - name: yarn-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
**/node_modules **/node_modules
@ -468,7 +470,7 @@ jobs:
key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }} key: ${{ runner.os }}-yarn-cache-${{ hashFiles('./yarn.lock') }}
- name: build-cache - name: build-cache
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
./* ./*
@ -476,7 +478,7 @@ jobs:
key: ${{ github.event.pull_request.head.sha || github.sha }} key: ${{ github.event.pull_request.head.sha || github.sha }}
- name: foundry-install - name: foundry-install
uses: onbjerg/foundry-toolchain@v1 uses: foundry-rs/foundry-toolchain@v1
- name: Run tests with coverage - name: Run tests with coverage
run: yarn coverage run: yarn coverage
@ -484,6 +486,6 @@ jobs:
NODE_OPTIONS: --max_old_space_size=4096 NODE_OPTIONS: --max_old_space_size=4096
- name: Upload coverage reports to Codecov with GitHub Action - name: Upload coverage reports to Codecov with GitHub Action
uses: codecov/codecov-action@v3 uses: codecov/codecov-action@v4
with: with:
token: ${{ secrets.CODECOV_TOKEN }} token: ${{ secrets.CODECOV_TOKEN }}

@ -9,10 +9,10 @@ This CoC applies to all members of the Hyperlane Network's community including,
**Code** **Code**
1. Never harass or bully anyone. Not verbally, not physically, not sexually. Harassment will not be tolerated. 1. Never harass or bully anyone. Not verbally, not physically, not sexually. Harassment will not be tolerated.
2. Never discrimnate on the basis of personal characteristics or group membership. 2. Never discriminate on the basis of personal characteristics or group membership.
3. Treat your fellow contributors with respect, fairness, and professionalism, especially in situations of high pressure. 3. Treat your fellow contributors with respect, fairness, and professionalism, especially in situations of high pressure.
4. Seek, offer, and accept objective critism of yours and others work, strive to acknowledge the contributions of others. 4. Seek, offer, and accept objective criticism of yours and others work, strive to acknowledge the contributions of others.
5. Be transparent and honest about your qualifications and any potential conflicts of interest. Transparency is a key tenant of the Hyperlane project and we expect it from all contributors. 5. Be transparent and honest about your qualifications and any potential conflicts of interest. Transparency is a key tenet of the Hyperlane project and we expect it from all contributors.
6. Bring an open and curious mind, the Hyperlane project is designed to enable developers to express their curiosity, experiment, and build things we couldn't have imagined ourselves. 6. Bring an open and curious mind, the Hyperlane project is designed to enable developers to express their curiosity, experiment, and build things we couldn't have imagined ourselves.
7. Stay on track - Do your best to avoid off-topic discussion and make sure you are posting to the correct channel and repositories. Distractions are costly and it is far too easy for work to go off track. 7. Stay on track - Do your best to avoid off-topic discussion and make sure you are posting to the correct channel and repositories. Distractions are costly and it is far too easy for work to go off track.
8. Step down properly - Think of your fellow contributors when you step down from the project. Contributors of open-source projects come and go. It is crucial that when you leave the project or reduce your contribution significantly you do so in a way that minimizes disruption and keeps continuity in mind. Concretely this means telling your fellow contributors you are leaving and taking the proper steps to enable a smooth transition for other contributors to pick up where you left off. 8. Step down properly - Think of your fellow contributors when you step down from the project. Contributors of open-source projects come and go. It is crucial that when you leave the project or reduce your contribution significantly you do so in a way that minimizes disruption and keeps continuity in mind. Concretely this means telling your fellow contributors you are leaving and taking the proper steps to enable a smooth transition for other contributors to pick up where you left off.

@ -103,3 +103,9 @@ See [`rust/README.md`](rust/README.md)
- Create a summary of change highlights - Create a summary of change highlights
- Create a "breaking changes" section with any changes required - Create a "breaking changes" section with any changes required
- Deploy agents with the new image tag (if it makes sense to) - Deploy agents with the new image tag (if it makes sense to)
### Releasing packages to NPM
We use [changesets](https://github.com/changesets/changesets) to release to NPM. You can use the `release` script in `package.json` to publish.
For an alpha or beta version, follow the directions [here](https://github.com/changesets/changesets/blob/main/docs/prereleases.md).

@ -0,0 +1,5 @@
{
"opRetro": {
"projectId": "0xa47182d330bd0c5c69b1418462f3f742099138f09bff057189cdd19676a6acd1"
}
}

@ -6,7 +6,7 @@
"@trivago/prettier-plugin-sort-imports": "^4.2.1", "@trivago/prettier-plugin-sort-imports": "^4.2.1",
"@typescript-eslint/eslint-plugin": "^7.4.0", "@typescript-eslint/eslint-plugin": "^7.4.0",
"@typescript-eslint/parser": "^7.4.0", "@typescript-eslint/parser": "^7.4.0",
"eslint": "^9.0.0", "eslint": "^8.57.0",
"eslint-config-prettier": "^9.1.0", "eslint-config-prettier": "^9.1.0",
"eslint-plugin-jest": "^28.2.0", "eslint-plugin-jest": "^28.2.0",
"husky": "^8.0.0", "husky": "^8.0.0",

@ -4,7 +4,7 @@
// List of extensions which should be recommended for users of this workspace. // List of extensions which should be recommended for users of this workspace.
"recommendations": [ "recommendations": [
"panicbit.cargo", "rust-lang.rust-analyzer",
"tamasfe.even-better-toml", "tamasfe.even-better-toml",
"rust-lang.rust-analyzer", "rust-lang.rust-analyzer",
], ],

2
rust/Cargo.lock generated

@ -7275,7 +7275,9 @@ dependencies = [
"macro_rules_attribute", "macro_rules_attribute",
"maplit", "maplit",
"nix 0.26.4", "nix 0.26.4",
"once_cell",
"regex", "regex",
"relayer",
"ripemd", "ripemd",
"serde", "serde",
"serde_json", "serde_json",

@ -38,7 +38,7 @@ tracing-futures.workspace = true
tracing.workspace = true tracing.workspace = true
hyperlane-core = { path = "../../hyperlane-core", features = ["agent", "async"] } hyperlane-core = { path = "../../hyperlane-core", features = ["agent", "async"] }
hyperlane-base = { path = "../../hyperlane-base" } hyperlane-base = { path = "../../hyperlane-base", features = ["test-utils"] }
hyperlane-ethereum = { path = "../../chains/hyperlane-ethereum" } hyperlane-ethereum = { path = "../../chains/hyperlane-ethereum" }
[dev-dependencies] [dev-dependencies]

@ -0,0 +1,10 @@
mod merkle_tree;
mod msg;
mod processor;
mod prover;
mod relayer;
mod server;
mod settings;
pub use msg::GAS_EXPENDITURE_LOG_MESSAGE;
pub use relayer::*;

@ -11,15 +11,7 @@ use eyre::Result;
use hyperlane_base::agent_main; use hyperlane_base::agent_main;
use crate::relayer::Relayer; use relayer::Relayer;
mod merkle_tree;
mod msg;
mod processor;
mod prover;
mod relayer;
mod server;
mod settings;
#[tokio::main(flavor = "multi_thread", worker_threads = 20)] #[tokio::main(flavor = "multi_thread", worker_threads = 20)]
async fn main() -> Result<()> { async fn main() -> Result<()> {

@ -19,6 +19,8 @@ use crate::{
mod policies; mod policies;
pub const GAS_EXPENDITURE_LOG_MESSAGE: &str = "Recording gas expenditure for message";
#[async_trait] #[async_trait]
pub trait GasPaymentPolicy: Debug + Send + Sync { pub trait GasPaymentPolicy: Debug + Send + Sync {
/// Returns Some(gas_limit) if the policy has approved the transaction or /// Returns Some(gas_limit) if the policy has approved the transaction or
@ -132,6 +134,13 @@ impl GasPaymentEnforcer {
} }
pub fn record_tx_outcome(&self, message: &HyperlaneMessage, outcome: TxOutcome) -> Result<()> { pub fn record_tx_outcome(&self, message: &HyperlaneMessage, outcome: TxOutcome) -> Result<()> {
// This log is required in E2E, hence the use of a `const`
debug!(
msg=%message,
?outcome,
"{}",
GAS_EXPENDITURE_LOG_MESSAGE,
);
self.db.process_gas_expenditure(InterchainGasExpenditure { self.db.process_gas_expenditure(InterchainGasExpenditure {
message_id: message.id(), message_id: message.id(),
gas_used: outcome.gas_used, gas_used: outcome.gas_used,

@ -30,5 +30,6 @@ pub(crate) mod metadata;
pub(crate) mod op_queue; pub(crate) mod op_queue;
pub(crate) mod op_submitter; pub(crate) mod op_submitter;
pub(crate) mod pending_message; pub(crate) mod pending_message;
pub(crate) mod pending_operation;
pub(crate) mod processor; pub(crate) mod processor;
pub use gas_payment::GAS_EXPENDITURE_LOG_MESSAGE;

@ -1,24 +1,20 @@
use std::{cmp::Reverse, collections::BinaryHeap, sync::Arc}; use std::{cmp::Reverse, collections::BinaryHeap, sync::Arc};
use derive_new::new; use derive_new::new;
use hyperlane_core::MpmcReceiver; use hyperlane_core::{PendingOperation, QueueOperation};
use prometheus::{IntGauge, IntGaugeVec}; use prometheus::{IntGauge, IntGaugeVec};
use tokio::sync::Mutex; use tokio::sync::{broadcast::Receiver, Mutex};
use tracing::{info, instrument}; use tracing::{debug, info, instrument};
use crate::server::MessageRetryRequest; use crate::server::MessageRetryRequest;
use super::pending_operation::PendingOperation;
pub type QueueOperation = Box<dyn PendingOperation>;
/// Queue of generic operations that can be submitted to a destination chain. /// Queue of generic operations that can be submitted to a destination chain.
/// Includes logic for maintaining queue metrics by the destination and `app_context` of an operation /// Includes logic for maintaining queue metrics by the destination and `app_context` of an operation
#[derive(Debug, Clone, new)] #[derive(Debug, Clone, new)]
pub struct OpQueue { pub struct OpQueue {
metrics: IntGaugeVec, metrics: IntGaugeVec,
queue_metrics_label: String, queue_metrics_label: String,
retry_rx: MpmcReceiver<MessageRetryRequest>, retry_rx: Arc<Mutex<Receiver<MessageRetryRequest>>>,
#[new(default)] #[new(default)]
queue: Arc<Mutex<BinaryHeap<Reverse<QueueOperation>>>>, queue: Arc<Mutex<BinaryHeap<Reverse<QueueOperation>>>>,
} }
@ -41,7 +37,7 @@ impl OpQueue {
} }
/// Pop multiple elements at once from the queue and update metrics /// Pop multiple elements at once from the queue and update metrics
#[instrument(skip(self), ret, fields(queue_label=%self.queue_metrics_label), level = "debug")] #[instrument(skip(self), fields(queue_label=%self.queue_metrics_label), level = "debug")]
pub async fn pop_many(&mut self, limit: usize) -> Vec<QueueOperation> { pub async fn pop_many(&mut self, limit: usize) -> Vec<QueueOperation> {
self.process_retry_requests().await; self.process_retry_requests().await;
let mut queue = self.queue.lock().await; let mut queue = self.queue.lock().await;
@ -55,6 +51,15 @@ impl OpQueue {
break; break;
} }
} }
// This function is called very often by the op_submitter tasks, so only log when there are operations to pop
// to avoid spamming the logs
if !popped.is_empty() {
debug!(
queue_label = %self.queue_metrics_label,
operations = ?popped,
"Popped OpQueue operations"
);
}
popped popped
} }
@ -64,7 +69,7 @@ impl OpQueue {
// The other consideration is whether to put the channel receiver in the OpQueue or in a dedicated task // The other consideration is whether to put the channel receiver in the OpQueue or in a dedicated task
// that also holds an Arc to the Mutex. For simplicity, we'll put it in the OpQueue for now. // that also holds an Arc to the Mutex. For simplicity, we'll put it in the OpQueue for now.
let mut message_retry_requests = vec![]; let mut message_retry_requests = vec![];
while let Ok(message_id) = self.retry_rx.receiver.try_recv() { while let Ok(message_id) = self.retry_rx.lock().await.try_recv() {
message_retry_requests.push(message_id); message_retry_requests.push(message_id);
} }
if message_retry_requests.is_empty() { if message_retry_requests.is_empty() {
@ -101,15 +106,15 @@ impl OpQueue {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
use crate::msg::pending_operation::PendingOperationResult;
use hyperlane_core::{ use hyperlane_core::{
HyperlaneDomain, HyperlaneMessage, KnownHyperlaneDomain, MpmcChannel, TryBatchAs, HyperlaneDomain, HyperlaneMessage, KnownHyperlaneDomain, PendingOperationResult,
TxOutcome, H256, TryBatchAs, TxOutcome, H256, U256,
}; };
use std::{ use std::{
collections::VecDeque, collections::VecDeque,
time::{Duration, Instant}, time::{Duration, Instant},
}; };
use tokio::sync;
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
struct MockPendingOperation { struct MockPendingOperation {
@ -174,6 +179,10 @@ mod test {
todo!() todo!()
} }
fn get_tx_cost_estimate(&self) -> Option<U256> {
todo!()
}
/// This will be called after the operation has been submitted and is /// This will be called after the operation has been submitted and is
/// responsible for checking if the operation has reached a point at /// responsible for checking if the operation has reached a point at
/// which we consider it safe from reorgs. /// which we consider it safe from reorgs.
@ -181,6 +190,14 @@ mod test {
todo!() todo!()
} }
fn set_operation_outcome(
&mut self,
_submission_outcome: TxOutcome,
_submission_estimated_cost: U256,
) {
todo!()
}
fn next_attempt_after(&self) -> Option<Instant> { fn next_attempt_after(&self) -> Option<Instant> {
Some( Some(
Instant::now() Instant::now()
@ -212,13 +229,17 @@ mod test {
#[tokio::test] #[tokio::test]
async fn test_multiple_op_queues_message_id() { async fn test_multiple_op_queues_message_id() {
let (metrics, queue_metrics_label) = dummy_metrics_and_label(); let (metrics, queue_metrics_label) = dummy_metrics_and_label();
let mpmc_channel = MpmcChannel::new(100); let broadcaster = sync::broadcast::Sender::new(100);
let mut op_queue_1 = OpQueue::new( let mut op_queue_1 = OpQueue::new(
metrics.clone(), metrics.clone(),
queue_metrics_label.clone(), queue_metrics_label.clone(),
mpmc_channel.receiver(), Arc::new(Mutex::new(broadcaster.subscribe())),
);
let mut op_queue_2 = OpQueue::new(
metrics,
queue_metrics_label,
Arc::new(Mutex::new(broadcaster.subscribe())),
); );
let mut op_queue_2 = OpQueue::new(metrics, queue_metrics_label, mpmc_channel.receiver());
// Add some operations to the queue with increasing `next_attempt_after` values // Add some operations to the queue with increasing `next_attempt_after` values
let destination_domain: HyperlaneDomain = KnownHyperlaneDomain::Injective.into(); let destination_domain: HyperlaneDomain = KnownHyperlaneDomain::Injective.into();
@ -244,11 +265,10 @@ mod test {
} }
// Retry by message ids // Retry by message ids
let mpmc_tx = mpmc_channel.sender(); broadcaster
mpmc_tx
.send(MessageRetryRequest::MessageId(op_ids[1])) .send(MessageRetryRequest::MessageId(op_ids[1]))
.unwrap(); .unwrap();
mpmc_tx broadcaster
.send(MessageRetryRequest::MessageId(op_ids[2])) .send(MessageRetryRequest::MessageId(op_ids[2]))
.unwrap(); .unwrap();
@ -278,11 +298,11 @@ mod test {
#[tokio::test] #[tokio::test]
async fn test_destination_domain() { async fn test_destination_domain() {
let (metrics, queue_metrics_label) = dummy_metrics_and_label(); let (metrics, queue_metrics_label) = dummy_metrics_and_label();
let mpmc_channel = MpmcChannel::new(100); let broadcaster = sync::broadcast::Sender::new(100);
let mut op_queue = OpQueue::new( let mut op_queue = OpQueue::new(
metrics.clone(), metrics.clone(),
queue_metrics_label.clone(), queue_metrics_label.clone(),
mpmc_channel.receiver(), Arc::new(Mutex::new(broadcaster.subscribe())),
); );
// Add some operations to the queue with increasing `next_attempt_after` values // Add some operations to the queue with increasing `next_attempt_after` values
@ -304,8 +324,7 @@ mod test {
} }
// Retry by domain // Retry by domain
let mpmc_tx = mpmc_channel.sender(); broadcaster
mpmc_tx
.send(MessageRetryRequest::DestinationDomain( .send(MessageRetryRequest::DestinationDomain(
destination_domain_2.id(), destination_domain_2.id(),
)) ))

@ -1,10 +1,14 @@
use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use derive_new::new; use derive_new::new;
use futures::future::join_all; use futures::future::join_all;
use futures_util::future::try_join_all; use futures_util::future::try_join_all;
use hyperlane_core::total_estimated_cost;
use prometheus::{IntCounter, IntGaugeVec}; use prometheus::{IntCounter, IntGaugeVec};
use tokio::sync::broadcast::Sender;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use tokio::sync::Mutex;
use tokio::task::JoinHandle; use tokio::task::JoinHandle;
use tokio::time::sleep; use tokio::time::sleep;
use tokio_metrics::TaskMonitor; use tokio_metrics::TaskMonitor;
@ -14,14 +18,13 @@ use tracing::{info, warn};
use hyperlane_base::CoreMetrics; use hyperlane_base::CoreMetrics;
use hyperlane_core::{ use hyperlane_core::{
BatchItem, ChainCommunicationError, ChainResult, HyperlaneDomain, HyperlaneDomainProtocol, BatchItem, ChainCommunicationError, ChainResult, HyperlaneDomain, HyperlaneDomainProtocol,
HyperlaneMessage, MpmcReceiver, TxOutcome, HyperlaneMessage, PendingOperationResult, QueueOperation, TxOutcome,
}; };
use crate::msg::pending_message::CONFIRM_DELAY; use crate::msg::pending_message::CONFIRM_DELAY;
use crate::server::MessageRetryRequest; use crate::server::MessageRetryRequest;
use super::op_queue::{OpQueue, QueueOperation}; use super::op_queue::OpQueue;
use super::pending_operation::*;
/// SerialSubmitter accepts operations over a channel. It is responsible for /// SerialSubmitter accepts operations over a channel. It is responsible for
/// executing the right strategy to deliver those messages to the destination /// executing the right strategy to deliver those messages to the destination
@ -77,7 +80,7 @@ pub struct SerialSubmitter {
/// Receiver for new messages to submit. /// Receiver for new messages to submit.
rx: mpsc::UnboundedReceiver<QueueOperation>, rx: mpsc::UnboundedReceiver<QueueOperation>,
/// Receiver for retry requests. /// Receiver for retry requests.
retry_rx: MpmcReceiver<MessageRetryRequest>, retry_tx: Sender<MessageRetryRequest>,
/// Metrics for serial submitter. /// Metrics for serial submitter.
metrics: SerialSubmitterMetrics, metrics: SerialSubmitterMetrics,
/// Max batch size for submitting messages /// Max batch size for submitting messages
@ -101,24 +104,24 @@ impl SerialSubmitter {
domain, domain,
metrics, metrics,
rx: rx_prepare, rx: rx_prepare,
retry_rx, retry_tx,
max_batch_size, max_batch_size,
task_monitor, task_monitor,
} = self; } = self;
let prepare_queue = OpQueue::new( let prepare_queue = OpQueue::new(
metrics.submitter_queue_length.clone(), metrics.submitter_queue_length.clone(),
"prepare_queue".to_string(), "prepare_queue".to_string(),
retry_rx.clone(), Arc::new(Mutex::new(retry_tx.subscribe())),
); );
let submit_queue = OpQueue::new( let submit_queue = OpQueue::new(
metrics.submitter_queue_length.clone(), metrics.submitter_queue_length.clone(),
"submit_queue".to_string(), "submit_queue".to_string(),
retry_rx.clone(), Arc::new(Mutex::new(retry_tx.subscribe())),
); );
let confirm_queue = OpQueue::new( let confirm_queue = OpQueue::new(
metrics.submitter_queue_length.clone(), metrics.submitter_queue_length.clone(),
"confirm_queue".to_string(), "confirm_queue".to_string(),
retry_rx, Arc::new(Mutex::new(retry_tx.subscribe())),
); );
let tasks = [ let tasks = [
@ -241,6 +244,7 @@ async fn prepare_task(
metrics.ops_dropped.inc(); metrics.ops_dropped.inc();
} }
PendingOperationResult::Confirm => { PendingOperationResult::Confirm => {
debug!(?op, "Pushing operation to confirm queue");
confirm_queue.push(op).await; confirm_queue.push(op).await;
} }
} }
@ -425,11 +429,10 @@ impl OperationBatch {
async fn submit(self, confirm_queue: &mut OpQueue, metrics: &SerialSubmitterMetrics) { async fn submit(self, confirm_queue: &mut OpQueue, metrics: &SerialSubmitterMetrics) {
match self.try_submit_as_batch(metrics).await { match self.try_submit_as_batch(metrics).await {
Ok(outcome) => { Ok(outcome) => {
// TODO: use the `tx_outcome` with the total gas expenditure
// We'll need to proportionally set `used_gas` based on the tx_outcome, so it can be updated in the confirm step
// which means we need to add a `set_transaction_outcome` fn to `PendingOperation`
info!(outcome=?outcome, batch_size=self.operations.len(), batch=?self.operations, "Submitted transaction batch"); info!(outcome=?outcome, batch_size=self.operations.len(), batch=?self.operations, "Submitted transaction batch");
let total_estimated_cost = total_estimated_cost(&self.operations);
for mut op in self.operations { for mut op in self.operations {
op.set_operation_outcome(outcome.clone(), total_estimated_cost);
op.set_next_attempt_after(CONFIRM_DELAY); op.set_next_attempt_after(CONFIRM_DELAY);
confirm_queue.push(op).await; confirm_queue.push(op).await;
} }
@ -459,8 +462,6 @@ impl OperationBatch {
return Err(ChainCommunicationError::BatchIsEmpty); return Err(ChainCommunicationError::BatchIsEmpty);
}; };
// We use the estimated gas limit from the prior call to
// `process_estimate_costs` to avoid a second gas estimation.
let outcome = first_item.mailbox.process_batch(&batch).await?; let outcome = first_item.mailbox.process_batch(&batch).await?;
metrics.ops_submitted.inc_by(self.operations.len() as u64); metrics.ops_submitted.inc_by(self.operations.len() as u64);
Ok(outcome) Ok(outcome)

@ -9,8 +9,9 @@ use derive_new::new;
use eyre::Result; use eyre::Result;
use hyperlane_base::{db::HyperlaneRocksDB, CoreMetrics}; use hyperlane_base::{db::HyperlaneRocksDB, CoreMetrics};
use hyperlane_core::{ use hyperlane_core::{
BatchItem, ChainCommunicationError, ChainResult, HyperlaneChain, HyperlaneDomain, gas_used_by_operation, make_op_try, BatchItem, ChainCommunicationError, ChainResult,
HyperlaneMessage, Mailbox, MessageSubmissionData, TryBatchAs, TxOutcome, H256, U256, HyperlaneChain, HyperlaneDomain, HyperlaneMessage, Mailbox, MessageSubmissionData,
PendingOperation, PendingOperationResult, TryBatchAs, TxOutcome, H256, U256,
}; };
use prometheus::{IntCounter, IntGauge}; use prometheus::{IntCounter, IntGauge};
use tracing::{debug, error, info, instrument, trace, warn}; use tracing::{debug, error, info, instrument, trace, warn};
@ -18,7 +19,6 @@ use tracing::{debug, error, info, instrument, trace, warn};
use super::{ use super::{
gas_payment::GasPaymentEnforcer, gas_payment::GasPaymentEnforcer,
metadata::{BaseMetadataBuilder, MessageMetadataBuilder, MetadataBuilder}, metadata::{BaseMetadataBuilder, MessageMetadataBuilder, MetadataBuilder},
pending_operation::*,
}; };
pub const CONFIRM_DELAY: Duration = if cfg!(any(test, feature = "test-utils")) { pub const CONFIRM_DELAY: Duration = if cfg!(any(test, feature = "test-utils")) {
@ -259,7 +259,7 @@ impl PendingOperation for PendingMessage {
let state = self let state = self
.submission_data .submission_data
.take() .clone()
.expect("Pending message must be prepared before it can be submitted"); .expect("Pending message must be prepared before it can be submitted");
// We use the estimated gas limit from the prior call to // We use the estimated gas limit from the prior call to
@ -271,7 +271,7 @@ impl PendingOperation for PendingMessage {
.await; .await;
match tx_outcome { match tx_outcome {
Ok(outcome) => { Ok(outcome) => {
self.set_submission_outcome(outcome); self.set_operation_outcome(outcome, state.gas_limit);
} }
Err(e) => { Err(e) => {
error!(error=?e, "Error when processing message"); error!(error=?e, "Error when processing message");
@ -283,6 +283,10 @@ impl PendingOperation for PendingMessage {
self.submission_outcome = Some(outcome); self.submission_outcome = Some(outcome);
} }
fn get_tx_cost_estimate(&self) -> Option<U256> {
self.submission_data.as_ref().map(|d| d.gas_limit)
}
async fn confirm(&mut self) -> PendingOperationResult { async fn confirm(&mut self) -> PendingOperationResult {
make_op_try!(|| { make_op_try!(|| {
// Provider error; just try again later // Provider error; just try again later
@ -313,15 +317,6 @@ impl PendingOperation for PendingMessage {
); );
PendingOperationResult::Success PendingOperationResult::Success
} else { } else {
if let Some(outcome) = &self.submission_outcome {
if let Err(e) = self
.ctx
.origin_gas_payment_enforcer
.record_tx_outcome(&self.message, outcome.clone())
{
error!(error=?e, "Error when recording tx outcome");
}
}
warn!( warn!(
tx_outcome=?self.submission_outcome, tx_outcome=?self.submission_outcome,
message_id=?self.message.id(), message_id=?self.message.id(),
@ -331,6 +326,50 @@ impl PendingOperation for PendingMessage {
} }
} }
fn set_operation_outcome(
&mut self,
submission_outcome: TxOutcome,
submission_estimated_cost: U256,
) {
let Some(operation_estimate) = self.get_tx_cost_estimate() else {
warn!("Cannot set operation outcome without a cost estimate set previously");
return;
};
// calculate the gas used by the operation
let gas_used_by_operation = match gas_used_by_operation(
&submission_outcome,
submission_estimated_cost,
operation_estimate,
) {
Ok(gas_used_by_operation) => gas_used_by_operation,
Err(e) => {
warn!(error = %e, "Error when calculating gas used by operation, falling back to charging the full cost of the tx. Are gas estimates enabled for this chain?");
submission_outcome.gas_used
}
};
let operation_outcome = TxOutcome {
gas_used: gas_used_by_operation,
..submission_outcome
};
// record it in the db, to subtract from the sender's igp allowance
if let Err(e) = self
.ctx
.origin_gas_payment_enforcer
.record_tx_outcome(&self.message, operation_outcome.clone())
{
error!(error=?e, "Error when recording tx outcome");
}
// set the outcome in `Self` as well, for later logging
self.set_submission_outcome(operation_outcome);
debug!(
actual_gas_for_message = ?gas_used_by_operation,
message_gas_estimate = ?operation_estimate,
submission_gas_estimate = ?submission_estimated_cost,
message = ?self.message,
"Gas used by message submission"
);
}
fn next_attempt_after(&self) -> Option<Instant> { fn next_attempt_after(&self) -> Option<Instant> {
self.next_attempt_after self.next_attempt_after
} }
@ -343,7 +382,6 @@ impl PendingOperation for PendingMessage {
self.reset_attempts(); self.reset_attempts();
} }
#[cfg(test)]
fn set_retries(&mut self, retries: u32) { fn set_retries(&mut self, retries: u32) {
self.set_retries(retries); self.set_retries(retries);
} }

@ -13,12 +13,12 @@ use hyperlane_base::{
db::{HyperlaneRocksDB, ProcessMessage}, db::{HyperlaneRocksDB, ProcessMessage},
CoreMetrics, CoreMetrics,
}; };
use hyperlane_core::{HyperlaneDomain, HyperlaneMessage}; use hyperlane_core::{HyperlaneDomain, HyperlaneMessage, QueueOperation};
use prometheus::IntGauge; use prometheus::IntGauge;
use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::UnboundedSender;
use tracing::{debug, instrument, trace}; use tracing::{debug, instrument, trace};
use super::{metadata::AppContextClassifier, op_queue::QueueOperation, pending_message::*}; use super::{metadata::AppContextClassifier, pending_message::*};
use crate::{processor::ProcessorExt, settings::matching_list::MatchingList}; use crate::{processor::ProcessorExt, settings::matching_list::MatchingList};
/// Finds unprocessed messages from an origin and submits then through a channel /// Finds unprocessed messages from an origin and submits then through a channel
@ -138,7 +138,10 @@ impl DirectionalNonceIterator {
#[instrument] #[instrument]
fn iterate(&mut self) { fn iterate(&mut self) {
match self.direction { match self.direction {
NonceDirection::High => self.nonce = self.nonce.map(|n| n.saturating_add(1)), NonceDirection::High => {
self.nonce = self.nonce.map(|n| n.saturating_add(1));
debug!(?self, "Iterating high nonce");
}
NonceDirection::Low => { NonceDirection::Low => {
if let Some(nonce) = self.nonce { if let Some(nonce) = self.nonce {
// once the message with nonce zero is processed, we should stop going backwards // once the message with nonce zero is processed, we should stop going backwards
@ -155,6 +158,7 @@ impl DirectionalNonceIterator {
if let Some(message) = self.indexed_message_with_nonce()? { if let Some(message) = self.indexed_message_with_nonce()? {
Self::update_max_nonce_gauge(&message, metrics); Self::update_max_nonce_gauge(&message, metrics);
if !self.is_message_processed()? { if !self.is_message_processed()? {
debug!(?message, iterator=?self, "Found processable message");
return Ok(MessageStatus::Processable(message)); return Ok(MessageStatus::Processable(message));
} else { } else {
return Ok(MessageStatus::Processed); return Ok(MessageStatus::Processed);
@ -235,7 +239,11 @@ impl ProcessorExt for MessageProcessor {
// nonce. // nonce.
// Scan until we find next nonce without delivery confirmation. // Scan until we find next nonce without delivery confirmation.
if let Some(msg) = self.try_get_unprocessed_message().await? { if let Some(msg) = self.try_get_unprocessed_message().await? {
debug!(?msg, "Processor working on message"); debug!(
?msg,
cursor = ?self.nonce_iterator,
"Processor working on message"
);
let destination = msg.destination; let destination = msg.destination;
// Skip if not whitelisted. // Skip if not whitelisted.

@ -13,13 +13,15 @@ use hyperlane_base::{
metrics::{AgentMetrics, MetricsUpdater}, metrics::{AgentMetrics, MetricsUpdater},
settings::ChainConf, settings::ChainConf,
BaseAgent, ChainMetrics, ContractSyncMetrics, ContractSyncer, CoreMetrics, HyperlaneAgentCore, BaseAgent, ChainMetrics, ContractSyncMetrics, ContractSyncer, CoreMetrics, HyperlaneAgentCore,
SyncOptions,
}; };
use hyperlane_core::{ use hyperlane_core::{
HyperlaneDomain, HyperlaneMessage, InterchainGasPayment, MerkleTreeInsertion, MpmcChannel, HyperlaneDomain, HyperlaneMessage, InterchainGasPayment, MerkleTreeInsertion, QueueOperation,
MpmcReceiver, U256, H512, U256,
}; };
use tokio::{ use tokio::{
sync::{ sync::{
broadcast::{Receiver, Sender},
mpsc::{self, UnboundedReceiver, UnboundedSender}, mpsc::{self, UnboundedReceiver, UnboundedSender},
RwLock, RwLock,
}, },
@ -33,7 +35,6 @@ use crate::{
msg::{ msg::{
gas_payment::GasPaymentEnforcer, gas_payment::GasPaymentEnforcer,
metadata::{BaseMetadataBuilder, IsmAwareAppContextClassifier}, metadata::{BaseMetadataBuilder, IsmAwareAppContextClassifier},
op_queue::QueueOperation,
op_submitter::{SerialSubmitter, SerialSubmitterMetrics}, op_submitter::{SerialSubmitter, SerialSubmitterMetrics},
pending_message::{MessageContext, MessageSubmissionMetrics}, pending_message::{MessageContext, MessageSubmissionMetrics},
processor::{MessageProcessor, MessageProcessorMetrics}, processor::{MessageProcessor, MessageProcessorMetrics},
@ -134,7 +135,7 @@ impl BaseAgent for Relayer {
let contract_sync_metrics = Arc::new(ContractSyncMetrics::new(&core_metrics)); let contract_sync_metrics = Arc::new(ContractSyncMetrics::new(&core_metrics));
let message_syncs = settings let message_syncs: HashMap<_, Arc<dyn ContractSyncer<HyperlaneMessage>>> = settings
.contract_syncs::<HyperlaneMessage, _>( .contract_syncs::<HyperlaneMessage, _>(
settings.origin_chains.iter(), settings.origin_chains.iter(),
&core_metrics, &core_metrics,
@ -305,8 +306,8 @@ impl BaseAgent for Relayer {
} }
// run server // run server
let mpmc_channel = MpmcChannel::<MessageRetryRequest>::new(ENDPOINT_MESSAGES_QUEUE_SIZE); let sender = Sender::<MessageRetryRequest>::new(ENDPOINT_MESSAGES_QUEUE_SIZE);
let custom_routes = relayer_server::routes(mpmc_channel.sender()); let custom_routes = relayer_server::routes(sender.clone());
let server = self let server = self
.core .core
@ -328,7 +329,7 @@ impl BaseAgent for Relayer {
self.run_destination_submitter( self.run_destination_submitter(
dest_domain, dest_domain,
receive_channel, receive_channel,
mpmc_channel.receiver(), sender.clone(),
// Default to submitting one message at a time if there is no batch config // Default to submitting one message at a time if there is no batch config
self.core.settings.chains[dest_domain.name()] self.core.settings.chains[dest_domain.name()]
.connection .connection
@ -352,13 +353,25 @@ impl BaseAgent for Relayer {
} }
for origin in &self.origin_chains { for origin in &self.origin_chains {
let maybe_broadcaster = self
.message_syncs
.get(origin)
.and_then(|sync| sync.get_broadcaster());
tasks.push(self.run_message_sync(origin, task_monitor.clone()).await); tasks.push(self.run_message_sync(origin, task_monitor.clone()).await);
tasks.push( tasks.push(
self.run_interchain_gas_payment_sync(origin, task_monitor.clone()) self.run_interchain_gas_payment_sync(
origin,
maybe_broadcaster.clone().map(|b| b.subscribe()),
task_monitor.clone(),
)
.await, .await,
); );
tasks.push( tasks.push(
self.run_merkle_tree_hook_syncs(origin, task_monitor.clone()) self.run_merkle_tree_hook_syncs(
origin,
maybe_broadcaster.map(|b| b.subscribe()),
task_monitor.clone(),
)
.await, .await,
); );
} }
@ -394,7 +407,7 @@ impl Relayer {
tokio::spawn(TaskMonitor::instrument(&task_monitor, async move { tokio::spawn(TaskMonitor::instrument(&task_monitor, async move {
contract_sync contract_sync
.clone() .clone()
.sync("dispatched_messages", cursor) .sync("dispatched_messages", cursor.into())
.await .await
})) }))
.instrument(info_span!("MessageSync")) .instrument(info_span!("MessageSync"))
@ -403,6 +416,7 @@ impl Relayer {
async fn run_interchain_gas_payment_sync( async fn run_interchain_gas_payment_sync(
&self, &self,
origin: &HyperlaneDomain, origin: &HyperlaneDomain,
tx_id_receiver: Option<Receiver<H512>>,
task_monitor: TaskMonitor, task_monitor: TaskMonitor,
) -> Instrumented<JoinHandle<()>> { ) -> Instrumented<JoinHandle<()>> {
let index_settings = self.as_ref().settings.chains[origin.name()].index_settings(); let index_settings = self.as_ref().settings.chains[origin.name()].index_settings();
@ -413,7 +427,13 @@ impl Relayer {
.clone(); .clone();
let cursor = contract_sync.cursor(index_settings).await; let cursor = contract_sync.cursor(index_settings).await;
tokio::spawn(TaskMonitor::instrument(&task_monitor, async move { tokio::spawn(TaskMonitor::instrument(&task_monitor, async move {
contract_sync.clone().sync("gas_payments", cursor).await contract_sync
.clone()
.sync(
"gas_payments",
SyncOptions::new(Some(cursor), tx_id_receiver),
)
.await
})) }))
.instrument(info_span!("IgpSync")) .instrument(info_span!("IgpSync"))
} }
@ -421,13 +441,20 @@ impl Relayer {
async fn run_merkle_tree_hook_syncs( async fn run_merkle_tree_hook_syncs(
&self, &self,
origin: &HyperlaneDomain, origin: &HyperlaneDomain,
tx_id_receiver: Option<Receiver<H512>>,
task_monitor: TaskMonitor, task_monitor: TaskMonitor,
) -> Instrumented<JoinHandle<()>> { ) -> Instrumented<JoinHandle<()>> {
let index_settings = self.as_ref().settings.chains[origin.name()].index.clone(); let index_settings = self.as_ref().settings.chains[origin.name()].index.clone();
let contract_sync = self.merkle_tree_hook_syncs.get(origin).unwrap().clone(); let contract_sync = self.merkle_tree_hook_syncs.get(origin).unwrap().clone();
let cursor = contract_sync.cursor(index_settings).await; let cursor = contract_sync.cursor(index_settings).await;
tokio::spawn(TaskMonitor::instrument(&task_monitor, async move { tokio::spawn(TaskMonitor::instrument(&task_monitor, async move {
contract_sync.clone().sync("merkle_tree_hook", cursor).await contract_sync
.clone()
.sync(
"merkle_tree_hook",
SyncOptions::new(Some(cursor), tx_id_receiver),
)
.await
})) }))
.instrument(info_span!("MerkleTreeHookSync")) .instrument(info_span!("MerkleTreeHookSync"))
} }
@ -498,7 +525,7 @@ impl Relayer {
&self, &self,
destination: &HyperlaneDomain, destination: &HyperlaneDomain,
receiver: UnboundedReceiver<QueueOperation>, receiver: UnboundedReceiver<QueueOperation>,
retry_receiver_channel: MpmcReceiver<MessageRetryRequest>, retry_receiver_channel: Sender<MessageRetryRequest>,
batch_size: u32, batch_size: u32,
task_monitor: TaskMonitor, task_monitor: TaskMonitor,
) -> Instrumented<JoinHandle<()>> { ) -> Instrumented<JoinHandle<()>> {

@ -3,13 +3,11 @@ use axum::{
routing, Router, routing, Router,
}; };
use derive_new::new; use derive_new::new;
use hyperlane_core::{ChainCommunicationError, H256}; use hyperlane_core::{ChainCommunicationError, QueueOperation, H256};
use serde::Deserialize; use serde::Deserialize;
use std::str::FromStr; use std::str::FromStr;
use tokio::sync::broadcast::Sender; use tokio::sync::broadcast::Sender;
use crate::msg::op_queue::QueueOperation;
const MESSAGE_RETRY_API_BASE: &str = "/message_retry"; const MESSAGE_RETRY_API_BASE: &str = "/message_retry";
pub const ENDPOINT_MESSAGES_QUEUE_SIZE: usize = 1_000; pub const ENDPOINT_MESSAGES_QUEUE_SIZE: usize = 1_000;
@ -109,12 +107,12 @@ mod tests {
use super::*; use super::*;
use axum::http::StatusCode; use axum::http::StatusCode;
use ethers::utils::hex::ToHex; use ethers::utils::hex::ToHex;
use hyperlane_core::{MpmcChannel, MpmcReceiver};
use std::net::SocketAddr; use std::net::SocketAddr;
use tokio::sync::broadcast::{Receiver, Sender};
fn setup_test_server() -> (SocketAddr, MpmcReceiver<MessageRetryRequest>) { fn setup_test_server() -> (SocketAddr, Receiver<MessageRetryRequest>) {
let mpmc_channel = MpmcChannel::<MessageRetryRequest>::new(ENDPOINT_MESSAGES_QUEUE_SIZE); let broadcast_tx = Sender::<MessageRetryRequest>::new(ENDPOINT_MESSAGES_QUEUE_SIZE);
let message_retry_api = MessageRetryApi::new(mpmc_channel.sender()); let message_retry_api = MessageRetryApi::new(broadcast_tx.clone());
let (path, retry_router) = message_retry_api.get_route(); let (path, retry_router) = message_retry_api.get_route();
let app = Router::new().nest(path, retry_router); let app = Router::new().nest(path, retry_router);
@ -124,7 +122,7 @@ mod tests {
let addr = server.local_addr(); let addr = server.local_addr();
tokio::spawn(server); tokio::spawn(server);
(addr, mpmc_channel.receiver()) (addr, broadcast_tx.subscribe())
} }
#[tokio::test] #[tokio::test]
@ -148,7 +146,7 @@ mod tests {
assert_eq!(response.status(), StatusCode::OK); assert_eq!(response.status(), StatusCode::OK);
assert_eq!( assert_eq!(
rx.receiver.try_recv().unwrap(), rx.try_recv().unwrap(),
MessageRetryRequest::MessageId(message_id) MessageRetryRequest::MessageId(message_id)
); );
} }
@ -172,7 +170,7 @@ mod tests {
assert_eq!(response.status(), StatusCode::OK); assert_eq!(response.status(), StatusCode::OK);
assert_eq!( assert_eq!(
rx.receiver.try_recv().unwrap(), rx.try_recv().unwrap(),
MessageRetryRequest::DestinationDomain(destination_domain) MessageRetryRequest::DestinationDomain(destination_domain)
); );
} }

@ -5,10 +5,13 @@ use derive_more::AsRef;
use futures::future::try_join_all; use futures::future::try_join_all;
use hyperlane_base::{ use hyperlane_base::{
metrics::AgentMetrics, settings::IndexSettings, BaseAgent, ChainMetrics, ContractSyncMetrics, metrics::AgentMetrics, settings::IndexSettings, BaseAgent, ChainMetrics, ContractSyncMetrics,
ContractSyncer, CoreMetrics, HyperlaneAgentCore, MetricsUpdater, ContractSyncer, CoreMetrics, HyperlaneAgentCore, MetricsUpdater, SyncOptions,
};
use hyperlane_core::{Delivery, HyperlaneDomain, HyperlaneMessage, InterchainGasPayment, H512};
use tokio::{
sync::broadcast::{Receiver, Sender},
task::JoinHandle,
}; };
use hyperlane_core::{Delivery, HyperlaneDomain, HyperlaneMessage, InterchainGasPayment};
use tokio::task::JoinHandle;
use tracing::{info_span, instrument::Instrumented, trace, Instrument}; use tracing::{info_span, instrument::Instrumented, trace, Instrument};
use crate::{chain_scraper::HyperlaneSqlDb, db::ScraperDb, settings::ScraperSettings}; use crate::{chain_scraper::HyperlaneSqlDb, db::ScraperDb, settings::ScraperSettings};
@ -135,16 +138,16 @@ impl Scraper {
let domain = scraper.domain.clone(); let domain = scraper.domain.clone();
let mut tasks = Vec::with_capacity(2); let mut tasks = Vec::with_capacity(2);
tasks.push( let (message_indexer, maybe_broadcaster) = self
self.build_message_indexer( .build_message_indexer(
domain.clone(), domain.clone(),
self.core_metrics.clone(), self.core_metrics.clone(),
self.contract_sync_metrics.clone(), self.contract_sync_metrics.clone(),
db.clone(), db.clone(),
index_settings.clone(), index_settings.clone(),
) )
.await, .await;
); tasks.push(message_indexer);
tasks.push( tasks.push(
self.build_delivery_indexer( self.build_delivery_indexer(
domain.clone(), domain.clone(),
@ -152,6 +155,7 @@ impl Scraper {
self.contract_sync_metrics.clone(), self.contract_sync_metrics.clone(),
db.clone(), db.clone(),
index_settings.clone(), index_settings.clone(),
maybe_broadcaster.clone().map(|b| b.subscribe()),
) )
.await, .await,
); );
@ -162,6 +166,7 @@ impl Scraper {
self.contract_sync_metrics.clone(), self.contract_sync_metrics.clone(),
db, db,
index_settings.clone(), index_settings.clone(),
maybe_broadcaster.map(|b| b.subscribe()),
) )
.await, .await,
); );
@ -182,7 +187,7 @@ impl Scraper {
contract_sync_metrics: Arc<ContractSyncMetrics>, contract_sync_metrics: Arc<ContractSyncMetrics>,
db: HyperlaneSqlDb, db: HyperlaneSqlDb,
index_settings: IndexSettings, index_settings: IndexSettings,
) -> Instrumented<JoinHandle<()>> { ) -> (Instrumented<JoinHandle<()>>, Option<Sender<H512>>) {
let sync = self let sync = self
.as_ref() .as_ref()
.settings .settings
@ -195,9 +200,12 @@ impl Scraper {
.await .await
.unwrap(); .unwrap();
let cursor = sync.cursor(index_settings.clone()).await; let cursor = sync.cursor(index_settings.clone()).await;
tokio::spawn(async move { sync.sync("message_dispatch", cursor).await }).instrument( let maybe_broadcaser = sync.get_broadcaster();
let task = tokio::spawn(async move { sync.sync("message_dispatch", cursor.into()).await })
.instrument(
info_span!("ChainContractSync", chain=%domain.name(), event="message_dispatch"), info_span!("ChainContractSync", chain=%domain.name(), event="message_dispatch"),
) );
(task, maybe_broadcaser)
} }
async fn build_delivery_indexer( async fn build_delivery_indexer(
@ -207,6 +215,7 @@ impl Scraper {
contract_sync_metrics: Arc<ContractSyncMetrics>, contract_sync_metrics: Arc<ContractSyncMetrics>,
db: HyperlaneSqlDb, db: HyperlaneSqlDb,
index_settings: IndexSettings, index_settings: IndexSettings,
tx_id_receiver: Option<Receiver<H512>>,
) -> Instrumented<JoinHandle<()>> { ) -> Instrumented<JoinHandle<()>> {
let sync = self let sync = self
.as_ref() .as_ref()
@ -222,7 +231,10 @@ impl Scraper {
let label = "message_delivery"; let label = "message_delivery";
let cursor = sync.cursor(index_settings.clone()).await; let cursor = sync.cursor(index_settings.clone()).await;
tokio::spawn(async move { sync.sync(label, cursor).await }) tokio::spawn(async move {
sync.sync(label, SyncOptions::new(Some(cursor), tx_id_receiver))
.await
})
.instrument(info_span!("ChainContractSync", chain=%domain.name(), event=label)) .instrument(info_span!("ChainContractSync", chain=%domain.name(), event=label))
} }
@ -233,6 +245,7 @@ impl Scraper {
contract_sync_metrics: Arc<ContractSyncMetrics>, contract_sync_metrics: Arc<ContractSyncMetrics>,
db: HyperlaneSqlDb, db: HyperlaneSqlDb,
index_settings: IndexSettings, index_settings: IndexSettings,
tx_id_receiver: Option<Receiver<H512>>,
) -> Instrumented<JoinHandle<()>> { ) -> Instrumented<JoinHandle<()>> {
let sync = self let sync = self
.as_ref() .as_ref()
@ -248,7 +261,10 @@ impl Scraper {
let label = "gas_payment"; let label = "gas_payment";
let cursor = sync.cursor(index_settings.clone()).await; let cursor = sync.cursor(index_settings.clone()).await;
tokio::spawn(async move { sync.sync(label, cursor).await }) tokio::spawn(async move {
sync.sync(label, SyncOptions::new(Some(cursor), tx_id_receiver))
.await
})
.instrument(info_span!("ChainContractSync", chain=%domain.name(), event=label)) .instrument(info_span!("ChainContractSync", chain=%domain.name(), event=label))
} }
} }

@ -210,7 +210,10 @@ impl Validator {
let contract_sync = self.merkle_tree_hook_sync.clone(); let contract_sync = self.merkle_tree_hook_sync.clone();
let cursor = contract_sync.cursor(index_settings).await; let cursor = contract_sync.cursor(index_settings).await;
tokio::spawn(async move { tokio::spawn(async move {
contract_sync.clone().sync("merkle_tree_hook", cursor).await; contract_sync
.clone()
.sync("merkle_tree_hook", cursor.into())
.await;
}) })
.instrument(info_span!("MerkleTreeHookSyncer")) .instrument(info_span!("MerkleTreeHookSyncer"))
} }

@ -202,7 +202,7 @@ impl CosmosInterchainGasPaymasterIndexer {
#[async_trait] #[async_trait]
impl Indexer<InterchainGasPayment> for CosmosInterchainGasPaymasterIndexer { impl Indexer<InterchainGasPayment> for CosmosInterchainGasPaymasterIndexer {
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<InterchainGasPayment>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<InterchainGasPayment>, LogMeta)>> {

@ -350,7 +350,7 @@ impl CosmosMailboxIndexer {
#[async_trait] #[async_trait]
impl Indexer<HyperlaneMessage> for CosmosMailboxIndexer { impl Indexer<HyperlaneMessage> for CosmosMailboxIndexer {
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<HyperlaneMessage>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<HyperlaneMessage>, LogMeta)>> {
@ -397,7 +397,7 @@ impl Indexer<HyperlaneMessage> for CosmosMailboxIndexer {
#[async_trait] #[async_trait]
impl Indexer<H256> for CosmosMailboxIndexer { impl Indexer<H256> for CosmosMailboxIndexer {
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<H256>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<H256>, LogMeta)>> {

@ -283,7 +283,7 @@ impl CosmosMerkleTreeHookIndexer {
#[async_trait] #[async_trait]
impl Indexer<MerkleTreeInsertion> for CosmosMerkleTreeHookIndexer { impl Indexer<MerkleTreeInsertion> for CosmosMerkleTreeHookIndexer {
/// Fetch list of logs between `range` of blocks /// Fetch list of logs between `range` of blocks
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<MerkleTreeInsertion>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<MerkleTreeInsertion>, LogMeta)>> {

@ -10,12 +10,14 @@ use ethers::prelude::Middleware;
use hyperlane_core::{ use hyperlane_core::{
ChainCommunicationError, ChainResult, ContractLocator, HyperlaneAbi, HyperlaneChain, ChainCommunicationError, ChainResult, ContractLocator, HyperlaneAbi, HyperlaneChain,
HyperlaneContract, HyperlaneDomain, HyperlaneProvider, Indexed, Indexer, HyperlaneContract, HyperlaneDomain, HyperlaneProvider, Indexed, Indexer,
InterchainGasPaymaster, InterchainGasPayment, LogMeta, SequenceAwareIndexer, H160, H256, InterchainGasPaymaster, InterchainGasPayment, LogMeta, SequenceAwareIndexer, H160, H256, H512,
}; };
use tracing::instrument; use tracing::instrument;
use super::utils::fetch_raw_logs_and_log_meta;
use crate::interfaces::i_interchain_gas_paymaster::{ use crate::interfaces::i_interchain_gas_paymaster::{
IInterchainGasPaymaster as EthereumInterchainGasPaymasterInternal, IINTERCHAINGASPAYMASTER_ABI, GasPaymentFilter, IInterchainGasPaymaster as EthereumInterchainGasPaymasterInternal,
IINTERCHAINGASPAYMASTER_ABI,
}; };
use crate::{BuildableWithProvider, ConnectionConf, EthereumProvider}; use crate::{BuildableWithProvider, ConnectionConf, EthereumProvider};
@ -86,7 +88,7 @@ where
{ {
/// Note: This call may return duplicates depending on the provider used /// Note: This call may return duplicates depending on the provider used
#[instrument(err, skip(self))] #[instrument(err, skip(self))]
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<InterchainGasPayment>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<InterchainGasPayment>, LogMeta)>> {
@ -124,6 +126,32 @@ where
.as_u32() .as_u32()
.saturating_sub(self.reorg_period)) .saturating_sub(self.reorg_period))
} }
/// Fetch `GasPayment` events emitted by this IGP contract in the
/// transaction identified by `tx_hash`, with their log metadata.
async fn fetch_logs_by_tx_hash(
    &self,
    tx_hash: H512,
) -> ChainResult<Vec<(Indexed<InterchainGasPayment>, LogMeta)>> {
    // Pull the decoded GasPaymentFilter events out of the tx receipt.
    let raw_logs = fetch_raw_logs_and_log_meta::<GasPaymentFilter, M>(
        tx_hash,
        self.provider.clone(),
        self.contract.address(),
    )
    .await?;
    let mut logs = Vec::with_capacity(raw_logs.len());
    for (event, meta) in raw_logs {
        // Convert the ethers event into the core gas-payment type.
        let payment = InterchainGasPayment {
            message_id: H256::from(event.message_id),
            destination: event.destination_domain,
            payment: event.payment.into(),
            gas_amount: event.gas_amount.into(),
        };
        logs.push((Indexed::new(payment), meta));
    }
    Ok(logs)
}
} }
#[async_trait] #[async_trait]

@ -11,6 +11,7 @@ use ethers::abi::{AbiEncode, Detokenize};
use ethers::prelude::Middleware; use ethers::prelude::Middleware;
use ethers_contract::builders::ContractCall; use ethers_contract::builders::ContractCall;
use futures_util::future::join_all; use futures_util::future::join_all;
use hyperlane_core::H512;
use tracing::instrument; use tracing::instrument;
use hyperlane_core::{ use hyperlane_core::{
@ -25,10 +26,12 @@ use crate::interfaces::arbitrum_node_interface::ArbitrumNodeInterface;
use crate::interfaces::i_mailbox::{ use crate::interfaces::i_mailbox::{
IMailbox as EthereumMailboxInternal, ProcessCall, IMAILBOX_ABI, IMailbox as EthereumMailboxInternal, ProcessCall, IMAILBOX_ABI,
}; };
use crate::interfaces::mailbox::DispatchFilter;
use crate::tx::{call_with_lag, fill_tx_gas_params, report_tx}; use crate::tx::{call_with_lag, fill_tx_gas_params, report_tx};
use crate::{BuildableWithProvider, ConnectionConf, EthereumProvider, TransactionOverrides}; use crate::{BuildableWithProvider, ConnectionConf, EthereumProvider, TransactionOverrides};
use super::multicall::{self, build_multicall}; use super::multicall::{self, build_multicall};
use super::utils::fetch_raw_logs_and_log_meta;
impl<M> std::fmt::Display for EthereumMailboxInternal<M> impl<M> std::fmt::Display for EthereumMailboxInternal<M>
where where
@ -134,7 +137,7 @@ where
/// Note: This call may return duplicates depending on the provider used /// Note: This call may return duplicates depending on the provider used
#[instrument(err, skip(self))] #[instrument(err, skip(self))]
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<HyperlaneMessage>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<HyperlaneMessage>, LogMeta)>> {
@ -157,6 +160,27 @@ where
events.sort_by(|a, b| a.0.inner().nonce.cmp(&b.0.inner().nonce)); events.sort_by(|a, b| a.0.inner().nonce.cmp(&b.0.inner().nonce));
Ok(events) Ok(events)
} }
/// Fetch `Dispatch` events emitted by this mailbox in the transaction
/// identified by `tx_hash`, decoded into `HyperlaneMessage`s with their
/// log metadata. Returns an empty vec if the tx has no matching logs.
async fn fetch_logs_by_tx_hash(
    &self,
    tx_hash: H512,
) -> ChainResult<Vec<(Indexed<HyperlaneMessage>, LogMeta)>> {
    let logs = fetch_raw_logs_and_log_meta::<DispatchFilter, M>(
        tx_hash,
        self.provider.clone(),
        self.contract.address(),
    )
    .await?
    .into_iter()
    .map(|(log, log_meta)| {
        (
            // Decode the raw dispatched message bytes into a HyperlaneMessage,
            // then wrap it as an Indexed value via `.into()`.
            HyperlaneMessage::from(log.message.to_vec()).into(),
            log_meta,
        )
    })
    .collect();
    Ok(logs)
}
} }
#[async_trait] #[async_trait]
@ -183,7 +207,7 @@ where
/// Note: This call may return duplicates depending on the provider used /// Note: This call may return duplicates depending on the provider used
#[instrument(err, skip(self))] #[instrument(err, skip(self))]
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<H256>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<H256>, LogMeta)>> {

@ -11,13 +11,17 @@ use tracing::instrument;
use hyperlane_core::{ use hyperlane_core::{
ChainCommunicationError, ChainResult, Checkpoint, ContractLocator, HyperlaneChain, ChainCommunicationError, ChainResult, Checkpoint, ContractLocator, HyperlaneChain,
HyperlaneContract, HyperlaneDomain, HyperlaneProvider, Indexed, Indexer, LogMeta, HyperlaneContract, HyperlaneDomain, HyperlaneProvider, Indexed, Indexer, LogMeta,
MerkleTreeHook, MerkleTreeInsertion, SequenceAwareIndexer, H256, MerkleTreeHook, MerkleTreeInsertion, SequenceAwareIndexer, H256, H512,
}; };
use crate::interfaces::merkle_tree_hook::{MerkleTreeHook as MerkleTreeHookContract, Tree}; use crate::interfaces::merkle_tree_hook::{
InsertedIntoTreeFilter, MerkleTreeHook as MerkleTreeHookContract, Tree,
};
use crate::tx::call_with_lag; use crate::tx::call_with_lag;
use crate::{BuildableWithProvider, ConnectionConf, EthereumProvider}; use crate::{BuildableWithProvider, ConnectionConf, EthereumProvider};
use super::utils::fetch_raw_logs_and_log_meta;
// We don't need the reverse of this impl, so it's ok to disable the clippy lint // We don't need the reverse of this impl, so it's ok to disable the clippy lint
#[allow(clippy::from_over_into)] #[allow(clippy::from_over_into)]
impl Into<IncrementalMerkle> for Tree { impl Into<IncrementalMerkle> for Tree {
@ -108,7 +112,7 @@ where
{ {
/// Note: This call may return duplicates depending on the provider used /// Note: This call may return duplicates depending on the provider used
#[instrument(err, skip(self))] #[instrument(err, skip(self))]
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<MerkleTreeInsertion>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<MerkleTreeInsertion>, LogMeta)>> {
@ -142,6 +146,27 @@ where
.as_u32() .as_u32()
.saturating_sub(self.reorg_period)) .saturating_sub(self.reorg_period))
} }
/// Fetch `InsertedIntoTree` events emitted by this merkle tree hook in the
/// transaction identified by `tx_hash`, with their log metadata.
async fn fetch_logs_by_tx_hash(
    &self,
    tx_hash: H512,
) -> ChainResult<Vec<(Indexed<MerkleTreeInsertion>, LogMeta)>> {
    // Pull the decoded InsertedIntoTreeFilter events out of the tx receipt.
    let raw_logs = fetch_raw_logs_and_log_meta::<InsertedIntoTreeFilter, M>(
        tx_hash,
        self.provider.clone(),
        self.contract.address(),
    )
    .await?;
    let mut insertions = Vec::with_capacity(raw_logs.len());
    for (event, meta) in raw_logs {
        // Convert the ethers event into the core insertion type and wrap it
        // as an Indexed value via `.into()`.
        let insertion = MerkleTreeInsertion::new(event.index, H256::from(event.message_id));
        insertions.push((insertion.into(), meta));
    }
    Ok(insertions)
}
} }
#[async_trait] #[async_trait]

@ -1,11 +1,8 @@
pub use {interchain_gas::*, mailbox::*, merkle_tree_hook::*, validator_announce::*}; pub use {interchain_gas::*, mailbox::*, merkle_tree_hook::*, validator_announce::*};
mod interchain_gas; mod interchain_gas;
mod mailbox; mod mailbox;
mod merkle_tree_hook; mod merkle_tree_hook;
mod multicall; mod multicall;
mod utils;
mod validator_announce; mod validator_announce;

@ -0,0 +1,48 @@
use std::sync::Arc;
use ethers::{
abi::RawLog,
providers::Middleware,
types::{H160 as EthersH160, H256 as EthersH256},
};
use ethers_contract::{ContractError, EthEvent, LogMeta as EthersLogMeta};
use hyperlane_core::{ChainResult, LogMeta, H512};
use tracing::warn;
/// Fetch all logs of event type `T` emitted by `contract_address` in the
/// transaction identified by `tx_hash`, paired with their log metadata.
///
/// Returns an empty vec (with a warning) when no receipt exists for the
/// hash. Logs emitted by other contracts, or that fail to decode as `T`,
/// are silently skipped — callers get only the matching, decodable events.
///
/// Errors if the middleware call to fetch the receipt fails.
pub async fn fetch_raw_logs_and_log_meta<T: EthEvent, M>(
    tx_hash: H512,
    provider: Arc<M>,
    contract_address: EthersH160,
) -> ChainResult<Vec<(T, LogMeta)>>
where
    M: Middleware + 'static,
{
    let ethers_tx_hash: EthersH256 = tx_hash.into();
    let receipt = provider
        .get_transaction_receipt(ethers_tx_hash)
        .await
        // Variant constructor used directly instead of a redundant closure.
        .map_err(ContractError::<M>::MiddlewareError)?;
    let Some(receipt) = receipt else {
        warn!(%tx_hash, "No receipt found for tx hash");
        return Ok(vec![]);
    };

    let logs: Vec<(T, LogMeta)> = receipt
        .logs
        .into_iter()
        .filter_map(|log| {
            // Filter out logs that aren't emitted by this contract
            if log.address != contract_address {
                return None;
            }
            // Derive the metadata from the borrow first, so the fields can be
            // moved into the RawLog below without cloning `topics`.
            let log_meta: EthersLogMeta = (&log).into();
            let raw_log = RawLog {
                topics: log.topics,
                data: log.data.to_vec(),
            };
            // Drop logs that don't decode as event `T` (e.g. other events
            // emitted by the same contract).
            let event_filter = T::decode_log(&raw_log).ok();
            event_filter.map(|log| (log, log_meta.into()))
        })
        .collect();
    Ok(logs)
}

@ -35,7 +35,7 @@ pub struct FuelInterchainGasPaymasterIndexer {}
#[async_trait] #[async_trait]
impl Indexer<InterchainGasPayment> for FuelInterchainGasPaymasterIndexer { impl Indexer<InterchainGasPayment> for FuelInterchainGasPaymasterIndexer {
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<InterchainGasPayment>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<InterchainGasPayment>, LogMeta)>> {

@ -126,7 +126,7 @@ pub struct FuelMailboxIndexer {}
#[async_trait] #[async_trait]
impl Indexer<HyperlaneMessage> for FuelMailboxIndexer { impl Indexer<HyperlaneMessage> for FuelMailboxIndexer {
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<HyperlaneMessage>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<HyperlaneMessage>, LogMeta)>> {
@ -140,7 +140,7 @@ impl Indexer<HyperlaneMessage> for FuelMailboxIndexer {
#[async_trait] #[async_trait]
impl Indexer<H256> for FuelMailboxIndexer { impl Indexer<H256> for FuelMailboxIndexer {
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<H256>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<H256>, LogMeta)>> {

@ -246,7 +246,7 @@ impl SealevelInterchainGasPaymasterIndexer {
#[async_trait] #[async_trait]
impl Indexer<InterchainGasPayment> for SealevelInterchainGasPaymasterIndexer { impl Indexer<InterchainGasPayment> for SealevelInterchainGasPaymasterIndexer {
#[instrument(err, skip(self))] #[instrument(err, skip(self))]
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<InterchainGasPayment>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<InterchainGasPayment>, LogMeta)>> {

@ -646,7 +646,7 @@ impl SequenceAwareIndexer<HyperlaneMessage> for SealevelMailboxIndexer {
#[async_trait] #[async_trait]
impl Indexer<HyperlaneMessage> for SealevelMailboxIndexer { impl Indexer<HyperlaneMessage> for SealevelMailboxIndexer {
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<HyperlaneMessage>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<HyperlaneMessage>, LogMeta)>> {
@ -670,7 +670,7 @@ impl Indexer<HyperlaneMessage> for SealevelMailboxIndexer {
#[async_trait] #[async_trait]
impl Indexer<H256> for SealevelMailboxIndexer { impl Indexer<H256> for SealevelMailboxIndexer {
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
_range: RangeInclusive<u32>, _range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<H256>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<H256>, LogMeta)>> {

@ -83,11 +83,11 @@ pub struct SealevelMerkleTreeHookIndexer(SealevelMailboxIndexer);
#[async_trait] #[async_trait]
impl Indexer<MerkleTreeInsertion> for SealevelMerkleTreeHookIndexer { impl Indexer<MerkleTreeInsertion> for SealevelMerkleTreeHookIndexer {
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<MerkleTreeInsertion>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<MerkleTreeInsertion>, LogMeta)>> {
let messages = Indexer::<HyperlaneMessage>::fetch_logs(&self.0, range).await?; let messages = Indexer::<HyperlaneMessage>::fetch_logs_in_range(&self.0, range).await?;
let merkle_tree_insertions = messages let merkle_tree_insertions = messages
.into_iter() .into_iter()
.map(|(m, meta)| (message_to_merkle_tree_insertion(m.inner()).into(), meta)) .map(|(m, meta)| (message_to_merkle_tree_insertion(m.inner()).into(), meta))

@ -13,8 +13,18 @@ pub enum CursorType {
RateLimited, RateLimited,
} }
// H256 * 1M = 32MB per origin chain worst case
// With one such channel per origin chain.
const TX_ID_CHANNEL_CAPACITY: Option<usize> = Some(1_000_000);
pub trait Indexable { pub trait Indexable {
/// Returns the configured cursor type of this type for the given domain, (e.g. `SequenceAware` or `RateLimited`)
fn indexing_cursor(domain: HyperlaneDomainProtocol) -> CursorType; fn indexing_cursor(domain: HyperlaneDomainProtocol) -> CursorType;
/// Indexing tasks may have channels open between them to share information that improves reliability (such as the txid where a message event was indexed).
/// By default this method is None, and it should return a channel capacity if this indexing task is to broadcast anything to other tasks.
fn broadcast_channel_size() -> Option<usize> {
None
}
} }
impl Indexable for HyperlaneMessage { impl Indexable for HyperlaneMessage {
@ -26,6 +36,11 @@ impl Indexable for HyperlaneMessage {
HyperlaneDomainProtocol::Cosmos => CursorType::SequenceAware, HyperlaneDomainProtocol::Cosmos => CursorType::SequenceAware,
} }
} }
// Only broadcast txids from the message indexing task
fn broadcast_channel_size() -> Option<usize> {
TX_ID_CHANNEL_CAPACITY
}
} }
impl Indexable for InterchainGasPayment { impl Indexable for InterchainGasPayment {

@ -216,6 +216,16 @@ where
} }
} }
// Manual `Debug` impl that prints only a subset of fields.
// NOTE(review): presumably a derived impl is unavailable because some fields
// don't implement `Debug` — confirm against the struct definition.
impl<T> Debug for RateLimitedContractSyncCursor<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("RateLimitedContractSyncCursor")
            .field("tip", &self.tip)
            .field("last_tip_update", &self.last_tip_update)
            .field("sync_state", &self.sync_state)
            .finish()
    }
}
#[cfg(test)] #[cfg(test)]
pub(crate) mod test { pub(crate) mod test {
use super::*; use super::*;
@ -234,7 +244,7 @@ pub(crate) mod test {
#[async_trait] #[async_trait]
impl Indexer<()> for Indexer { impl Indexer<()> for Indexer {
async fn fetch_logs(&self, range: RangeInclusive<u32>) -> ChainResult<Vec<(hyperlane_core::Indexed<()> , LogMeta)>>; async fn fetch_logs_in_range(&self, range: RangeInclusive<u32>) -> ChainResult<Vec<(hyperlane_core::Indexed<()> , LogMeta)>>;
async fn get_finalized_block_number(&self) -> ChainResult<u32>; async fn get_finalized_block_number(&self) -> ChainResult<u32>;
} }
} }

@ -9,10 +9,13 @@ use hyperlane_core::{
HyperlaneSequenceAwareIndexerStoreReader, IndexMode, Indexed, LogMeta, SequenceIndexed, HyperlaneSequenceAwareIndexerStoreReader, IndexMode, Indexed, LogMeta, SequenceIndexed,
}; };
use itertools::Itertools; use itertools::Itertools;
use tokio::time::sleep;
use tracing::{debug, instrument, warn}; use tracing::{debug, instrument, warn};
use super::{LastIndexedSnapshot, TargetSnapshot}; use super::{LastIndexedSnapshot, TargetSnapshot};
const MAX_BACKWARD_SYNC_BLOCKING_TIME: Duration = Duration::from_secs(5);
/// A sequence-aware cursor that syncs backward until there are no earlier logs to index. /// A sequence-aware cursor that syncs backward until there are no earlier logs to index.
pub(crate) struct BackwardSequenceAwareSyncCursor<T> { pub(crate) struct BackwardSequenceAwareSyncCursor<T> {
/// The max chunk size to query for logs. /// The max chunk size to query for logs.
@ -32,6 +35,17 @@ pub(crate) struct BackwardSequenceAwareSyncCursor<T> {
index_mode: IndexMode, index_mode: IndexMode,
} }
// Manual `Debug` impl that prints only a subset of fields.
// NOTE(review): presumably a derived impl is unavailable because some fields
// (e.g. the db handle) don't implement `Debug` — confirm against the struct.
impl<T> Debug for BackwardSequenceAwareSyncCursor<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("BackwardSequenceAwareSyncCursor")
            .field("chunk_size", &self.chunk_size)
            .field("last_indexed_snapshot", &self.last_indexed_snapshot)
            .field("current_indexing_snapshot", &self.current_indexing_snapshot)
            .field("index_mode", &self.index_mode)
            .finish()
    }
}
impl<T: Debug> BackwardSequenceAwareSyncCursor<T> { impl<T: Debug> BackwardSequenceAwareSyncCursor<T> {
#[instrument( #[instrument(
skip(db), skip(db),
@ -68,7 +82,11 @@ impl<T: Debug> BackwardSequenceAwareSyncCursor<T> {
#[instrument(ret)] #[instrument(ret)]
pub async fn get_next_range(&mut self) -> Result<Option<RangeInclusive<u32>>> { pub async fn get_next_range(&mut self) -> Result<Option<RangeInclusive<u32>>> {
// Skip any already indexed logs. // Skip any already indexed logs.
self.skip_indexed().await?; tokio::select! {
res = self.skip_indexed() => res?,
// return early to allow the forward cursor to also make progress
_ = sleep(MAX_BACKWARD_SYNC_BLOCKING_TIME) => { return Ok(None); }
};
// If `self.current_indexing_snapshot` is None, we are synced and there are no more ranges to query. // If `self.current_indexing_snapshot` is None, we are synced and there are no more ranges to query.
// Otherwise, we query the next range, searching for logs prior to and including the current indexing snapshot. // Otherwise, we query the next range, searching for logs prior to and including the current indexing snapshot.

@ -41,6 +41,18 @@ pub(crate) struct ForwardSequenceAwareSyncCursor<T> {
index_mode: IndexMode, index_mode: IndexMode,
} }
// Manual `Debug` impl that prints only a subset of fields; unlike the
// backward cursor's impl, this one also includes `target_snapshot`.
// NOTE(review): presumably a derived impl is unavailable because some fields
// don't implement `Debug` — confirm against the struct definition.
impl<T> Debug for ForwardSequenceAwareSyncCursor<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ForwardSequenceAwareSyncCursor")
            .field("chunk_size", &self.chunk_size)
            .field("last_indexed_snapshot", &self.last_indexed_snapshot)
            .field("current_indexing_snapshot", &self.current_indexing_snapshot)
            .field("target_snapshot", &self.target_snapshot)
            .field("index_mode", &self.index_mode)
            .finish()
    }
}
impl<T: Debug> ForwardSequenceAwareSyncCursor<T> { impl<T: Debug> ForwardSequenceAwareSyncCursor<T> {
#[instrument( #[instrument(
skip(db, latest_sequence_querier), skip(db, latest_sequence_querier),
@ -493,7 +505,7 @@ pub(crate) mod test {
where where
T: Sequenced + Debug, T: Sequenced + Debug,
{ {
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
_range: RangeInclusive<u32>, _range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<T>, LogMeta)>> { ) -> ChainResult<Vec<(Indexed<T>, LogMeta)>> {

@ -62,6 +62,7 @@ pub enum SyncDirection {
/// A cursor that prefers to sync forward, but will sync backward if there is nothing to /// A cursor that prefers to sync forward, but will sync backward if there is nothing to
/// sync forward. /// sync forward.
#[derive(Debug)]
pub(crate) struct ForwardBackwardSequenceAwareSyncCursor<T> { pub(crate) struct ForwardBackwardSequenceAwareSyncCursor<T> {
forward: ForwardSequenceAwareSyncCursor<T>, forward: ForwardSequenceAwareSyncCursor<T>,
backward: BackwardSequenceAwareSyncCursor<T>, backward: BackwardSequenceAwareSyncCursor<T>,

@ -10,9 +10,13 @@ use hyperlane_core::{
HyperlaneSequenceAwareIndexerStore, HyperlaneWatermarkedLogStore, Indexer, HyperlaneSequenceAwareIndexerStore, HyperlaneWatermarkedLogStore, Indexer,
SequenceAwareIndexer, SequenceAwareIndexer,
}; };
use hyperlane_core::{Indexed, LogMeta, H512};
pub use metrics::ContractSyncMetrics; pub use metrics::ContractSyncMetrics;
use prometheus::core::{AtomicI64, AtomicU64, GenericCounter, GenericGauge};
use tokio::sync::broadcast::error::TryRecvError;
use tokio::sync::broadcast::{Receiver as BroadcastReceiver, Sender as BroadcastSender};
use tokio::time::sleep; use tokio::time::sleep;
use tracing::{debug, info, warn}; use tracing::{debug, info, instrument, trace, warn};
use crate::settings::IndexSettings; use crate::settings::IndexSettings;
@ -27,17 +31,33 @@ const SLEEP_DURATION: Duration = Duration::from_secs(5);
/// Entity that drives the syncing of an agent's db with on-chain data. /// Entity that drives the syncing of an agent's db with on-chain data.
/// Extracts chain-specific data (emitted checkpoints, messages, etc) from an /// Extracts chain-specific data (emitted checkpoints, messages, etc) from an
/// `indexer` and fills the agent's db with this data. /// `indexer` and fills the agent's db with this data.
#[derive(Debug, new, Clone)] #[derive(Debug)]
pub struct ContractSync<T, D: HyperlaneLogStore<T>, I: Indexer<T>> { pub struct ContractSync<T: Indexable, D: HyperlaneLogStore<T>, I: Indexer<T>> {
domain: HyperlaneDomain, domain: HyperlaneDomain,
db: D, db: D,
indexer: I, indexer: I,
metrics: ContractSyncMetrics, metrics: ContractSyncMetrics,
broadcast_sender: Option<BroadcastSender<H512>>,
_phantom: PhantomData<T>, _phantom: PhantomData<T>,
} }
impl<T: Indexable, D: HyperlaneLogStore<T>, I: Indexer<T>> ContractSync<T, D, I> {
    /// Create a new ContractSync.
    ///
    /// A tx-id broadcast channel is created only when the indexed type opts
    /// in via `Indexable::broadcast_channel_size` returning `Some(capacity)`;
    /// otherwise `broadcast_sender` stays `None` and nothing is broadcast.
    pub fn new(domain: HyperlaneDomain, db: D, indexer: I, metrics: ContractSyncMetrics) -> Self {
        Self {
            domain,
            db,
            indexer,
            metrics,
            // Channel capacity is chosen by the indexed type itself.
            broadcast_sender: T::broadcast_channel_size().map(BroadcastSender::new),
            _phantom: PhantomData,
        }
    }
}
impl<T, D, I> ContractSync<T, D, I> impl<T, D, I> ContractSync<T, D, I>
where where
T: Indexable + Debug + Send + Sync + Clone + Eq + Hash + 'static,
D: HyperlaneLogStore<T>, D: HyperlaneLogStore<T>,
I: Indexer<T> + 'static, I: Indexer<T> + 'static,
{ {
@ -45,36 +65,86 @@ where
pub fn domain(&self) -> &HyperlaneDomain { pub fn domain(&self) -> &HyperlaneDomain {
&self.domain &self.domain
} }
fn get_broadcaster(&self) -> Option<BroadcastSender<H512>> {
self.broadcast_sender.clone()
} }
impl<T, D, I> ContractSync<T, D, I>
where
T: Debug + Send + Sync + Clone + Eq + Hash + 'static,
D: HyperlaneLogStore<T>,
I: Indexer<T> + 'static,
{
/// Sync logs and write them to the LogStore /// Sync logs and write them to the LogStore
#[tracing::instrument(name = "ContractSync", fields(domain=self.domain().name()), skip(self, cursor))] #[instrument(name = "ContractSync", fields(domain=self.domain().name()), skip(self, opts))]
pub async fn sync(&self, label: &'static str, mut cursor: Box<dyn ContractSyncCursor<T>>) { pub async fn sync(&self, label: &'static str, mut opts: SyncOptions<T>) {
let chain_name = self.domain.as_ref(); let chain_name = self.domain.as_ref();
let indexed_height = self let indexed_height_metric = self
.metrics .metrics
.indexed_height .indexed_height
.with_label_values(&[label, chain_name]); .with_label_values(&[label, chain_name]);
let stored_logs = self let stored_logs_metric = self
.metrics .metrics
.stored_events .stored_events
.with_label_values(&[label, chain_name]); .with_label_values(&[label, chain_name]);
loop { loop {
indexed_height.set(cursor.latest_queried_block() as i64); if let Some(rx) = opts.tx_id_receiver.as_mut() {
self.fetch_logs_from_receiver(rx, &stored_logs_metric).await;
}
if let Some(cursor) = opts.cursor.as_mut() {
self.fetch_logs_with_cursor(cursor, &stored_logs_metric, &indexed_height_metric)
.await;
}
}
}
#[instrument(fields(domain=self.domain().name()), skip(self, recv, stored_logs_metric))]
async fn fetch_logs_from_receiver(
&self,
recv: &mut BroadcastReceiver<H512>,
stored_logs_metric: &GenericCounter<AtomicU64>,
) {
loop {
match recv.try_recv() {
Ok(tx_id) => {
let logs = match self.indexer.fetch_logs_by_tx_hash(tx_id).await {
Ok(logs) => logs,
Err(err) => {
warn!(?err, ?tx_id, "Error fetching logs for tx id");
continue;
}
};
let logs = self.dedupe_and_store_logs(logs, stored_logs_metric).await;
let num_logs = logs.len() as u64;
info!(
num_logs,
?tx_id,
sequences = ?logs.iter().map(|(log, _)| log.sequence).collect::<Vec<_>>(),
"Found log(s) for tx id"
);
}
Err(TryRecvError::Empty) => {
trace!("No txid received");
break;
}
Err(err) => {
warn!(?err, "Error receiving txid from channel");
break;
}
}
}
}
#[instrument(fields(domain=self.domain().name()), skip(self, stored_logs_metric, indexed_height_metric))]
async fn fetch_logs_with_cursor(
&self,
cursor: &mut Box<dyn ContractSyncCursor<T>>,
stored_logs_metric: &GenericCounter<AtomicU64>,
indexed_height_metric: &GenericGauge<AtomicI64>,
) {
indexed_height_metric.set(cursor.latest_queried_block() as i64);
let (action, eta) = match cursor.next_action().await { let (action, eta) = match cursor.next_action().await {
Ok((action, eta)) => (action, eta), Ok((action, eta)) => (action, eta),
Err(err) => { Err(err) => {
warn!(?err, "Error getting next action"); warn!(?err, "Error getting next action");
sleep(SLEEP_DURATION).await; sleep(SLEEP_DURATION).await;
continue; return;
} }
}; };
let sleep_duration = match action { let sleep_duration = match action {
@ -84,32 +154,33 @@ where
CursorAction::Query(range) => loop { CursorAction::Query(range) => loop {
debug!(?range, "Looking for events in index range"); debug!(?range, "Looking for events in index range");
let logs = match self.indexer.fetch_logs(range.clone()).await { let logs = match self.indexer.fetch_logs_in_range(range.clone()).await {
Ok(logs) => logs, Ok(logs) => logs,
Err(err) => { Err(err) => {
warn!(?err, "Error fetching logs"); warn!(?err, ?range, "Error fetching logs in range");
break SLEEP_DURATION; break SLEEP_DURATION;
} }
}; };
let deduped_logs = HashSet::<_>::from_iter(logs);
let logs = Vec::from_iter(deduped_logs);
let logs = self.dedupe_and_store_logs(logs, stored_logs_metric).await;
let logs_found = logs.len() as u64;
info!( info!(
?range, ?range,
num_logs = logs.len(), num_logs = logs_found,
estimated_time_to_sync = fmt_sync_time(eta), estimated_time_to_sync = fmt_sync_time(eta),
sequences = ?logs.iter().map(|(log, _)| log.sequence).collect::<Vec<_>>(),
cursor = ?cursor,
"Found log(s) in index range" "Found log(s) in index range"
); );
// Store deliveries
let stored = match self.db.store_logs(&logs).await { if let Some(tx) = self.broadcast_sender.as_ref() {
Ok(stored) => stored, logs.iter().for_each(|(_, meta)| {
Err(err) => { if let Err(err) = tx.send(meta.transaction_id) {
warn!(?err, "Error storing logs in db"); trace!(?err, "Error sending txid to receiver");
break SLEEP_DURATION;
} }
}; });
// Report amount of deliveries stored into db }
stored_logs.inc_by(stored as u64);
// Update cursor // Update cursor
if let Err(err) = cursor.update(logs, range).await { if let Err(err) = cursor.update(logs, range).await {
warn!(?err, "Error updating cursor"); warn!(?err, "Error updating cursor");
@ -119,8 +190,36 @@ where
}, },
CursorAction::Sleep(duration) => duration, CursorAction::Sleep(duration) => duration,
}; };
sleep(sleep_duration).await; sleep(sleep_duration).await
}
async fn dedupe_and_store_logs(
&self,
logs: Vec<(Indexed<T>, LogMeta)>,
stored_logs_metric: &GenericCounter<AtomicU64>,
) -> Vec<(Indexed<T>, LogMeta)> {
let deduped_logs = HashSet::<_>::from_iter(logs);
let logs = Vec::from_iter(deduped_logs);
// Store deliveries
let stored = match self.db.store_logs(&logs).await {
Ok(stored) => stored,
Err(err) => {
warn!(?err, "Error storing logs in db");
Default::default()
}
};
if stored > 0 {
debug!(
domain = self.domain.as_ref(),
count = stored,
sequences = ?logs.iter().map(|(log, _)| log.sequence).collect::<Vec<_>>(),
"Stored logs in db",
);
} }
// Report amount of deliveries stored into db
stored_logs_metric.inc_by(stored as u64);
logs
} }
} }
@ -141,16 +240,38 @@ pub trait ContractSyncer<T>: Send + Sync {
async fn cursor(&self, index_settings: IndexSettings) -> Box<dyn ContractSyncCursor<T>>; async fn cursor(&self, index_settings: IndexSettings) -> Box<dyn ContractSyncCursor<T>>;
/// Syncs events from the indexer using the provided cursor /// Syncs events from the indexer using the provided cursor
async fn sync(&self, label: &'static str, cursor: Box<dyn ContractSyncCursor<T>>); async fn sync(&self, label: &'static str, opts: SyncOptions<T>);
/// The domain of this syncer /// The domain of this syncer
fn domain(&self) -> &HyperlaneDomain; fn domain(&self) -> &HyperlaneDomain;
/// If this syncer is also a broadcaster, return the channel to receive txids
fn get_broadcaster(&self) -> Option<BroadcastSender<H512>>;
}
#[derive(new)]
/// Options for syncing events
pub struct SyncOptions<T> {
    // Keep as optional fields for now to run them simultaneously.
    // Might want to refactor into an enum later, where we either index with a cursor or rely on receiving
    // txids from a channel to other indexing tasks
    /// Cursor for range-based indexing; `None` disables cursor-driven syncing.
    cursor: Option<Box<dyn ContractSyncCursor<T>>>,
    /// Receiver of tx ids broadcast by other indexing tasks; `None` disables
    /// tx-id-driven syncing.
    tx_id_receiver: Option<BroadcastReceiver<H512>>,
}
impl<T> From<Box<dyn ContractSyncCursor<T>>> for SyncOptions<T> {
fn from(cursor: Box<dyn ContractSyncCursor<T>>) -> Self {
Self {
cursor: Some(cursor),
tx_id_receiver: None,
}
}
} }
#[async_trait] #[async_trait]
impl<T> ContractSyncer<T> for WatermarkContractSync<T> impl<T> ContractSyncer<T> for WatermarkContractSync<T>
where where
T: Debug + Send + Sync + Clone + Eq + Hash + 'static, T: Indexable + Debug + Send + Sync + Clone + Eq + Hash + 'static,
{ {
/// Returns a new cursor to be used for syncing events from the indexer based on time /// Returns a new cursor to be used for syncing events from the indexer based on time
async fn cursor(&self, index_settings: IndexSettings) -> Box<dyn ContractSyncCursor<T>> { async fn cursor(&self, index_settings: IndexSettings) -> Box<dyn ContractSyncCursor<T>> {
@ -172,13 +293,17 @@ where
) )
} }
async fn sync(&self, label: &'static str, cursor: Box<dyn ContractSyncCursor<T>>) { async fn sync(&self, label: &'static str, opts: SyncOptions<T>) {
ContractSync::sync(self, label, cursor).await; ContractSync::sync(self, label, opts).await
} }
fn domain(&self) -> &HyperlaneDomain { fn domain(&self) -> &HyperlaneDomain {
ContractSync::domain(self) ContractSync::domain(self)
} }
fn get_broadcaster(&self) -> Option<BroadcastSender<H512>> {
ContractSync::get_broadcaster(self)
}
} }
/// Log store for sequence aware cursors /// Log store for sequence aware cursors
@ -191,7 +316,7 @@ pub type SequencedDataContractSync<T> =
#[async_trait] #[async_trait]
impl<T> ContractSyncer<T> for SequencedDataContractSync<T> impl<T> ContractSyncer<T> for SequencedDataContractSync<T>
where where
T: Send + Sync + Debug + Clone + Eq + Hash + 'static, T: Indexable + Send + Sync + Debug + Clone + Eq + Hash + 'static,
{ {
/// Returns a new cursor to be used for syncing dispatched messages from the indexer /// Returns a new cursor to be used for syncing dispatched messages from the indexer
async fn cursor(&self, index_settings: IndexSettings) -> Box<dyn ContractSyncCursor<T>> { async fn cursor(&self, index_settings: IndexSettings) -> Box<dyn ContractSyncCursor<T>> {
@ -207,11 +332,15 @@ where
) )
} }
async fn sync(&self, label: &'static str, cursor: Box<dyn ContractSyncCursor<T>>) { async fn sync(&self, label: &'static str, opts: SyncOptions<T>) {
ContractSync::sync(self, label, cursor).await; ContractSync::sync(self, label, opts).await;
} }
fn domain(&self) -> &HyperlaneDomain { fn domain(&self) -> &HyperlaneDomain {
ContractSync::domain(self) ContractSync::domain(self)
} }
fn get_broadcaster(&self) -> Option<BroadcastSender<H512>> {
ContractSync::get_broadcaster(self)
}
} }

@ -242,10 +242,10 @@ impl HyperlaneRocksDB {
&self, &self,
event: InterchainGasExpenditure, event: InterchainGasExpenditure,
) -> DbResult<()> { ) -> DbResult<()> {
let existing_payment = self.retrieve_gas_expenditure_by_message_id(event.message_id)?; let existing_expenditure = self.retrieve_gas_expenditure_by_message_id(event.message_id)?;
let total = existing_payment + event; let total = existing_expenditure + event;
debug!(?event, new_total_gas_payment=?total, "Storing gas payment"); debug!(?event, new_total_gas_expenditure=?total, "Storing gas expenditure");
self.store_interchain_gas_expenditure_data_by_message_id( self.store_interchain_gas_expenditure_data_by_message_id(
&total.message_id, &total.message_id,
&InterchainGasExpenditureData { &InterchainGasExpenditureData {

@ -160,7 +160,7 @@ impl Settings {
db: Arc<D>, db: Arc<D>,
) -> eyre::Result<Arc<SequencedDataContractSync<T>>> ) -> eyre::Result<Arc<SequencedDataContractSync<T>>>
where where
T: Debug, T: Indexable + Debug,
SequenceIndexer<T>: TryFromWithMetrics<ChainConf>, SequenceIndexer<T>: TryFromWithMetrics<ChainConf>,
D: HyperlaneLogStore<T> + HyperlaneSequenceAwareIndexerStoreReader<T> + 'static, D: HyperlaneLogStore<T> + HyperlaneSequenceAwareIndexerStoreReader<T> + 'static,
{ {
@ -184,7 +184,7 @@ impl Settings {
db: Arc<D>, db: Arc<D>,
) -> eyre::Result<Arc<WatermarkContractSync<T>>> ) -> eyre::Result<Arc<WatermarkContractSync<T>>>
where where
T: Debug, T: Indexable + Debug,
SequenceIndexer<T>: TryFromWithMetrics<ChainConf>, SequenceIndexer<T>: TryFromWithMetrics<ChainConf>,
D: HyperlaneLogStore<T> + HyperlaneWatermarkedLogStore<T> + 'static, D: HyperlaneLogStore<T> + HyperlaneWatermarkedLogStore<T> + 'static,
{ {

@ -49,7 +49,7 @@ uint.workspace = true
tokio = { workspace = true, features = ["rt", "time"] } tokio = { workspace = true, features = ["rt", "time"] }
[features] [features]
default = [] default = ["strum"]
float = [] float = []
test-utils = ["dep:config"] test-utils = ["dep:config"]
agent = ["ethers", "strum"] agent = ["ethers", "strum"]

@ -51,6 +51,7 @@ impl<'a> std::fmt::Display for ContractLocator<'a> {
pub enum KnownHyperlaneDomain { pub enum KnownHyperlaneDomain {
Ethereum = 1, Ethereum = 1,
Sepolia = 11155111, Sepolia = 11155111,
Holesky = 17000,
Polygon = 137, Polygon = 137,
@ -82,6 +83,18 @@ pub enum KnownHyperlaneDomain {
Injective = 6909546, Injective = 6909546,
InEvm = 2525, InEvm = 2525,
Ancient8 = 888888888,
Blast = 81457,
Mode = 34443,
Redstone = 690,
Viction = 88,
Zetachain = 7000,
PlumeTestnet = 161221135, PlumeTestnet = 161221135,
// -- Local test chains -- // -- Local test chains --
@ -215,10 +228,11 @@ impl KnownHyperlaneDomain {
many_to_one!(match self { many_to_one!(match self {
Mainnet: [ Mainnet: [
Ethereum, Avalanche, Arbitrum, Polygon, Optimism, BinanceSmartChain, Celo, Ethereum, Avalanche, Arbitrum, Polygon, Optimism, BinanceSmartChain, Celo,
Moonbeam, Gnosis, MantaPacific, Neutron, Injective, InEvm Moonbeam, Gnosis, MantaPacific, Neutron, Injective, InEvm, Ancient8, Blast,
Mode, Redstone, Viction, Zetachain
], ],
Testnet: [ Testnet: [
Alfajores, MoonbaseAlpha, Sepolia, ScrollSepolia, Chiado, PlumeTestnet, Fuji, BinanceSmartChainTestnet Alfajores, MoonbaseAlpha, Sepolia, ScrollSepolia, Chiado, PlumeTestnet, Fuji, BinanceSmartChainTestnet, Holesky
], ],
LocalTestChain: [Test1, Test2, Test3, FuelTest1, SealevelTest1, SealevelTest2, CosmosTest99990, CosmosTest99991], LocalTestChain: [Test1, Test2, Test3, FuelTest1, SealevelTest1, SealevelTest2, CosmosTest99990, CosmosTest99991],
}) })
@ -229,10 +243,11 @@ impl KnownHyperlaneDomain {
many_to_one!(match self { many_to_one!(match self {
HyperlaneDomainProtocol::Ethereum: [ HyperlaneDomainProtocol::Ethereum: [
Ethereum, Sepolia, Polygon, Avalanche, Fuji, Arbitrum, Ethereum, Sepolia, Holesky, Polygon, Avalanche, Fuji, Arbitrum,
Optimism, BinanceSmartChain, BinanceSmartChainTestnet, Celo, Gnosis, Optimism, BinanceSmartChain, BinanceSmartChainTestnet, Celo, Gnosis,
Alfajores, Moonbeam, InEvm, MoonbaseAlpha, ScrollSepolia, Alfajores, Moonbeam, InEvm, Ancient8, Blast, Mode, Redstone, Viction,
Chiado, MantaPacific, PlumeTestnet, Test1, Test2, Test3 Zetachain, MoonbaseAlpha, ScrollSepolia, Chiado, MantaPacific, PlumeTestnet,
Test1, Test2, Test3
], ],
HyperlaneDomainProtocol::Fuel: [FuelTest1], HyperlaneDomainProtocol::Fuel: [FuelTest1],
HyperlaneDomainProtocol::Sealevel: [SealevelTest1, SealevelTest2], HyperlaneDomainProtocol::Sealevel: [SealevelTest1, SealevelTest2],
@ -246,9 +261,10 @@ impl KnownHyperlaneDomain {
many_to_one!(match self { many_to_one!(match self {
HyperlaneDomainTechnicalStack::ArbitrumNitro: [Arbitrum, PlumeTestnet], HyperlaneDomainTechnicalStack::ArbitrumNitro: [Arbitrum, PlumeTestnet],
HyperlaneDomainTechnicalStack::Other: [ HyperlaneDomainTechnicalStack::Other: [
Ethereum, Sepolia, Polygon, Avalanche, Fuji, Optimism, Ethereum, Sepolia, Holesky, Polygon, Avalanche, Fuji, Optimism,
BinanceSmartChain, BinanceSmartChainTestnet, Celo, Gnosis, Alfajores, Moonbeam, MoonbaseAlpha, BinanceSmartChain, BinanceSmartChainTestnet, Celo, Gnosis, Alfajores, Moonbeam, MoonbaseAlpha,
ScrollSepolia, Chiado, MantaPacific, Neutron, Injective, InEvm, ScrollSepolia, Chiado, MantaPacific, Neutron, Injective, InEvm, Ancient8, Blast, Mode, Redstone,
Viction, Zetachain,
Test1, Test2, Test3, FuelTest1, SealevelTest1, SealevelTest2, CosmosTest99990, CosmosTest99991 Test1, Test2, Test3, FuelTest1, SealevelTest1, SealevelTest2, CosmosTest99990, CosmosTest99991
], ],
}) })

@ -1,4 +1,8 @@
use std::{fmt, ops::RangeInclusive, time::Duration}; use std::{
fmt::{self, Debug},
ops::RangeInclusive,
time::Duration,
};
use async_trait::async_trait; use async_trait::async_trait;
use auto_impl::auto_impl; use auto_impl::auto_impl;
@ -9,7 +13,7 @@ use crate::{Indexed, LogMeta};
/// A cursor governs event indexing for a contract. /// A cursor governs event indexing for a contract.
#[async_trait] #[async_trait]
#[auto_impl(Box)] #[auto_impl(Box)]
pub trait ContractSyncCursor<T>: Send + Sync + 'static { pub trait ContractSyncCursor<T>: Debug + Send + Sync + 'static {
/// The next block range that should be queried. /// The next block range that should be queried.
/// This method should be tolerant to being called multiple times in a row /// This method should be tolerant to being called multiple times in a row
/// without any updates in between. /// without any updates in between.

@ -11,7 +11,7 @@ use async_trait::async_trait;
use auto_impl::auto_impl; use auto_impl::auto_impl;
use serde::Deserialize; use serde::Deserialize;
use crate::{ChainResult, Indexed, LogMeta}; use crate::{ChainResult, Indexed, LogMeta, H512};
/// Indexing mode. /// Indexing mode.
#[derive(Copy, Debug, Default, Deserialize, Clone)] #[derive(Copy, Debug, Default, Deserialize, Clone)]
@ -29,13 +29,21 @@ pub enum IndexMode {
#[auto_impl(&, Box, Arc,)] #[auto_impl(&, Box, Arc,)]
pub trait Indexer<T: Sized>: Send + Sync + Debug { pub trait Indexer<T: Sized>: Send + Sync + Debug {
/// Fetch list of logs between blocks `from` and `to`, inclusive. /// Fetch list of logs between blocks `from` and `to`, inclusive.
async fn fetch_logs( async fn fetch_logs_in_range(
&self, &self,
range: RangeInclusive<u32>, range: RangeInclusive<u32>,
) -> ChainResult<Vec<(Indexed<T>, LogMeta)>>; ) -> ChainResult<Vec<(Indexed<T>, LogMeta)>>;
/// Get the chain's latest block number that has reached finality /// Get the chain's latest block number that has reached finality
async fn get_finalized_block_number(&self) -> ChainResult<u32>; async fn get_finalized_block_number(&self) -> ChainResult<u32>;
    /// Fetch list of logs emitted in a transaction with the given hash.
    ///
    /// The default implementation ignores the hash and returns no logs, for
    /// indexers that do not support looking up logs by transaction.
    async fn fetch_logs_by_tx_hash(
        &self,
        _tx_hash: H512,
    ) -> ChainResult<Vec<(Indexed<T>, LogMeta)>> {
        Ok(vec![])
    }
} }
/// Interface for indexing data in sequence. /// Interface for indexing data in sequence.

@ -10,6 +10,7 @@ pub use interchain_security_module::*;
pub use mailbox::*; pub use mailbox::*;
pub use merkle_tree_hook::*; pub use merkle_tree_hook::*;
pub use multisig_ism::*; pub use multisig_ism::*;
pub use pending_operation::*;
pub use provider::*; pub use provider::*;
pub use routing_ism::*; pub use routing_ism::*;
pub use signing::*; pub use signing::*;
@ -29,6 +30,7 @@ mod interchain_security_module;
mod mailbox; mod mailbox;
mod merkle_tree_hook; mod merkle_tree_hook;
mod multisig_ism; mod multisig_ism;
mod pending_operation;
mod provider; mod provider;
mod routing_ism; mod routing_ism;
mod signing; mod signing;

@ -4,10 +4,16 @@ use std::{
time::{Duration, Instant}, time::{Duration, Instant},
}; };
use crate::{
ChainResult, FixedPointNumber, HyperlaneDomain, HyperlaneMessage, TryBatchAs, TxOutcome, H256,
U256,
};
use async_trait::async_trait; use async_trait::async_trait;
use hyperlane_core::{HyperlaneDomain, HyperlaneMessage, TryBatchAs, TxOutcome, H256}; use num::CheckedDiv;
use tracing::warn;
use super::op_queue::QueueOperation; /// Boxed operation that can be stored in an operation queue
pub type QueueOperation = Box<dyn PendingOperation>;
/// A pending operation that will be run by the submitter and cause a /// A pending operation that will be run by the submitter and cause a
/// transaction to be sent. /// transaction to be sent.
@ -67,11 +73,21 @@ pub trait PendingOperation: Send + Sync + Debug + TryBatchAs<HyperlaneMessage> {
/// Set the outcome of the `submit` call /// Set the outcome of the `submit` call
fn set_submission_outcome(&mut self, outcome: TxOutcome); fn set_submission_outcome(&mut self, outcome: TxOutcome);
/// Get the estimated the cost of the `submit` call
fn get_tx_cost_estimate(&self) -> Option<U256>;
/// This will be called after the operation has been submitted and is /// This will be called after the operation has been submitted and is
/// responsible for checking if the operation has reached a point at /// responsible for checking if the operation has reached a point at
/// which we consider it safe from reorgs. /// which we consider it safe from reorgs.
async fn confirm(&mut self) -> PendingOperationResult; async fn confirm(&mut self) -> PendingOperationResult;
/// Record the outcome of the operation
fn set_operation_outcome(
&mut self,
submission_outcome: TxOutcome,
submission_estimated_cost: U256,
);
/// Get the earliest instant at which this should next be attempted. /// Get the earliest instant at which this should next be attempted.
/// ///
/// This is only used for sorting, the functions are responsible for /// This is only used for sorting, the functions are responsible for
@ -85,11 +101,41 @@ pub trait PendingOperation: Send + Sync + Debug + TryBatchAs<HyperlaneMessage> {
/// retried immediately. /// retried immediately.
fn reset_attempts(&mut self); fn reset_attempts(&mut self);
#[cfg(test)]
/// Set the number of times this operation has been retried. /// Set the number of times this operation has been retried.
#[cfg(any(test, feature = "test-utils"))]
fn set_retries(&mut self, retries: u32); fn set_retries(&mut self, retries: u32);
} }
/// Utility fn to calculate the total estimated cost of an operation batch
pub fn total_estimated_cost(ops: &[Box<dyn PendingOperation>]) -> U256 {
ops.iter()
.fold(U256::zero(), |acc, op| match op.get_tx_cost_estimate() {
Some(cost_estimate) => acc.saturating_add(cost_estimate),
None => {
warn!(operation=?op, "No cost estimate available for operation, defaulting to 0");
acc
}
})
}
/// Calculate the gas used by an operation (either in a batch or single-submission), by looking at the total cost of the tx,
/// and the estimated cost of the operation compared to the sum of the estimates of all operations in the batch.
/// When using this for single-submission rather than a batch,
/// the `tx_estimated_cost` should be the same as the `operation_estimated_cost`
/// (so the operation is attributed the full gas used by the tx).
///
/// Returns an error if any fixed-point conversion fails, or if
/// `tx_estimated_cost` is zero (division by zero).
pub fn gas_used_by_operation(
    tx_outcome: &TxOutcome,
    tx_estimated_cost: U256,
    operation_estimated_cost: U256,
) -> ChainResult<U256> {
    // Prorate the tx's actual gas by this operation's share of the estimate:
    // gas_used_by_operation = tx_gas_used * operation_estimate / tx_estimate
    let gas_used_by_tx = FixedPointNumber::try_from(tx_outcome.gas_used)?;
    let operation_gas_estimate = FixedPointNumber::try_from(operation_estimated_cost)?;
    let tx_gas_estimate = FixedPointNumber::try_from(tx_estimated_cost)?;
    let gas_used_by_operation = (gas_used_by_tx * operation_gas_estimate)
        .checked_div(&tx_gas_estimate)
        .ok_or(eyre::eyre!("Division by zero"))?;
    gas_used_by_operation.try_into()
}
impl Display for QueueOperation { impl Display for QueueOperation {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!( write!(
@ -138,6 +184,7 @@ impl Ord for QueueOperation {
} }
} }
/// Possible outcomes of performing an action on a pending operation (such as `prepare`, `submit` or `confirm`).
#[derive(Debug)] #[derive(Debug)]
pub enum PendingOperationResult { pub enum PendingOperationResult {
/// Promote to the next step /// Promote to the next step
@ -153,6 +200,7 @@ pub enum PendingOperationResult {
} }
/// create a `op_try!` macro for the `on_retry` handler. /// create a `op_try!` macro for the `on_retry` handler.
#[macro_export]
macro_rules! make_op_try { macro_rules! make_op_try {
($on_retry:expr) => { ($on_retry:expr) => {
/// Handle a result and either return early with retry or a critical failure on /// Handle a result and either return early with retry or a critical failure on
@ -181,5 +229,3 @@ macro_rules! make_op_try {
} }
}; };
} }
pub(super) use make_op_try;

@ -1,50 +0,0 @@
use derive_new::new;
use tokio::sync::broadcast::{Receiver, Sender};
/// Multi-producer, multi-consumer channel
pub struct MpmcChannel<T> {
    /// Sending half; clones all feed the same underlying channel.
    sender: Sender<T>,
    /// Prototype receiving half; cloning it subscribes a fresh receiver
    /// (see `Clone for MpmcReceiver`).
    receiver: MpmcReceiver<T>,
}
impl<T: Clone> MpmcChannel<T> {
    /// Creates a new `MpmcChannel` with the specified capacity.
    ///
    /// # Arguments
    ///
    /// * `capacity` - The maximum number of messages that can be buffered in the channel.
    pub fn new(capacity: usize) -> Self {
        let (tx, rx) = tokio::sync::broadcast::channel(capacity);
        // The receiver keeps its own sender handle so it can be cloned later.
        let receiver = MpmcReceiver::new(tx.clone(), rx);
        Self {
            sender: tx,
            receiver,
        }
    }

    /// Returns a clone of the sender end of the channel.
    pub fn sender(&self) -> Sender<T> {
        self.sender.clone()
    }

    /// Returns a clone of the receiver end of the channel.
    pub fn receiver(&self) -> MpmcReceiver<T> {
        self.receiver.clone()
    }
}
/// Clonable receiving end of a multi-producer, multi-consumer channel
#[derive(Debug, new)]
pub struct MpmcReceiver<T> {
sender: Sender<T>,
/// The receiving end of the channel.
pub receiver: Receiver<T>,
}
impl<T> Clone for MpmcReceiver<T> {
    /// Cloning subscribes a brand-new receiver to the same underlying
    /// broadcast channel via `Sender::subscribe`, rather than copying the
    /// existing receiver's read position.
    fn clone(&self) -> Self {
        let sender = self.sender.clone();
        let receiver = sender.subscribe();
        Self { sender, receiver }
    }
}

@ -8,8 +8,6 @@ pub use self::primitive_types::*;
pub use ::primitive_types as ethers_core_types; pub use ::primitive_types as ethers_core_types;
pub use announcement::*; pub use announcement::*;
pub use chain_data::*; pub use chain_data::*;
#[cfg(feature = "async")]
pub use channel::*;
pub use checkpoint::*; pub use checkpoint::*;
pub use indexing::*; pub use indexing::*;
pub use log_metadata::*; pub use log_metadata::*;
@ -21,8 +19,6 @@ use crate::{Decode, Encode, HyperlaneProtocolError};
mod announcement; mod announcement;
mod chain_data; mod chain_data;
#[cfg(feature = "async")]
mod channel;
mod checkpoint; mod checkpoint;
mod indexing; mod indexing;
mod log_metadata; mod log_metadata;

@ -3,11 +3,15 @@
#![allow(clippy::assign_op_pattern)] #![allow(clippy::assign_op_pattern)]
#![allow(clippy::reversed_empty_ranges)] #![allow(clippy::reversed_empty_ranges)]
use std::{ops::Mul, str::FromStr}; use std::{
ops::{Div, Mul},
str::FromStr,
};
use bigdecimal::{BigDecimal, RoundingMode}; use bigdecimal::{BigDecimal, RoundingMode};
use borsh::{BorshDeserialize, BorshSerialize}; use borsh::{BorshDeserialize, BorshSerialize};
use fixed_hash::impl_fixed_hash_conversions; use fixed_hash::impl_fixed_hash_conversions;
use num::CheckedDiv;
use num_traits::Zero; use num_traits::Zero;
use uint::construct_uint; use uint::construct_uint;
@ -421,6 +425,27 @@ where
} }
} }
impl<T> Div<T> for FixedPointNumber
where
    T: Into<FixedPointNumber>,
{
    type Output = FixedPointNumber;

    /// Divides by any value convertible into a `FixedPointNumber`.
    ///
    /// NOTE(review): delegates to the inner `BigDecimal` division with no
    /// zero check — presumably panics on a zero divisor (TODO confirm);
    /// prefer `checked_div` when the divisor may be zero.
    fn div(self, rhs: T) -> Self::Output {
        let rhs = rhs.into();
        Self(self.0 / rhs.0)
    }
}
impl CheckedDiv for FixedPointNumber {
    /// Division that yields `None` when the divisor is zero.
    fn checked_div(&self, v: &Self) -> Option<Self> {
        (!v.0.is_zero()).then(|| Self(self.0.clone() / v.0.clone()))
    }
}
impl FromStr for FixedPointNumber { impl FromStr for FixedPointNumber {
type Err = ChainCommunicationError; type Err = ChainCommunicationError;

@ -118,7 +118,7 @@ impl BacktraceFrameFmt<'_, '_, '_> {
symbol.name(), symbol.name(),
// TODO: this isn't great that we don't end up printing anything // TODO: this isn't great that we don't end up printing anything
// with non-utf8 filenames. Thankfully almost everything is utf8 so // with non-utf8 filenames. Thankfully almost everything is utf8 so
// this shouldn't be too too bad. // this shouldn't be too bad.
symbol symbol
.filename() .filename()
.and_then(|p| Some(BytesOrWideString::Bytes(p.to_str()?.as_bytes()))), .and_then(|p| Some(BytesOrWideString::Bytes(p.to_str()?.as_bytes()))),

@ -28,11 +28,13 @@ ethers-contract.workspace = true
tokio.workspace = true tokio.workspace = true
maplit.workspace = true maplit.workspace = true
nix = { workspace = true, features = ["signal"], default-features = false } nix = { workspace = true, features = ["signal"], default-features = false }
once_cell.workspace = true
tempfile.workspace = true tempfile.workspace = true
ureq = { workspace = true, default-features = false } ureq = { workspace = true, default-features = false }
which.workspace = true which.workspace = true
macro_rules_attribute.workspace = true macro_rules_attribute.workspace = true
regex.workspace = true regex.workspace = true
relayer = { path = "../../agents/relayer"}
hyperlane-cosmwasm-interface.workspace = true hyperlane-cosmwasm-interface.workspace = true
cosmwasm-schema.workspace = true cosmwasm-schema.workspace = true

@ -6,6 +6,7 @@ pub struct Config {
pub ci_mode: bool, pub ci_mode: bool,
pub ci_mode_timeout: u64, pub ci_mode_timeout: u64,
pub kathy_messages: u64, pub kathy_messages: u64,
pub sealevel_enabled: bool,
// TODO: Include count of sealevel messages in a field separate from `kathy_messages`? // TODO: Include count of sealevel messages in a field separate from `kathy_messages`?
} }
@ -26,6 +27,9 @@ impl Config {
.map(|r| r.parse::<u64>().unwrap()); .map(|r| r.parse::<u64>().unwrap());
r.unwrap_or(16) r.unwrap_or(16)
}, },
sealevel_enabled: env::var("SEALEVEL_ENABLED")
.map(|k| k.parse::<bool>().unwrap())
.unwrap_or(true),
}) })
} }
} }

@ -152,7 +152,7 @@ impl OsmosisCLI {
.arg("grpc.address", &endpoint.grpc_addr) // default is 0.0.0.0:9090 .arg("grpc.address", &endpoint.grpc_addr) // default is 0.0.0.0:9090
.arg("rpc.pprof_laddr", pprof_addr) // default is localhost:6060 .arg("rpc.pprof_laddr", pprof_addr) // default is localhost:6060
.arg("log_level", "panic") .arg("log_level", "panic")
.spawn("COSMOS"); .spawn("COSMOS", None);
endpoint.wait_for_node(); endpoint.wait_for_node();

@ -271,7 +271,7 @@ fn launch_cosmos_validator(
.hyp_env("SIGNER_SIGNER_TYPE", "hexKey") .hyp_env("SIGNER_SIGNER_TYPE", "hexKey")
.hyp_env("SIGNER_KEY", agent_config.signer.key) .hyp_env("SIGNER_KEY", agent_config.signer.key)
.hyp_env("TRACING_LEVEL", if debug { "debug" } else { "info" }) .hyp_env("TRACING_LEVEL", if debug { "debug" } else { "info" })
.spawn("VAL"); .spawn("VAL", None);
validator validator
} }
@ -299,7 +299,7 @@ fn launch_cosmos_relayer(
.hyp_env("TRACING_LEVEL", if debug { "debug" } else { "info" }) .hyp_env("TRACING_LEVEL", if debug { "debug" } else { "info" })
.hyp_env("GASPAYMENTENFORCEMENT", "[{\"type\": \"none\"}]") .hyp_env("GASPAYMENTENFORCEMENT", "[{\"type\": \"none\"}]")
.hyp_env("METRICSPORT", metrics.to_string()) .hyp_env("METRICSPORT", metrics.to_string())
.spawn("RLY"); .spawn("RLY", None);
relayer relayer
} }

@ -36,7 +36,7 @@ pub fn start_anvil(config: Arc<Config>) -> AgentHandles {
} }
log!("Launching anvil..."); log!("Launching anvil...");
let anvil_args = Program::new("anvil").flag("silent").filter_logs(|_| false); // for now do not keep any of the anvil logs let anvil_args = Program::new("anvil").flag("silent").filter_logs(|_| false); // for now do not keep any of the anvil logs
let anvil = anvil_args.spawn("ETH"); let anvil = anvil_args.spawn("ETH", None);
sleep(Duration::from_secs(10)); sleep(Duration::from_secs(10));

@ -1,14 +1,15 @@
// use std::path::Path; use std::fs::File;
use std::path::Path; use std::path::Path;
use crate::config::Config; use crate::config::Config;
use crate::metrics::agent_balance_sum; use crate::metrics::agent_balance_sum;
use crate::utils::get_matching_lines;
use maplit::hashmap; use maplit::hashmap;
use relayer::GAS_EXPENDITURE_LOG_MESSAGE;
use crate::logging::log; use crate::logging::log;
use crate::solana::solana_termination_invariants_met; use crate::solana::solana_termination_invariants_met;
use crate::{fetch_metric, ZERO_MERKLE_INSERTION_KATHY_MESSAGES}; use crate::{fetch_metric, AGENT_LOGGING_DIR, ZERO_MERKLE_INSERTION_KATHY_MESSAGES};
// This number should be even, so the messages can be split into two equal halves // This number should be even, so the messages can be split into two equal halves
// sent before and after the relayer spins up, to avoid rounding errors. // sent before and after the relayer spins up, to avoid rounding errors.
@ -19,11 +20,16 @@ pub const SOL_MESSAGES_EXPECTED: u32 = 20;
pub fn termination_invariants_met( pub fn termination_invariants_met(
config: &Config, config: &Config,
starting_relayer_balance: f64, starting_relayer_balance: f64,
solana_cli_tools_path: &Path, solana_cli_tools_path: Option<&Path>,
solana_config_path: &Path, solana_config_path: Option<&Path>,
) -> eyre::Result<bool> { ) -> eyre::Result<bool> {
let eth_messages_expected = (config.kathy_messages / 2) as u32 * 2; let eth_messages_expected = (config.kathy_messages / 2) as u32 * 2;
let total_messages_expected = eth_messages_expected + SOL_MESSAGES_EXPECTED; let sol_messages_expected = if config.sealevel_enabled {
SOL_MESSAGES_EXPECTED
} else {
0
};
let total_messages_expected = eth_messages_expected + sol_messages_expected;
let lengths = fetch_metric("9092", "hyperlane_submitter_queue_length", &hashmap! {})?; let lengths = fetch_metric("9092", "hyperlane_submitter_queue_length", &hashmap! {})?;
assert!(!lengths.is_empty(), "Could not find queue length metric"); assert!(!lengths.is_empty(), "Could not find queue length metric");
@ -55,6 +61,19 @@ pub fn termination_invariants_met(
.iter() .iter()
.sum::<u32>(); .sum::<u32>();
let log_file_path = AGENT_LOGGING_DIR.join("RLY-output.log");
let relayer_logfile = File::open(log_file_path)?;
let gas_expenditure_log_count =
get_matching_lines(&relayer_logfile, GAS_EXPENDITURE_LOG_MESSAGE)
.unwrap()
.len();
// Zero insertion messages don't reach `submit` stage where gas is spent, so we only expect these logs for the other messages.
assert_eq!(
gas_expenditure_log_count as u32, total_messages_expected,
"Didn't record gas payment for all delivered messages"
);
let gas_payment_sealevel_events_count = fetch_metric( let gas_payment_sealevel_events_count = fetch_metric(
"9092", "9092",
"hyperlane_contract_sync_stored_events", "hyperlane_contract_sync_stored_events",
@ -76,10 +95,14 @@ pub fn termination_invariants_met(
return Ok(false); return Ok(false);
} }
if let Some((solana_cli_tools_path, solana_config_path)) =
solana_cli_tools_path.zip(solana_config_path)
{
if !solana_termination_invariants_met(solana_cli_tools_path, solana_config_path) { if !solana_termination_invariants_met(solana_cli_tools_path, solana_config_path) {
log!("Solana termination invariants not met"); log!("Solana termination invariants not met");
return Ok(false); return Ok(false);
} }
}
let dispatched_messages_scraped = fetch_metric( let dispatched_messages_scraped = fetch_metric(
"9093", "9093",

@ -11,12 +11,17 @@
//! the end conditions are met, the test is a failure. Defaults to 10 min. //! the end conditions are met, the test is a failure. Defaults to 10 min.
//! - `E2E_KATHY_MESSAGES`: Number of kathy messages to dispatch. Defaults to 16 if CI mode is enabled. //! - `E2E_KATHY_MESSAGES`: Number of kathy messages to dispatch. Defaults to 16 if CI mode is enabled.
//! else false. //! else false.
//! - `SEALEVEL_ENABLED`: true/false, enables sealevel testing. Defaults to true.
use std::{ use std::{
fs, collections::HashMap,
fs::{self, File},
path::Path, path::Path,
process::{Child, ExitCode}, process::{Child, ExitCode},
sync::atomic::{AtomicBool, Ordering}, sync::{
atomic::{AtomicBool, Ordering},
Arc, Mutex,
},
thread::sleep, thread::sleep,
time::{Duration, Instant}, time::{Duration, Instant},
}; };
@ -24,6 +29,7 @@ use std::{
use ethers_contract::MULTICALL_ADDRESS; use ethers_contract::MULTICALL_ADDRESS;
use logging::log; use logging::log;
pub use metrics::fetch_metric; pub use metrics::fetch_metric;
use once_cell::sync::Lazy;
use program::Program; use program::Program;
use tempfile::tempdir; use tempfile::tempdir;
@ -46,6 +52,12 @@ mod program;
mod solana; mod solana;
mod utils; mod utils;
pub static AGENT_LOGGING_DIR: Lazy<&Path> = Lazy::new(|| {
let dir = Path::new("/tmp/test_logs");
fs::create_dir_all(dir).unwrap();
dir
});
/// These private keys are from hardhat/anvil's testing accounts. /// These private keys are from hardhat/anvil's testing accounts.
const RELAYER_KEYS: &[&str] = &[ const RELAYER_KEYS: &[&str] = &[
// test1 // test1
@ -61,17 +73,18 @@ const RELAYER_KEYS: &[&str] = &[
]; ];
/// These private keys are from hardhat/anvil's testing accounts. /// These private keys are from hardhat/anvil's testing accounts.
/// These must be consistent with the ISM config for the test. /// These must be consistent with the ISM config for the test.
const VALIDATOR_KEYS: &[&str] = &[ const ETH_VALIDATOR_KEYS: &[&str] = &[
// eth // eth
"0x47e179ec197488593b187f80a00eb0da91f1b9d0b13f8733639f19c30a34926a", "0x47e179ec197488593b187f80a00eb0da91f1b9d0b13f8733639f19c30a34926a",
"0x8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba", "0x8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba",
"0x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e", "0x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e",
];
const SEALEVEL_VALIDATOR_KEYS: &[&str] = &[
// sealevel // sealevel
"0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d", "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d",
]; ];
const VALIDATOR_ORIGIN_CHAINS: &[&str] = &["test1", "test2", "test3", "sealeveltest1"];
const AGENT_BIN_PATH: &str = "target/debug"; const AGENT_BIN_PATH: &str = "target/debug";
const INFRA_PATH: &str = "../typescript/infra"; const INFRA_PATH: &str = "../typescript/infra";
const MONOREPO_ROOT_PATH: &str = "../"; const MONOREPO_ROOT_PATH: &str = "../";
@ -87,14 +100,15 @@ static SHUTDOWN: AtomicBool = AtomicBool::new(false);
/// cleanup purposes at this time. /// cleanup purposes at this time.
#[derive(Default)] #[derive(Default)]
struct State { struct State {
agents: Vec<(String, Child)>, #[allow(clippy::type_complexity)]
agents: HashMap<String, (Child, Option<Arc<Mutex<File>>>)>,
watchers: Vec<Box<dyn TaskHandle<Output = ()>>>, watchers: Vec<Box<dyn TaskHandle<Output = ()>>>,
data: Vec<Box<dyn ArbitraryData>>, data: Vec<Box<dyn ArbitraryData>>,
} }
impl State { impl State {
fn push_agent(&mut self, handles: AgentHandles) { fn push_agent(&mut self, handles: AgentHandles) {
self.agents.push((handles.0, handles.1)); self.agents.insert(handles.0, (handles.1, handles.5));
self.watchers.push(handles.2); self.watchers.push(handles.2);
self.watchers.push(handles.3); self.watchers.push(handles.3);
self.data.push(handles.4); self.data.push(handles.4);
@ -105,9 +119,7 @@ impl Drop for State {
fn drop(&mut self) { fn drop(&mut self) {
SHUTDOWN.store(true, Ordering::Relaxed); SHUTDOWN.store(true, Ordering::Relaxed);
log!("Signaling children to stop..."); log!("Signaling children to stop...");
// stop children in reverse order for (name, (mut agent, _)) in self.agents.drain() {
self.agents.reverse();
for (name, mut agent) in self.agents.drain(..) {
log!("Stopping child {}", name); log!("Stopping child {}", name);
stop_child(&mut agent); stop_child(&mut agent);
} }
@ -122,6 +134,7 @@ impl Drop for State {
drop(data) drop(data)
} }
fs::remove_dir_all(SOLANA_CHECKPOINT_LOCATION).unwrap_or_default(); fs::remove_dir_all(SOLANA_CHECKPOINT_LOCATION).unwrap_or_default();
fs::remove_dir_all::<&Path>(AGENT_LOGGING_DIR.as_ref()).unwrap_or_default();
} }
} }
@ -133,20 +146,27 @@ fn main() -> ExitCode {
}) })
.unwrap(); .unwrap();
assert_eq!(VALIDATOR_ORIGIN_CHAINS.len(), VALIDATOR_KEYS.len());
const VALIDATOR_COUNT: usize = VALIDATOR_KEYS.len();
let config = Config::load(); let config = Config::load();
let mut validator_origin_chains = ["test1", "test2", "test3"].to_vec();
let solana_checkpoint_path = Path::new(SOLANA_CHECKPOINT_LOCATION); let mut validator_keys = ETH_VALIDATOR_KEYS.to_vec();
fs::remove_dir_all(solana_checkpoint_path).unwrap_or_default(); let mut validator_count: usize = validator_keys.len();
let checkpoints_dirs: Vec<DynPath> = (0..VALIDATOR_COUNT - 1) let mut checkpoints_dirs: Vec<DynPath> = (0..validator_count)
.map(|_| Box::new(tempdir().unwrap()) as DynPath) .map(|_| Box::new(tempdir().unwrap()) as DynPath)
.chain([Box::new(solana_checkpoint_path) as DynPath])
.collect(); .collect();
if config.sealevel_enabled {
validator_origin_chains.push("sealeveltest1");
let mut sealevel_keys = SEALEVEL_VALIDATOR_KEYS.to_vec();
validator_keys.append(&mut sealevel_keys);
let solana_checkpoint_path = Path::new(SOLANA_CHECKPOINT_LOCATION);
fs::remove_dir_all(solana_checkpoint_path).unwrap_or_default();
checkpoints_dirs.push(Box::new(solana_checkpoint_path) as DynPath);
validator_count += 1;
}
assert_eq!(validator_origin_chains.len(), validator_keys.len());
let rocks_db_dir = tempdir().unwrap(); let rocks_db_dir = tempdir().unwrap();
let relayer_db = concat_path(&rocks_db_dir, "relayer"); let relayer_db = concat_path(&rocks_db_dir, "relayer");
let validator_dbs = (0..VALIDATOR_COUNT) let validator_dbs = (0..validator_count)
.map(|i| concat_path(&rocks_db_dir, format!("validator{i}"))) .map(|i| concat_path(&rocks_db_dir, format!("validator{i}")))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
@ -200,15 +220,6 @@ fn main() -> ExitCode {
r#"[{ r#"[{
"type": "minimum", "type": "minimum",
"payment": "1", "payment": "1",
"matchingList": [
{
"originDomain": ["13375","13376"],
"destinationDomain": ["13375","13376"]
}
]
},
{
"type": "none"
}]"#, }]"#,
) )
.arg( .arg(
@ -216,11 +227,15 @@ fn main() -> ExitCode {
"http://127.0.0.1:8545,http://127.0.0.1:8545,http://127.0.0.1:8545", "http://127.0.0.1:8545,http://127.0.0.1:8545,http://127.0.0.1:8545",
) )
// default is used for TEST3 // default is used for TEST3
.arg("defaultSigner.key", RELAYER_KEYS[2]) .arg("defaultSigner.key", RELAYER_KEYS[2]);
.arg( let relayer_env = if config.sealevel_enabled {
relayer_env.arg(
"relayChains", "relayChains",
"test1,test2,test3,sealeveltest1,sealeveltest2", "test1,test2,test3,sealeveltest1,sealeveltest2",
); )
} else {
relayer_env.arg("relayChains", "test1,test2,test3")
};
let base_validator_env = common_agent_env let base_validator_env = common_agent_env
.clone() .clone()
@ -242,14 +257,14 @@ fn main() -> ExitCode {
.hyp_env("INTERVAL", "5") .hyp_env("INTERVAL", "5")
.hyp_env("CHECKPOINTSYNCER_TYPE", "localStorage"); .hyp_env("CHECKPOINTSYNCER_TYPE", "localStorage");
let validator_envs = (0..VALIDATOR_COUNT) let validator_envs = (0..validator_count)
.map(|i| { .map(|i| {
base_validator_env base_validator_env
.clone() .clone()
.hyp_env("METRICSPORT", (9094 + i).to_string()) .hyp_env("METRICSPORT", (9094 + i).to_string())
.hyp_env("DB", validator_dbs[i].to_str().unwrap()) .hyp_env("DB", validator_dbs[i].to_str().unwrap())
.hyp_env("ORIGINCHAINNAME", VALIDATOR_ORIGIN_CHAINS[i]) .hyp_env("ORIGINCHAINNAME", validator_origin_chains[i])
.hyp_env("VALIDATOR_KEY", VALIDATOR_KEYS[i]) .hyp_env("VALIDATOR_KEY", validator_keys[i])
.hyp_env( .hyp_env(
"CHECKPOINTSYNCER_PATH", "CHECKPOINTSYNCER_PATH",
(*checkpoints_dirs[i]).as_ref().to_str().unwrap(), (*checkpoints_dirs[i]).as_ref().to_str().unwrap(),
@ -283,7 +298,7 @@ fn main() -> ExitCode {
.join(", ") .join(", ")
); );
log!("Relayer DB in {}", relayer_db.display()); log!("Relayer DB in {}", relayer_db.display());
(0..VALIDATOR_COUNT).for_each(|i| { (0..validator_count).for_each(|i| {
log!("Validator {} DB in {}", i + 1, validator_dbs[i].display()); log!("Validator {} DB in {}", i + 1, validator_dbs[i].display());
}); });
@ -291,9 +306,14 @@ fn main() -> ExitCode {
// Ready to run... // Ready to run...
// //
let solana_paths = if config.sealevel_enabled {
let (solana_path, solana_path_tempdir) = install_solana_cli_tools().join(); let (solana_path, solana_path_tempdir) = install_solana_cli_tools().join();
state.data.push(Box::new(solana_path_tempdir)); state.data.push(Box::new(solana_path_tempdir));
let solana_program_builder = build_solana_programs(solana_path.clone()); let solana_program_builder = build_solana_programs(solana_path.clone());
Some((solana_program_builder.join(), solana_path))
} else {
None
};
// this task takes a long time in the CI so run it in parallel // this task takes a long time in the CI so run it in parallel
log!("Building rust..."); log!("Building rust...");
@ -303,15 +323,18 @@ fn main() -> ExitCode {
.arg("bin", "relayer") .arg("bin", "relayer")
.arg("bin", "validator") .arg("bin", "validator")
.arg("bin", "scraper") .arg("bin", "scraper")
.arg("bin", "init-db") .arg("bin", "init-db");
.arg("bin", "hyperlane-sealevel-client") let build_rust = if config.sealevel_enabled {
build_rust.arg("bin", "hyperlane-sealevel-client")
} else {
build_rust
};
let build_rust = build_rust
.filter_logs(|l| !l.contains("workspace-inheritance")) .filter_logs(|l| !l.contains("workspace-inheritance"))
.run(); .run();
let start_anvil = start_anvil(config.clone()); let start_anvil = start_anvil(config.clone());
let solana_program_path = solana_program_builder.join();
log!("Running postgres db..."); log!("Running postgres db...");
let postgres = Program::new("docker") let postgres = Program::new("docker")
.cmd("run") .cmd("run")
@ -320,12 +343,14 @@ fn main() -> ExitCode {
.arg("env", "POSTGRES_PASSWORD=47221c18c610") .arg("env", "POSTGRES_PASSWORD=47221c18c610")
.arg("publish", "5432:5432") .arg("publish", "5432:5432")
.cmd("postgres:14") .cmd("postgres:14")
.spawn("SQL"); .spawn("SQL", None);
state.push_agent(postgres); state.push_agent(postgres);
build_rust.join(); build_rust.join();
let solana_ledger_dir = tempdir().unwrap(); let solana_ledger_dir = tempdir().unwrap();
let solana_config_path = if let Some((solana_program_path, solana_path)) = solana_paths.clone()
{
let start_solana_validator = start_solana_test_validator( let start_solana_validator = start_solana_test_validator(
solana_path.clone(), solana_path.clone(),
solana_program_path, solana_program_path,
@ -334,10 +359,15 @@ fn main() -> ExitCode {
let (solana_config_path, solana_validator) = start_solana_validator.join(); let (solana_config_path, solana_validator) = start_solana_validator.join();
state.push_agent(solana_validator); state.push_agent(solana_validator);
Some(solana_config_path)
} else {
None
};
state.push_agent(start_anvil.join()); state.push_agent(start_anvil.join());
// spawn 1st validator before any messages have been sent to test empty mailbox // spawn 1st validator before any messages have been sent to test empty mailbox
state.push_agent(validator_envs.first().unwrap().clone().spawn("VL1")); state.push_agent(validator_envs.first().unwrap().clone().spawn("VL1", None));
sleep(Duration::from_secs(5)); sleep(Duration::from_secs(5));
@ -345,7 +375,7 @@ fn main() -> ExitCode {
Program::new(concat_path(AGENT_BIN_PATH, "init-db")) Program::new(concat_path(AGENT_BIN_PATH, "init-db"))
.run() .run()
.join(); .join();
state.push_agent(scraper_env.spawn("SCR")); state.push_agent(scraper_env.spawn("SCR", None));
// Send half the kathy messages before starting the rest of the agents // Send half the kathy messages before starting the rest of the agents
let kathy_env_single_insertion = Program::new("yarn") let kathy_env_single_insertion = Program::new("yarn")
@ -378,22 +408,35 @@ fn main() -> ExitCode {
.arg("required-hook", "merkleTreeHook"); .arg("required-hook", "merkleTreeHook");
kathy_env_double_insertion.clone().run().join(); kathy_env_double_insertion.clone().run().join();
if let Some((solana_config_path, (_, solana_path))) =
solana_config_path.clone().zip(solana_paths.clone())
{
// Send some sealevel messages before spinning up the agents, to test the backward indexing cursor // Send some sealevel messages before spinning up the agents, to test the backward indexing cursor
for _i in 0..(SOL_MESSAGES_EXPECTED / 2) { for _i in 0..(SOL_MESSAGES_EXPECTED / 2) {
initiate_solana_hyperlane_transfer(solana_path.clone(), solana_config_path.clone()).join(); initiate_solana_hyperlane_transfer(solana_path.clone(), solana_config_path.clone())
.join();
}
} }
// spawn the rest of the validators // spawn the rest of the validators
for (i, validator_env) in validator_envs.into_iter().enumerate().skip(1) { for (i, validator_env) in validator_envs.into_iter().enumerate().skip(1) {
let validator = validator_env.spawn(make_static(format!("VL{}", 1 + i))); let validator = validator_env.spawn(
make_static(format!("VL{}", 1 + i)),
Some(AGENT_LOGGING_DIR.as_ref()),
);
state.push_agent(validator); state.push_agent(validator);
} }
state.push_agent(relayer_env.spawn("RLY")); state.push_agent(relayer_env.spawn("RLY", Some(&AGENT_LOGGING_DIR)));
// Send some sealevel messages after spinning up the relayer, to test the forward indexing cursor if let Some((solana_config_path, (_, solana_path))) =
solana_config_path.clone().zip(solana_paths.clone())
{
// Send some sealevel messages before spinning up the agents, to test the backward indexing cursor
for _i in 0..(SOL_MESSAGES_EXPECTED / 2) { for _i in 0..(SOL_MESSAGES_EXPECTED / 2) {
initiate_solana_hyperlane_transfer(solana_path.clone(), solana_config_path.clone()).join(); initiate_solana_hyperlane_transfer(solana_path.clone(), solana_config_path.clone())
.join();
}
} }
log!("Setup complete! Agents running in background..."); log!("Setup complete! Agents running in background...");
@ -402,7 +445,11 @@ fn main() -> ExitCode {
// Send half the kathy messages after the relayer comes up // Send half the kathy messages after the relayer comes up
kathy_env_double_insertion.clone().run().join(); kathy_env_double_insertion.clone().run().join();
kathy_env_zero_insertion.clone().run().join(); kathy_env_zero_insertion.clone().run().join();
state.push_agent(kathy_env_single_insertion.flag("mineforever").spawn("KTY")); state.push_agent(
kathy_env_single_insertion
.flag("mineforever")
.spawn("KTY", None),
);
let loop_start = Instant::now(); let loop_start = Instant::now();
// give things a chance to fully start. // give things a chance to fully start.
@ -412,12 +459,14 @@ fn main() -> ExitCode {
while !SHUTDOWN.load(Ordering::Relaxed) { while !SHUTDOWN.load(Ordering::Relaxed) {
if config.ci_mode { if config.ci_mode {
// for CI we have to look for the end condition. // for CI we have to look for the end condition.
// if termination_invariants_met(&config, starting_relayer_balance)
if termination_invariants_met( if termination_invariants_met(
&config, &config,
starting_relayer_balance, starting_relayer_balance,
&solana_path, solana_paths
&solana_config_path, .clone()
.map(|(_, solana_path)| solana_path)
.as_deref(),
solana_config_path.as_deref(),
) )
.unwrap_or(false) .unwrap_or(false)
{ {
@ -432,7 +481,7 @@ fn main() -> ExitCode {
} }
// verify long-running tasks are still running // verify long-running tasks are still running
for (name, child) in state.agents.iter_mut() { for (name, (child, _)) in state.agents.iter_mut() {
if let Some(status) = child.try_wait().unwrap() { if let Some(status) = child.try_wait().unwrap() {
if !status.success() { if !status.success() {
log!( log!(

@ -2,14 +2,14 @@ use std::{
collections::BTreeMap, collections::BTreeMap,
ffi::OsStr, ffi::OsStr,
fmt::{Debug, Display, Formatter}, fmt::{Debug, Display, Formatter},
io::{BufRead, BufReader, Read}, fs::{File, OpenOptions},
io::{BufRead, BufReader, Read, Write},
path::{Path, PathBuf}, path::{Path, PathBuf},
process::{Command, Stdio}, process::{Command, Stdio},
sync::{ sync::{
atomic::{AtomicBool, Ordering}, atomic::{AtomicBool, Ordering},
mpsc, mpsc::{self, Sender},
mpsc::Sender, Arc, Mutex,
Arc,
}, },
thread::{sleep, spawn}, thread::{sleep, spawn},
time::Duration, time::Duration,
@ -240,8 +240,18 @@ impl Program {
}) })
} }
pub fn spawn(self, log_prefix: &'static str) -> AgentHandles { pub fn spawn(self, log_prefix: &'static str, logs_dir: Option<&Path>) -> AgentHandles {
let mut command = self.create_command(); let mut command = self.create_command();
let log_file = logs_dir.map(|logs_dir| {
let log_file_name = format!("{}-output.log", log_prefix);
let log_file_path = logs_dir.join(log_file_name);
let log_file = OpenOptions::new()
.append(true)
.create(true)
.open(log_file_path)
.expect("Failed to create a log file");
Arc::new(Mutex::new(log_file))
});
command.stdout(Stdio::piped()).stderr(Stdio::piped()); command.stdout(Stdio::piped()).stderr(Stdio::piped());
log!("Spawning {}...", &self); log!("Spawning {}...", &self);
@ -250,17 +260,35 @@ impl Program {
.unwrap_or_else(|e| panic!("Failed to start {:?} with error: {e}", &self)); .unwrap_or_else(|e| panic!("Failed to start {:?} with error: {e}", &self));
let child_stdout = child.stdout.take().unwrap(); let child_stdout = child.stdout.take().unwrap();
let filter = self.get_filter(); let filter = self.get_filter();
let stdout = let cloned_log_file = log_file.clone();
spawn(move || prefix_log(child_stdout, log_prefix, &RUN_LOG_WATCHERS, filter, None)); let stdout = spawn(move || {
prefix_log(
child_stdout,
log_prefix,
&RUN_LOG_WATCHERS,
filter,
cloned_log_file,
None,
)
});
let child_stderr = child.stderr.take().unwrap(); let child_stderr = child.stderr.take().unwrap();
let stderr = let stderr = spawn(move || {
spawn(move || prefix_log(child_stderr, log_prefix, &RUN_LOG_WATCHERS, filter, None)); prefix_log(
child_stderr,
log_prefix,
&RUN_LOG_WATCHERS,
filter,
None,
None,
)
});
( (
log_prefix.to_owned(), log_prefix.to_owned(),
child, child,
Box::new(SimpleTaskHandle(stdout)), Box::new(SimpleTaskHandle(stdout)),
Box::new(SimpleTaskHandle(stderr)), Box::new(SimpleTaskHandle(stderr)),
self.get_memory(), self.get_memory(),
log_file.clone(),
) )
} }
@ -281,13 +309,13 @@ impl Program {
let stdout = child.stdout.take().unwrap(); let stdout = child.stdout.take().unwrap();
let name = self.get_bin_name(); let name = self.get_bin_name();
let running = running.clone(); let running = running.clone();
spawn(move || prefix_log(stdout, &name, &running, filter, stdout_ch_tx)) spawn(move || prefix_log(stdout, &name, &running, filter, None, stdout_ch_tx))
}; };
let stderr = { let stderr = {
let stderr = child.stderr.take().unwrap(); let stderr = child.stderr.take().unwrap();
let name = self.get_bin_name(); let name = self.get_bin_name();
let running = running.clone(); let running = running.clone();
spawn(move || prefix_log(stderr, &name, &running, filter, None)) spawn(move || prefix_log(stderr, &name, &running, filter, None, None))
}; };
let status = loop { let status = loop {
@ -321,6 +349,7 @@ fn prefix_log(
prefix: &str, prefix: &str,
run_log_watcher: &AtomicBool, run_log_watcher: &AtomicBool,
filter: Option<LogFilter>, filter: Option<LogFilter>,
file: Option<Arc<Mutex<File>>>,
channel: Option<Sender<String>>, channel: Option<Sender<String>>,
) { ) {
let mut reader = BufReader::new(output).lines(); let mut reader = BufReader::new(output).lines();
@ -340,6 +369,10 @@ fn prefix_log(
} }
} }
println!("<{prefix}> {line}"); println!("<{prefix}> {line}");
if let Some(file) = &file {
let mut writer = file.lock().expect("Failed to acquire lock for log file");
writeln!(writer, "{}", line).unwrap_or(());
}
if let Some(channel) = &channel { if let Some(channel) = &channel {
// ignore send errors // ignore send errors
channel.send(line).unwrap_or(()); channel.send(line).unwrap_or(());

@ -202,7 +202,7 @@ pub fn start_solana_test_validator(
concat_path(&solana_programs_path, lib).to_str().unwrap(), concat_path(&solana_programs_path, lib).to_str().unwrap(),
); );
} }
let validator = args.spawn("SOL"); let validator = args.spawn("SOL", None);
sleep(Duration::from_secs(5)); sleep(Duration::from_secs(5));
log!("Deploying the hyperlane programs to solana"); log!("Deploying the hyperlane programs to solana");

@ -1,5 +1,8 @@
use std::fs::File;
use std::io::{self, BufRead};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::process::Child; use std::process::Child;
use std::sync::{Arc, Mutex};
use std::thread::JoinHandle; use std::thread::JoinHandle;
use nix::libc::pid_t; use nix::libc::pid_t;
@ -54,6 +57,8 @@ pub type AgentHandles = (
Box<dyn TaskHandle<Output = ()>>, Box<dyn TaskHandle<Output = ()>>,
// data to drop once program exits // data to drop once program exits
Box<dyn ArbitraryData>, Box<dyn ArbitraryData>,
// file with stdout logs
Option<Arc<Mutex<File>>>,
); );
pub type LogFilter = fn(&str) -> bool; pub type LogFilter = fn(&str) -> bool;
@ -112,3 +117,16 @@ pub fn stop_child(child: &mut Child) {
} }
}; };
} }
pub fn get_matching_lines(file: &File, search_string: &str) -> io::Result<Vec<String>> {
let reader = io::BufReader::new(file);
// Read lines and collect those that contain the search string
let matching_lines: Vec<String> = reader
.lines()
.map_while(Result::ok)
.filter(|line| line.contains(search_string))
.collect();
Ok(matching_lines)
}

@ -1,28 +1,22 @@
# @hyperlane-xyz/core # @hyperlane-xyz/core
## 4.0.0-alpha.2 ## 3.15.0
### Patch Changes ### Minor Changes
- @hyperlane-xyz/utils@4.0.0-alpha.2
## 4.0.0-alpha.0 - 51bfff683: Mint/burn limit checking for xERC20 bridging
Corrects CLI output for HypXERC20 and HypXERC20Lockbox deployments
### Patch Changes ### Patch Changes
- @hyperlane-xyz/utils@4.0.0-alpha.0 - @hyperlane-xyz/utils@3.15.0
## 4.0.0
### Major Changes
- 74c879fa1: Merge branch 'cli-2.0' into main. ## 3.14.0
### Patch Changes ### Patch Changes
- Updated dependencies [341b8affd] - a8a68f6f6: fix: make XERC20 and XERC20 Lockbox proxy-able
- Updated dependencies [74c879fa1] - @hyperlane-xyz/utils@3.14.0
- @hyperlane-xyz/utils@4.0.0-alpha
## 3.13.0 ## 3.13.0

@ -43,13 +43,13 @@ abstract contract GasRouter is Router {
*/ */
function quoteGasPayment( function quoteGasPayment(
uint32 _destinationDomain uint32 _destinationDomain
) external view returns (uint256 _gasPayment) { ) external view returns (uint256) {
return _quoteDispatch(_destinationDomain, ""); return _GasRouter_quoteDispatch(_destinationDomain, "", address(hook));
} }
function _metadata( function _GasRouter_hookMetadata(
uint32 _destination uint32 _destination
) internal view virtual override returns (bytes memory) { ) internal view returns (bytes memory) {
return return
StandardHookMetadata.overrideGasLimit(destinationGas[_destination]); StandardHookMetadata.overrideGasLimit(destinationGas[_destination]);
} }
@ -57,4 +57,34 @@ abstract contract GasRouter is Router {
function _setDestinationGas(uint32 domain, uint256 gas) internal { function _setDestinationGas(uint32 domain, uint256 gas) internal {
destinationGas[domain] = gas; destinationGas[domain] = gas;
} }
function _GasRouter_dispatch(
uint32 _destination,
uint256 _value,
bytes memory _messageBody,
address _hook
) internal returns (bytes32) {
return
_Router_dispatch(
_destination,
_value,
_messageBody,
_GasRouter_hookMetadata(_destination),
_hook
);
}
function _GasRouter_quoteDispatch(
uint32 _destination,
bytes memory _messageBody,
address _hook
) internal view returns (uint256) {
return
_Router_quoteDispatch(
_destination,
_messageBody,
_GasRouter_hookMetadata(_destination),
_hook
);
}
} }

@ -95,85 +95,4 @@ abstract contract MailboxClient is OwnableUpgradeable {
function _isDelivered(bytes32 id) internal view returns (bool) { function _isDelivered(bytes32 id) internal view returns (bool) {
return mailbox.delivered(id); return mailbox.delivered(id);
} }
function _metadata(
uint32 /*_destinationDomain*/
) internal view virtual returns (bytes memory) {
return "";
}
function _dispatch(
uint32 _destinationDomain,
bytes32 _recipient,
bytes memory _messageBody
) internal virtual returns (bytes32) {
return
_dispatch(_destinationDomain, _recipient, msg.value, _messageBody);
}
function _dispatch(
uint32 _destinationDomain,
bytes32 _recipient,
uint256 _value,
bytes memory _messageBody
) internal virtual returns (bytes32) {
return
mailbox.dispatch{value: _value}(
_destinationDomain,
_recipient,
_messageBody,
_metadata(_destinationDomain),
hook
);
}
function _dispatch(
uint32 _destinationDomain,
bytes32 _recipient,
uint256 _value,
bytes memory _messageBody,
bytes memory _hookMetadata,
IPostDispatchHook _hook
) internal virtual returns (bytes32) {
return
mailbox.dispatch{value: _value}(
_destinationDomain,
_recipient,
_messageBody,
_hookMetadata,
_hook
);
}
function _quoteDispatch(
uint32 _destinationDomain,
bytes32 _recipient,
bytes memory _messageBody
) internal view virtual returns (uint256) {
return
mailbox.quoteDispatch(
_destinationDomain,
_recipient,
_messageBody,
_metadata(_destinationDomain),
hook
);
}
function _quoteDispatch(
uint32 _destinationDomain,
bytes32 _recipient,
bytes memory _messageBody,
bytes calldata _hookMetadata,
IPostDispatchHook _hook
) internal view virtual returns (uint256) {
return
mailbox.quoteDispatch(
_destinationDomain,
_recipient,
_messageBody,
_hookMetadata,
_hook
);
}
} }

@ -167,28 +167,73 @@ abstract contract Router is MailboxClient, IMessageRecipient {
); );
} }
function _dispatch( function _Router_dispatch(
uint32 _destinationDomain, uint32 _destinationDomain,
bytes memory _messageBody uint256 _value,
) internal virtual returns (bytes32) { bytes memory _messageBody,
return _dispatch(_destinationDomain, msg.value, _messageBody); bytes memory _hookMetadata,
address _hook
) internal returns (bytes32) {
bytes32 _router = _mustHaveRemoteRouter(_destinationDomain);
return
mailbox.dispatch{value: _value}(
_destinationDomain,
_router,
_messageBody,
_hookMetadata,
IPostDispatchHook(_hook)
);
} }
/**
* DEPRECATED: Use `_Router_dispatch` instead
* @dev For backward compatibility with v2 client contracts
*/
function _dispatch( function _dispatch(
uint32 _destinationDomain, uint32 _destinationDomain,
uint256 _value,
bytes memory _messageBody bytes memory _messageBody
) internal virtual returns (bytes32) { ) internal returns (bytes32) {
return
_Router_dispatch(
_destinationDomain,
msg.value,
_messageBody,
"",
address(hook)
);
}
function _Router_quoteDispatch(
uint32 _destinationDomain,
bytes memory _messageBody,
bytes memory _hookMetadata,
address _hook
) internal view returns (uint256) {
bytes32 _router = _mustHaveRemoteRouter(_destinationDomain); bytes32 _router = _mustHaveRemoteRouter(_destinationDomain);
return return
super._dispatch(_destinationDomain, _router, _value, _messageBody); mailbox.quoteDispatch(
_destinationDomain,
_router,
_messageBody,
_hookMetadata,
IPostDispatchHook(_hook)
);
} }
/**
* DEPRECATED: Use `_Router_quoteDispatch` instead
* @dev For backward compatibility with v2 client contracts
*/
function _quoteDispatch( function _quoteDispatch(
uint32 _destinationDomain, uint32 _destinationDomain,
bytes memory _messageBody bytes memory _messageBody
) internal view virtual returns (uint256) { ) internal view returns (uint256) {
bytes32 _router = _mustHaveRemoteRouter(_destinationDomain); return
return super._quoteDispatch(_destinationDomain, _router, _messageBody); _Router_quoteDispatch(
_destinationDomain,
_messageBody,
"",
address(hook)
);
} }
} }

@ -3,6 +3,7 @@ pragma solidity >=0.8.0;
import "@openzeppelin/contracts/token/ERC20/ERC20.sol"; import "@openzeppelin/contracts/token/ERC20/ERC20.sol";
import "../token/interfaces/IXERC20Lockbox.sol";
import "../token/interfaces/IXERC20.sol"; import "../token/interfaces/IXERC20.sol";
import "../token/interfaces/IFiatToken.sol"; import "../token/interfaces/IFiatToken.sol";
@ -66,15 +67,62 @@ contract XERC20Test is ERC20Test, IXERC20 {
_burn(account, amount); _burn(account, amount);
} }
function setLimits( function setLimits(address, uint256, uint256) external pure {
address /* _bridge */, assert(false);
uint256 /* _mintingLimit */,
uint256 /* _burningLimit */
) external pure {
require(false, "setLimits(): not implemented");
} }
function owner() external pure returns (address) { function owner() external pure returns (address) {
return address(0x0); return address(0x0);
} }
function burningCurrentLimitOf(
address _bridge
) external view returns (uint256) {
return type(uint256).max;
}
function mintingCurrentLimitOf(
address _bridge
) external view returns (uint256) {
return type(uint256).max;
}
}
contract XERC20LockboxTest is IXERC20Lockbox {
IXERC20 public immutable XERC20;
IERC20 public immutable ERC20;
constructor(
string memory name,
string memory symbol,
uint256 totalSupply,
uint8 __decimals
) {
ERC20Test erc20 = new ERC20Test(name, symbol, totalSupply, __decimals);
erc20.transfer(msg.sender, totalSupply);
ERC20 = erc20;
XERC20 = new XERC20Test(name, symbol, 0, __decimals);
}
function depositTo(address _user, uint256 _amount) public {
ERC20.transferFrom(msg.sender, address(this), _amount);
XERC20.mint(_user, _amount);
}
function deposit(uint256 _amount) external {
depositTo(msg.sender, _amount);
}
function depositNativeTo(address) external payable {
assert(false);
}
function withdrawTo(address _user, uint256 _amount) public {
XERC20.burn(msg.sender, _amount);
ERC20Test(address(ERC20)).mintTo(_user, _amount);
}
function withdraw(uint256 _amount) external {
withdrawTo(msg.sender, _amount);
}
} }

@ -7,7 +7,7 @@ contract TestGasRouter is GasRouter {
constructor(address _mailbox) GasRouter(_mailbox) {} constructor(address _mailbox) GasRouter(_mailbox) {}
function dispatch(uint32 _destination, bytes memory _msg) external payable { function dispatch(uint32 _destination, bytes memory _msg) external payable {
_dispatch(_destination, _msg); _GasRouter_dispatch(_destination, msg.value, _msg, address(hook));
} }
function _handle(uint32, bytes32, bytes calldata) internal pure override {} function _handle(uint32, bytes32, bytes calldata) internal pure override {}

@ -36,24 +36,16 @@ contract HypNative is TokenRouter {
/** /**
* @inheritdoc TokenRouter * @inheritdoc TokenRouter
* @dev uses (`msg.value` - `_amount`) as interchain gas payment and `msg.sender` as refund address. * @dev uses (`msg.value` - `_amount`) as hook payment and `msg.sender` as refund address.
*/ */
function transferRemote( function transferRemote(
uint32 _destination, uint32 _destination,
bytes32 _recipient, bytes32 _recipient,
uint256 _amount uint256 _amount
) public payable virtual override returns (bytes32 messageId) { ) external payable virtual override returns (bytes32 messageId) {
require(msg.value >= _amount, "Native: amount exceeds msg.value"); require(msg.value >= _amount, "Native: amount exceeds msg.value");
uint256 gasPayment = msg.value - _amount; uint256 _hookPayment = msg.value - _amount;
return return _transferRemote(_destination, _recipient, _amount, _hookPayment);
_transferRemote(
_destination,
_recipient,
_amount,
gasPayment,
bytes(""),
address(0)
);
} }
function balanceOf( function balanceOf(

@ -6,7 +6,7 @@ For instructions on deploying Warp Routes, see [the deployment documentation](ht
## Warp Route Architecture ## Warp Route Architecture
A Warp Route is a collection of [`TokenRouter`](./contracts/libs/TokenRouter.sol) contracts deployed across a set of Hyperlane chains. These contracts leverage the `Router` pattern to implement access control and routing logic for remote token transfers. These contracts send and receive [`Messages`](./contracts/libs/Message.sol) which encode payloads containing a transfer `amount` and `recipient` address. A Warp Route is a collection of [`TokenRouter`](./libs/TokenRouter.sol) contracts deployed across a set of Hyperlane chains. These contracts leverage the `Router` pattern to implement access control and routing logic for remote token transfers. These contracts send and receive [`Messages`](./libs/TokenMessage.sol) which encode payloads containing a transfer `amount` and `recipient` address.
```mermaid ```mermaid
%%{ init: { %%{ init: {
@ -39,7 +39,7 @@ graph LR
Mailbox_G[(Mailbox)] Mailbox_G[(Mailbox)]
end end
HYP_E -. "router" .- HYP_P -. "router" .- HYP_G HYP_E -. "TokenMessage" .- HYP_P -. "TokenMessage" .- HYP_G
``` ```

@ -25,18 +25,16 @@ contract HypNativeScaled is HypNative {
uint32 _destination, uint32 _destination,
bytes32 _recipient, bytes32 _recipient,
uint256 _amount uint256 _amount
) public payable override returns (bytes32 messageId) { ) external payable override returns (bytes32 messageId) {
require(msg.value >= _amount, "Native: amount exceeds msg.value"); require(msg.value >= _amount, "Native: amount exceeds msg.value");
uint256 gasPayment = msg.value - _amount; uint256 _hookPayment = msg.value - _amount;
uint256 scaledAmount = _amount / scale; uint256 _scaledAmount = _amount / scale;
return return
_transferRemote( _transferRemote(
_destination, _destination,
_recipient, _recipient,
scaledAmount, _scaledAmount,
gasPayment, _hookPayment
bytes(""),
address(0)
); );
} }

@ -8,7 +8,9 @@ contract HypXERC20 is HypERC20Collateral {
constructor( constructor(
address _xerc20, address _xerc20,
address _mailbox address _mailbox
) HypERC20Collateral(_xerc20, _mailbox) {} ) HypERC20Collateral(_xerc20, _mailbox) {
_disableInitializers();
}
function _transferFromSender( function _transferFromSender(
uint256 _amountOrId uint256 _amountOrId

@ -17,18 +17,40 @@ contract HypXERC20Lockbox is HypERC20Collateral {
) HypERC20Collateral(address(IXERC20Lockbox(_lockbox).ERC20()), _mailbox) { ) HypERC20Collateral(address(IXERC20Lockbox(_lockbox).ERC20()), _mailbox) {
lockbox = IXERC20Lockbox(_lockbox); lockbox = IXERC20Lockbox(_lockbox);
xERC20 = lockbox.XERC20(); xERC20 = lockbox.XERC20();
approveLockbox();
_disableInitializers();
}
// grant infinite approvals to lockbox /**
* @notice Approve the lockbox to spend the wrapped token and xERC20
* @dev This function is idempotent and need not be access controlled
*/
function approveLockbox() public {
require( require(
IERC20(wrappedToken).approve(_lockbox, MAX_INT), IERC20(wrappedToken).approve(address(lockbox), MAX_INT),
"erc20 lockbox approve failed" "erc20 lockbox approve failed"
); );
require( require(
xERC20.approve(_lockbox, MAX_INT), xERC20.approve(address(lockbox), MAX_INT),
"xerc20 lockbox approve failed" "xerc20 lockbox approve failed"
); );
} }
/**
* @notice Initialize the contract
* @param _hook The address of the hook contract
* @param _ism The address of the interchain security module
* @param _owner The address of the owner
*/
function initialize(
address _hook,
address _ism,
address _owner
) public override initializer {
approveLockbox();
_MailboxClient_initialize(_hook, _ism, _owner);
}
function _transferFromSender( function _transferFromSender(
uint256 _amount uint256 _amount
) internal override returns (bytes memory) { ) internal override returns (bytes memory) {

@ -36,4 +36,22 @@ interface IXERC20 is IERC20 {
) external; ) external;
function owner() external returns (address); function owner() external returns (address);
/**
* @notice Returns the current limit of a bridge
* @param _bridge the bridge we are viewing the limits of
* @return _limit The limit the bridge has
*/
function burningCurrentLimitOf(
address _bridge
) external view returns (uint256 _limit);
/**
* @notice Returns the current limit of a bridge
* @param _bridge the bridge we are viewing the limits of
* @return _limit The limit the bridge has
*/
function mintingCurrentLimitOf(
address _bridge
) external view returns (uint256 _limit);
} }

@ -109,9 +109,11 @@ abstract contract FastTokenRouter is TokenRouter {
_fastTransferId _fastTransferId
); );
messageId = _dispatch( messageId = _GasRouter_dispatch(
_destination, _destination,
TokenMessage.format(_recipient, _amountOrId, metadata) msg.value,
TokenMessage.format(_recipient, _amountOrId, metadata),
address(hook)
); );
emit SentTransferRemote(_destination, _recipient, _amountOrId); emit SentTransferRemote(_destination, _recipient, _amountOrId);
} }

@ -57,14 +57,7 @@ abstract contract TokenRouter is GasRouter {
uint256 _amountOrId uint256 _amountOrId
) external payable virtual returns (bytes32 messageId) { ) external payable virtual returns (bytes32 messageId) {
return return
_transferRemote( _transferRemote(_destination, _recipient, _amountOrId, msg.value);
_destination,
_recipient,
_amountOrId,
msg.value,
bytes(""),
address(0)
);
} }
/** /**
@ -97,45 +90,45 @@ abstract contract TokenRouter is GasRouter {
); );
} }
/**
* @notice Transfers `_amountOrId` token to `_recipient` on `_destination` domain.
* @dev Delegates transfer logic to `_transferFromSender` implementation.
* @dev The metadata is the token metadata, and is DIFFERENT than the hook metadata.
* @dev Emits `SentTransferRemote` event on the origin chain.
* @param _destination The identifier of the destination chain.
* @param _recipient The address of the recipient on the destination chain.
* @param _amountOrId The amount or identifier of tokens to be sent to the remote recipient.
* @param _gasPayment The amount of native token to pay for interchain gas.
* @param _hookMetadata The metadata passed into the hook
* @param _hook The post dispatch hook to be called by the Mailbox
* @return messageId The identifier of the dispatched message.
*/
function _transferRemote( function _transferRemote(
uint32 _destination, uint32 _destination,
bytes32 _recipient, bytes32 _recipient,
uint256 _amountOrId, uint256 _amountOrId,
uint256 _gasPayment, uint256 _value
bytes memory _hookMetadata,
address _hook
) internal returns (bytes32 messageId) { ) internal returns (bytes32 messageId) {
bytes memory metadata = _transferFromSender(_amountOrId); return
_transferRemote(
if (address(_hook) == address(0)) {
messageId = _dispatch(
_destination, _destination,
_gasPayment, _recipient,
TokenMessage.format(_recipient, _amountOrId, metadata) _amountOrId,
_value,
_GasRouter_hookMetadata(_destination),
address(hook)
); );
} else { }
messageId = _dispatch(
_destination, function _transferRemote(
uint32 _destination,
bytes32 _recipient,
uint256 _amountOrId,
uint256 _value,
bytes memory _hookMetadata,
address _hook
) internal virtual returns (bytes32 messageId) {
bytes memory _tokenMetadata = _transferFromSender(_amountOrId);
bytes memory _tokenMessage = TokenMessage.format(
_recipient, _recipient,
_gasPayment, _amountOrId,
TokenMessage.format(_recipient, _amountOrId, metadata), _tokenMetadata
);
messageId = _Router_dispatch(
_destination,
_value,
_tokenMessage,
_hookMetadata, _hookMetadata,
IPostDispatchHook(_hook) _hook
); );
}
emit SentTransferRemote(_destination, _recipient, _amountOrId); emit SentTransferRemote(_destination, _recipient, _amountOrId);
} }

@ -14,7 +14,7 @@ fi
lcov --version lcov --version
# exclude FastTokenRouter until https://github.com/hyperlane-xyz/hyperlane-monorepo/issues/2806 # exclude FastTokenRouter until https://github.com/hyperlane-xyz/hyperlane-monorepo/issues/2806
EXCLUDE="*test* *mock* *node_modules* *FastHyp*" EXCLUDE="*test* *mock* *node_modules* *script* *FastHyp*"
lcov \ lcov \
--rc lcov_branch_coverage=1 \ --rc lcov_branch_coverage=1 \
--remove lcov.info $EXCLUDE \ --remove lcov.info $EXCLUDE \

@ -14,7 +14,11 @@ fs_permissions = [
{ access = "read", path = "./script/avs/"}, { access = "read", path = "./script/avs/"},
{ access = "write", path = "./fixtures" } { access = "write", path = "./fixtures" }
] ]
ignored_warnings_from = ['fx-portal'] ignored_warnings_from = [
'lib',
'test',
'contracts/test'
]
[profile.ci] [profile.ci]
verbosity = 4 verbosity = 4

@ -1,10 +1,10 @@
{ {
"name": "@hyperlane-xyz/core", "name": "@hyperlane-xyz/core",
"description": "Core solidity contracts for Hyperlane", "description": "Core solidity contracts for Hyperlane",
"version": "4.0.0-alpha.2", "version": "3.15.0",
"dependencies": { "dependencies": {
"@eth-optimism/contracts": "^0.6.0", "@eth-optimism/contracts": "^0.6.0",
"@hyperlane-xyz/utils": "4.0.0-alpha.2", "@hyperlane-xyz/utils": "3.15.0",
"@layerzerolabs/lz-evm-oapp-v2": "2.0.2", "@layerzerolabs/lz-evm-oapp-v2": "2.0.2",
"@openzeppelin/contracts": "^4.9.3", "@openzeppelin/contracts": "^4.9.3",
"@openzeppelin/contracts-upgradeable": "^v4.9.3", "@openzeppelin/contracts-upgradeable": "^v4.9.3",
@ -15,7 +15,9 @@
"@nomiclabs/hardhat-ethers": "^2.2.3", "@nomiclabs/hardhat-ethers": "^2.2.3",
"@nomiclabs/hardhat-waffle": "^2.0.6", "@nomiclabs/hardhat-waffle": "^2.0.6",
"@typechain/ethers-v5": "^11.1.2", "@typechain/ethers-v5": "^11.1.2",
"@typechain/ethers-v6": "^0.5.1",
"@typechain/hardhat": "^9.1.0", "@typechain/hardhat": "^9.1.0",
"@types/node": "^18.14.5",
"chai": "^4.3.6", "chai": "^4.3.6",
"ethereum-waffle": "^4.0.10", "ethereum-waffle": "^4.0.10",
"ethers": "^5.7.2", "ethers": "^5.7.2",
@ -26,6 +28,7 @@
"prettier-plugin-solidity": "^1.1.3", "prettier-plugin-solidity": "^1.1.3",
"solhint": "^4.5.4", "solhint": "^4.5.4",
"solhint-plugin-prettier": "^0.0.5", "solhint-plugin-prettier": "^0.0.5",
"solidity-bytes-utils": "^0.8.0",
"solidity-coverage": "^0.8.3", "solidity-coverage": "^0.8.3",
"ts-generator": "^0.1.1", "ts-generator": "^0.1.1",
"ts-node": "^10.8.0", "ts-node": "^10.8.0",

@ -5,14 +5,54 @@
"avsDirectory": "0x135DDa560e946695d6f155dACaFC6f1F25C1F5AF", "avsDirectory": "0x135DDa560e946695d6f155dACaFC6f1F25C1F5AF",
"paymentCoordinator": "", "paymentCoordinator": "",
"strategies": [ "strategies": [
{
"name": "swETH",
"strategy": "0x0Fe4F44beE93503346A3Ac9EE5A26b130a5796d6"
},
{
"name": "oETH",
"strategy": "0x13760F50a9d7377e4F20CB8CF9e4c26586c658ff"
},
{
"name": "rETH",
"strategy": "0x1BeE69b7dFFfA4E2d53C2a2Df135C388AD25dCD2"
},
{
"name": "mETH",
"strategy": "0x298aFB19A105D59E74658C4C334Ff360BadE6dd2"
},
{ {
"name": "cbETH", "name": "cbETH",
"strategy": "0x54945180dB7943c0ed0FEE7EdaB2Bd24620256bc" "strategy": "0x54945180dB7943c0ed0FEE7EdaB2Bd24620256bc"
}, },
{
"name": "osETH",
"strategy": "0x57ba429517c3473B6d34CA9aCd56c0e735b94c02"
},
{
"name": "wBETH",
"strategy": "0x7CA911E83dabf90C90dD3De5411a10F1A6112184"
},
{
"name": "sfrxETH",
"strategy": "0x8CA7A5d6f3acd3A7A8bC468a8CD0FB14B6BD28b6"
},
{ {
"name": "stETH", "name": "stETH",
"strategy": "0x93c4b944D05dfe6df7645A86cd2206016c51564D" "strategy": "0x93c4b944D05dfe6df7645A86cd2206016c51564D"
}, },
{
"name": "ETHx",
"strategy": "0x9d7eD45EE2E8FC5482fa2428f15C971e6369011d"
},
{
"name": "ankrETH",
"strategy": "0xa4C637e0F704745D182e4D38cAb7E7485321d059"
},
{
"name": "lsETH",
"strategy": "0xAe60d8180437b5C34bB956822ac2710972584473"
},
{ {
"name": "Beacon Chain ETH", "name": "Beacon Chain ETH",
"strategy": "0xbeaC0eeEeeeeEEeEeEEEEeeEEeEeeeEeeEEBEaC0" "strategy": "0xbeaC0eeEeeeeEEeEeEEEEeeEEeEeeeEeeEEBEaC0"

@ -0,0 +1,4 @@
# Blast-side addresses for the xERC20 warp-route forge scripts.
# Usage: `source` this file before running the scripts in this directory.
# NOTE(review): ERC20_ADDRESS equals XERC20_ADDRESS here — presumably the
# xERC20 is the canonical token on Blast; confirm before broadcasting.
export ROUTER_ADDRESS=0xA34ceDf9068C5deE726C67A4e1DCfCc2D6E2A7fD
export ERC20_ADDRESS=0x2416092f143378750bb29b79eD961ab195CcEea5
export XERC20_ADDRESS=0x2416092f143378750bb29b79eD961ab195CcEea5
export RPC_URL="https://rpc.blast.io"

@ -0,0 +1,5 @@
# Ethereum-side addresses for the xERC20 warp-route forge scripts.
# Usage: `source` this file before running the scripts in this directory.
# ADMIN_ADDRESS is the ProxyAdmin used by ApproveLockbox.s.sol.
export ROUTER_ADDRESS=0x8dfbEA2582F41c8C4Eb25252BbA392fd3c09449A
export ADMIN_ADDRESS=0xa5B0D537CeBE97f087Dc5FE5732d70719caaEc1D
export ERC20_ADDRESS=0xbf5495Efe5DB9ce00f80364C8B423567e58d2110
export XERC20_ADDRESS=0x2416092f143378750bb29b79eD961ab195CcEea5
export RPC_URL="https://eth.merkle.io"

@ -0,0 +1,50 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
pragma solidity >=0.8.0;
import "forge-std/Script.sol";
import {AnvilRPC} from "test/AnvilRPC.sol";
import {TypeCasts} from "contracts/libs/TypeCasts.sol";
import {ITransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol";
import {ProxyAdmin} from "contracts/upgrade/ProxyAdmin.sol";
import {HypXERC20Lockbox} from "contracts/token/extensions/HypXERC20Lockbox.sol";
import {IXERC20Lockbox} from "contracts/token/interfaces/IXERC20Lockbox.sol";
import {IXERC20} from "contracts/token/interfaces/IXERC20.sol";
import {IERC20} from "contracts/token/interfaces/IXERC20.sol";
// source .env.<CHAIN>
// forge script ApproveLockbox.s.sol --broadcast --rpc-url localhost:XXXX
/// @notice Upgrades an existing HypXERC20Lockbox proxy to a freshly deployed
/// implementation, re-granting the lockbox approvals in the same transaction,
/// then checks the proxy cannot be re-initialized.
contract ApproveLockbox is Script {
    // Proxy (router), its ProxyAdmin, and the broadcasting key — all read
    // from the environment (source the matching .env file first).
    address router = vm.envAddress("ROUTER_ADDRESS");
    address admin = vm.envAddress("ADMIN_ADDRESS");
    uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY");
    ITransparentUpgradeableProxy proxy = ITransparentUpgradeableProxy(router);
    // Read the live route's lockbox and mailbox so the new implementation is
    // constructed against the same addresses.
    HypXERC20Lockbox old = HypXERC20Lockbox(router);
    address lockbox = address(old.lockbox());
    address mailbox = address(old.mailbox());
    ProxyAdmin proxyAdmin = ProxyAdmin(admin);

    function run() external {
        // Sanity check: ADMIN_ADDRESS must actually administer this proxy.
        assert(proxyAdmin.getProxyAdmin(proxy) == admin);
        vm.startBroadcast(deployerPrivateKey);
        HypXERC20Lockbox logic = new HypXERC20Lockbox(lockbox, mailbox);
        // Upgrade and invoke approveLockbox() atomically so the new
        // implementation's infinite allowances are set in the same call.
        proxyAdmin.upgradeAndCall(
            proxy,
            address(logic),
            abi.encodeCall(HypXERC20Lockbox.approveLockbox, ())
        );
        vm.stopBroadcast();
        // Post-condition (simulated, not broadcast): the proxy is already
        // initialized, so initialize() must revert.
        vm.expectRevert("Initializable: contract is already initialized");
        HypXERC20Lockbox(address(proxy)).initialize(
            address(0),
            address(0),
            mailbox
        );
    }
}

@ -0,0 +1,37 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
pragma solidity >=0.8.0;
import "forge-std/Script.sol";
import {AnvilRPC} from "test/AnvilRPC.sol";
import {IXERC20Lockbox} from "contracts/token/interfaces/IXERC20Lockbox.sol";
import {IXERC20} from "contracts/token/interfaces/IXERC20.sol";
import {IERC20} from "contracts/token/interfaces/IXERC20.sol";
// source .env.<CHAIN>
// anvil --fork-url $RPC_URL --port XXXX
// forge script GrantLimits.s.sol --broadcast --unlocked --rpc-url localhost:XXXX
/// @notice Anvil-fork helper: grants the warp route mint/burn limits on the
/// xERC20 (as its owner) and seeds a tester account with ERC20.
contract GrantLimits is Script {
    // Account to fund and the per-direction xERC20 limit to grant.
    address tester = 0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba;
    uint256 amount = 1 gwei;
    address router = vm.envAddress("ROUTER_ADDRESS");
    IERC20 erc20 = IERC20(vm.envAddress("ERC20_ADDRESS"));
    IXERC20 xerc20 = IXERC20(vm.envAddress("XERC20_ADDRESS"));

    /// @dev Funds and impersonates `account` on the Anvil fork, then arms
    /// vm.broadcast so the NEXT state-changing call is sent from it.
    function runFrom(address account) internal {
        AnvilRPC.setBalance(account, 1 ether);
        AnvilRPC.impersonateAccount(account);
        vm.broadcast(account);
    }

    function run() external {
        // Grant the router mint/burn limits, acting as the xERC20 owner.
        address owner = xerc20.owner();
        runFrom(owner);
        xerc20.setLimits(router, amount, amount);
        // Seed the tester with ERC20 sent from the token contract's own
        // balance (impersonated via Anvil).
        runFrom(address(erc20));
        erc20.transfer(tester, amount);
    }
}

@ -0,0 +1,127 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
pragma solidity >=0.8.0;
import "forge-std/Script.sol";
import {IXERC20Lockbox} from "../../contracts/token/interfaces/IXERC20Lockbox.sol";
import {IXERC20} from "../../contracts/token/interfaces/IXERC20.sol";
import {IERC20} from "../../contracts/token/interfaces/IXERC20.sol";
import {HypXERC20Lockbox} from "../../contracts/token/extensions/HypXERC20Lockbox.sol";
import {HypERC20Collateral} from "../../contracts/token/HypERC20Collateral.sol";
import {HypXERC20} from "../../contracts/token/extensions/HypXERC20.sol";
import {TransparentUpgradeableProxy} from "../../contracts/upgrade/TransparentUpgradeableProxy.sol";
import {ProxyAdmin} from "../../contracts/upgrade/ProxyAdmin.sol";
import {TypeCasts} from "../../contracts/libs/TypeCasts.sol";
import {TokenMessage} from "../../contracts/token/libs/TokenMessage.sol";
/// @notice Fork-test script for an ezETH warp route between Ethereum
/// (HypXERC20Lockbox) and Blast (HypXERC20): deploys both ends behind
/// proxies, enrolls them, grants limits, and exercises a round trip of
/// `transferRemote` and `handle` on each fork.
contract ezETH is Script {
    using TypeCasts for address;
    string ETHEREUM_RPC_URL = vm.envString("ETHEREUM_RPC_URL");
    string BLAST_RPC_URL = vm.envString("BLAST_RPC_URL");
    // Ethereum-side fork handle, domain, and deployed infra addresses.
    uint256 ethereumFork;
    uint32 ethereumDomainId = 1;
    address ethereumMailbox = 0xc005dc82818d67AF737725bD4bf75435d065D239;
    address ethereumLockbox = 0xC8140dA31E6bCa19b287cC35531c2212763C2059;
    // Blast-side fork handle, domain, and deployed infra addresses.
    uint256 blastFork;
    uint32 blastDomainId = 81457;
    address blastXERC20 = 0x2416092f143378750bb29b79eD961ab195CcEea5;
    address blastMailbox = 0x3a867fCfFeC2B790970eeBDC9023E75B0a172aa7;
    // Token amount moved in each direction of the round trip.
    uint256 amount = 100;

    function setUp() public {
        ethereumFork = vm.createFork(ETHEREUM_RPC_URL);
        blastFork = vm.createFork(BLAST_RPC_URL);
    }

    function run() external {
        address deployer = address(this);
        bytes32 recipient = deployer.addressToBytes32();
        // Pre-built warp payload reused for both inbound `handle` calls.
        bytes memory tokenMessage = TokenMessage.format(recipient, amount, "");
        // --- Ethereum: deploy the lockbox route behind a transparent proxy ---
        vm.selectFork(ethereumFork);
        HypXERC20Lockbox hypXERC20Lockbox = new HypXERC20Lockbox(
            ethereumLockbox,
            ethereumMailbox
        );
        ProxyAdmin ethAdmin = new ProxyAdmin();
        TransparentUpgradeableProxy ethProxy = new TransparentUpgradeableProxy(
            address(hypXERC20Lockbox),
            address(ethAdmin),
            abi.encodeCall(
                HypXERC20Lockbox.initialize,
                (address(0), address(0), deployer)
            )
        );
        hypXERC20Lockbox = HypXERC20Lockbox(address(ethProxy));
        // --- Blast: deploy the xERC20 route behind a transparent proxy ---
        vm.selectFork(blastFork);
        HypXERC20 hypXERC20 = new HypXERC20(blastXERC20, blastMailbox);
        ProxyAdmin blastAdmin = new ProxyAdmin();
        TransparentUpgradeableProxy blastProxy = new TransparentUpgradeableProxy(
            address(hypXERC20),
            address(blastAdmin),
            abi.encodeCall(
                HypERC20Collateral.initialize,
                (address(0), address(0), deployer)
            )
        );
        hypXERC20 = HypXERC20(address(blastProxy));
        hypXERC20.enrollRemoteRouter(
            ethereumDomainId,
            address(hypXERC20Lockbox).addressToBytes32()
        );
        // grant `amount` mint and burn limit to warp route
        vm.prank(IXERC20(blastXERC20).owner());
        IXERC20(blastXERC20).setLimits(address(hypXERC20), amount, amount);
        // test sending `amount` on warp route
        vm.prank(0x7BE481D464CAD7ad99500CE8A637599eB8d0FCDB); // ezETH whale
        IXERC20(blastXERC20).transfer(address(this), amount);
        IXERC20(blastXERC20).approve(address(hypXERC20), amount);
        uint256 value = hypXERC20.quoteGasPayment(ethereumDomainId);
        hypXERC20.transferRemote{value: value}(
            ethereumDomainId,
            recipient,
            amount
        );
        // test receiving `amount` on warp route (prank the mailbox as caller)
        vm.prank(blastMailbox);
        hypXERC20.handle(
            ethereumDomainId,
            address(hypXERC20Lockbox).addressToBytes32(),
            tokenMessage
        );
        // --- Back on Ethereum: enroll, grant limits, and round-trip there ---
        vm.selectFork(ethereumFork);
        hypXERC20Lockbox.enrollRemoteRouter(
            blastDomainId,
            address(hypXERC20).addressToBytes32()
        );
        // grant `amount` mint and burn limit to warp route
        IXERC20 ethereumXERC20 = hypXERC20Lockbox.xERC20();
        vm.prank(ethereumXERC20.owner());
        ethereumXERC20.setLimits(address(hypXERC20Lockbox), amount, amount);
        // test sending `amount` on warp route, funded from the lockbox itself
        IERC20 erc20 = IXERC20Lockbox(ethereumLockbox).ERC20();
        vm.prank(ethereumLockbox);
        erc20.transfer(address(this), amount);
        erc20.approve(address(hypXERC20Lockbox), amount);
        hypXERC20Lockbox.transferRemote(blastDomainId, recipient, amount);
        // test receiving `amount` on warp route
        vm.prank(ethereumMailbox);
        hypXERC20Lockbox.handle(
            blastDomainId,
            address(hypXERC20).addressToBytes32(),
            tokenMessage
        );
    }
}

@ -0,0 +1,107 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
pragma solidity >=0.8.0;
import "forge-std/Vm.sol";
import {Strings} from "@openzeppelin/contracts/utils/Strings.sol";
// see https://book.getfoundry.sh/reference/anvil/#supported-rpc-methods
/// @notice Helpers for issuing Anvil-specific JSON-RPC calls (account
/// impersonation, balance/code/storage overrides, fork resets) from Foundry
/// scripts via `vm.rpc`. Parameters are hand-serialized into JSON arrays.
library AnvilRPC {
    using Strings for address;
    using Strings for uint256;
    using AnvilRPC for string;
    using AnvilRPC for string[1];
    using AnvilRPC for string[2];
    using AnvilRPC for string[3];

    // Foundry cheatcode entry point at the well-known HEVM address.
    Vm private constant vm =
        Vm(address(uint160(uint256(keccak256("hevm cheat code")))));

    string private constant OPEN_ARRAY = "[";
    string private constant CLOSE_ARRAY = "]";
    string private constant COMMA = ",";

    /// @dev Wraps `value` in double quotes so it serializes as a JSON string.
    function escaped(
        string memory value
    ) internal pure returns (string memory) {
        return string.concat(ESCAPED_QUOTE, value, ESCAPED_QUOTE);
    }

    /// @dev Renders a one-element JSON array.
    function toString(
        string[1] memory values
    ) internal pure returns (string memory) {
        return string.concat(OPEN_ARRAY, values[0], CLOSE_ARRAY);
    }

    /// @dev Renders a two-element JSON array.
    function toString(
        string[2] memory values
    ) internal pure returns (string memory) {
        return
            string.concat(OPEN_ARRAY, values[0], COMMA, values[1], CLOSE_ARRAY);
    }

    /// @dev Renders a three-element JSON array.
    function toString(
        string[3] memory values
    ) internal pure returns (string memory) {
        return
            string.concat(
                OPEN_ARRAY,
                values[0],
                COMMA,
                values[1],
                COMMA,
                values[2],
                CLOSE_ARRAY
            );
    }

    /// @notice Allows subsequent transactions to be sent from `account`.
    function impersonateAccount(address account) internal {
        vm.rpc(
            "anvil_impersonateAccount",
            [account.toHexString().escaped()].toString()
        );
    }

    /// @notice Sets the native balance of `account` to `balance` (wei).
    function setBalance(address account, uint256 balance) internal {
        vm.rpc(
            "anvil_setBalance",
            [account.toHexString().escaped(), balance.toString()].toString()
        );
    }

    /// @notice Replaces the bytecode at `account` with `code`.
    function setCode(address account, bytes memory code) internal {
        vm.rpc(
            "anvil_setCode",
            [account.toHexString().escaped(), string(code).escaped()].toString()
        );
    }

    /// @notice Writes `value` into storage `slot` of `account`.
    function setStorageAt(
        address account,
        uint256 slot,
        uint256 value
    ) internal {
        vm.rpc(
            "anvil_setStorageAt",
            [
                account.toHexString().escaped(),
                slot.toHexString(),
                value.toHexString()
            ].toString()
        );
    }

    /// @notice Resets the node to a fresh fork of `rpcUrl`.
    function resetFork(string memory rpcUrl) internal {
        // `rpcUrl` is already a string, so no cast is needed before escaping.
        string memory obj = string.concat(
            // solhint-disable-next-line quotes
            '{"forking":{"jsonRpcUrl":',
            rpcUrl.escaped(),
            "}}"
        );
        vm.rpc("anvil_reset", [obj].toString());
    }
}
// here to prevent syntax highlighting from breaking
string constant ESCAPED_QUOTE = '"';

@ -479,6 +479,7 @@ contract InterchainAccountRouterTest is Test {
uint64 payment, uint64 payment,
bytes32 data bytes32 data
) public { ) public {
CallLib.Call[] memory calls = getCalls(data);
vm.assume(payment < gasLimit * igp.gasPrice()); vm.assume(payment < gasLimit * igp.gasPrice());
// arrange // arrange
bytes memory metadata = StandardHookMetadata.formatMetadata( bytes memory metadata = StandardHookMetadata.formatMetadata(
@ -495,11 +496,7 @@ contract InterchainAccountRouterTest is Test {
// act // act
vm.expectRevert("IGP: insufficient interchain gas payment"); vm.expectRevert("IGP: insufficient interchain gas payment");
originRouter.callRemote{value: payment}( originRouter.callRemote{value: payment}(destination, calls, metadata);
destination,
getCalls(data),
metadata
);
} }
function testFuzz_callRemoteWithOverrides_default(bytes32 data) public { function testFuzz_callRemoteWithOverrides_default(bytes32 data) public {

@ -19,13 +19,15 @@ import {TransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transpa
import {Mailbox} from "../../contracts/Mailbox.sol"; import {Mailbox} from "../../contracts/Mailbox.sol";
import {TypeCasts} from "../../contracts/libs/TypeCasts.sol"; import {TypeCasts} from "../../contracts/libs/TypeCasts.sol";
import {TestMailbox} from "../../contracts/test/TestMailbox.sol"; import {TestMailbox} from "../../contracts/test/TestMailbox.sol";
import {XERC20Test, FiatTokenTest, ERC20Test} from "../../contracts/test/ERC20Test.sol"; import {XERC20LockboxTest, XERC20Test, FiatTokenTest, ERC20Test} from "../../contracts/test/ERC20Test.sol";
import {TestPostDispatchHook} from "../../contracts/test/TestPostDispatchHook.sol"; import {TestPostDispatchHook} from "../../contracts/test/TestPostDispatchHook.sol";
import {TestInterchainGasPaymaster} from "../../contracts/test/TestInterchainGasPaymaster.sol"; import {TestInterchainGasPaymaster} from "../../contracts/test/TestInterchainGasPaymaster.sol";
import {GasRouter} from "../../contracts/client/GasRouter.sol"; import {GasRouter} from "../../contracts/client/GasRouter.sol";
import {IPostDispatchHook} from "../../contracts/interfaces/hooks/IPostDispatchHook.sol";
import {HypERC20} from "../../contracts/token/HypERC20.sol"; import {HypERC20} from "../../contracts/token/HypERC20.sol";
import {HypERC20Collateral} from "../../contracts/token/HypERC20Collateral.sol"; import {HypERC20Collateral} from "../../contracts/token/HypERC20Collateral.sol";
import {HypXERC20Lockbox} from "../../contracts/token/extensions/HypXERC20Lockbox.sol";
import {IXERC20} from "../../contracts/token/interfaces/IXERC20.sol"; import {IXERC20} from "../../contracts/token/interfaces/IXERC20.sol";
import {IFiatToken} from "../../contracts/token/interfaces/IFiatToken.sol"; import {IFiatToken} from "../../contracts/token/interfaces/IFiatToken.sol";
import {HypXERC20} from "../../contracts/token/extensions/HypXERC20.sol"; import {HypXERC20} from "../../contracts/token/extensions/HypXERC20.sol";
@ -197,38 +199,38 @@ abstract contract HypTokenTest is Test {
function _performRemoteTransferWithHook( function _performRemoteTransferWithHook(
uint256 _msgValue, uint256 _msgValue,
uint256 _amount uint256 _amount,
address _hook,
bytes memory _hookMetadata
) internal returns (bytes32 messageId) { ) internal returns (bytes32 messageId) {
vm.prank(ALICE); vm.prank(ALICE);
messageId = localToken.transferRemote{value: _msgValue}( messageId = localToken.transferRemote{value: _msgValue}(
DESTINATION, DESTINATION,
BOB.addressToBytes32(), BOB.addressToBytes32(),
_amount, _amount,
bytes(""), _hookMetadata,
address(noopHook) address(_hook)
); );
_processTransfers(BOB, _amount); _processTransfers(BOB, _amount);
assertEq(remoteToken.balanceOf(BOB), _amount); assertEq(remoteToken.balanceOf(BOB), _amount);
} }
function testTransfer_withHookSpecified() public { function testTransfer_withHookSpecified(
uint256 fee,
bytes calldata metadata
) public {
TestPostDispatchHook hook = new TestPostDispatchHook();
hook.setFee(fee);
vm.prank(ALICE); vm.prank(ALICE);
primaryToken.approve(address(localToken), TRANSFER_AMT); primaryToken.approve(address(localToken), TRANSFER_AMT);
bytes32 messageId = _performRemoteTransferWithHook( bytes32 messageId = _performRemoteTransferWithHook(
REQUIRED_VALUE, REQUIRED_VALUE,
TRANSFER_AMT TRANSFER_AMT,
address(hook),
metadata
); );
assertTrue(noopHook.messageDispatched(messageId)); assertTrue(hook.messageDispatched(messageId));
/// @dev Using this test would be ideal, but vm.expectCall with nested functions more than 1 level deep is broken
/// In other words, the call graph of Route.transferRemote() -> Mailbox.dispatch() -> Hook.postDispatch() does not work with expectCall
// vm.expectCall(
// address(noopHook),
// abi.encodeCall(
// IPostDispatchHook.postDispatch,
// (bytes(""), outboundMessage)
// )
// );
/// @dev Also, using expectedCall with Mailbox.dispatch() won't work either because overloaded function selection is broken, see https://github.com/ethereum/solidity/issues/13815
} }
function testBenchmark_overheadGasUsage() public virtual { function testBenchmark_overheadGasUsage() public virtual {
@ -442,6 +444,80 @@ contract HypXERC20Test is HypTokenTest {
} }
} }
/// @notice Unit tests for HypXERC20Lockbox using the XERC20LockboxTest mock.
contract HypXERC20LockboxTest is HypTokenTest {
    using TypeCasts for address;
    HypXERC20Lockbox internal xerc20Lockbox;

    function setUp() public override {
        super.setUp();
        XERC20LockboxTest lockbox = new XERC20LockboxTest(
            NAME,
            SYMBOL,
            TOTAL_SUPPLY,
            DECIMALS
        );
        // The route's collateral token is the lockbox's wrapped ERC20.
        primaryToken = ERC20Test(address(lockbox.ERC20()));
        localToken = new HypXERC20Lockbox(
            address(lockbox),
            address(localMailbox)
        );
        xerc20Lockbox = HypXERC20Lockbox(address(localToken));
        xerc20Lockbox.enrollRemoteRouter(
            DESTINATION,
            address(remoteToken).addressToBytes32()
        );
        primaryToken.transfer(ALICE, 1000e18);
        _enrollRemoteTokenRouter();
    }

    // Sentinel for the infinite allowance granted by approveLockbox().
    uint256 constant MAX_INT = 2 ** 256 - 1;

    // The constructor calls approveLockbox(): both the xERC20 and the wrapped
    // ERC20 must hold infinite allowances toward the lockbox.
    function testApproval() public {
        assertEq(
            xerc20Lockbox.xERC20().allowance(
                address(localToken),
                address(xerc20Lockbox.lockbox())
            ),
            MAX_INT
        );
        assertEq(
            xerc20Lockbox.wrappedToken().allowance(
                address(localToken),
                address(xerc20Lockbox.lockbox())
            ),
            MAX_INT
        );
    }

    // Outbound transfer must burn the route's xERC20 and debit ALICE.
    function testRemoteTransfer() public {
        uint256 balanceBefore = localToken.balanceOf(ALICE);

        vm.prank(ALICE);
        primaryToken.approve(address(localToken), TRANSFER_AMT);

        vm.expectCall(
            address(xerc20Lockbox.xERC20()),
            abi.encodeCall(IXERC20.burn, (address(localToken), TRANSFER_AMT))
        );
        _performRemoteTransferWithEmit(REQUIRED_VALUE, TRANSFER_AMT, 0);
        assertEq(localToken.balanceOf(ALICE), balanceBefore - TRANSFER_AMT);
    }

    // Inbound message must mint xERC20 to the route and credit ALICE.
    function testHandle() public {
        uint256 balanceBefore = localToken.balanceOf(ALICE);
        vm.expectCall(
            address(xerc20Lockbox.xERC20()),
            abi.encodeCall(IXERC20.mint, (address(localToken), TRANSFER_AMT))
        );
        _handleLocalTransfer(TRANSFER_AMT);
        assertEq(localToken.balanceOf(ALICE), balanceBefore + TRANSFER_AMT);
    }
}
contract HypFiatTokenTest is HypTokenTest { contract HypFiatTokenTest is HypTokenTest {
using TypeCasts for address; using TypeCasts for address;
HypFiatToken internal fiatToken; HypFiatToken internal fiatToken;

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save