chore(cli): merge branch 'main' into cli-2.0 (#4063)

### Description

<!--
What's included in this PR?
-->

### Drive-by changes

<!--
Are there any minor or drive-by changes also included?
-->

### Related issues

<!--
- Fixes #[issue number here]
-->

### Backward compatibility

<!--
Are these changes backward compatible? Are there any infrastructure
implications, e.g. changes that would prohibit deploying older commits
using this infra tooling?

Yes/No
-->

### Testing

<!--
What kind of testing have these changes undergone?

None/Manual/Unit Tests
-->
pull/3981/head
Noah Bayindirli 🥂 5 months ago committed by GitHub
commit 66ec15af3d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 6
      .changeset/brave-penguins-ring.md
  2. 7
      .changeset/clean-numbers-know.md
  3. 6
      .changeset/mean-impalas-leave.md
  4. 5
      .changeset/olive-geckos-behave.md
  5. 6
      .changeset/sixty-ducks-brush.md
  6. 3
      .github/workflows/monorepo-docker.yml
  7. 3
      .github/workflows/rust-docker.yml
  8. 3
      .github/workflows/rust-skipped.yml
  9. 6
      .github/workflows/rust.yml
  10. 3
      .github/workflows/test-skipped.yml
  11. 2
      .github/workflows/test.yml
  12. 44
      rust/Cargo.lock
  13. 5
      rust/Cargo.toml
  14. 3
      rust/agents/relayer/Cargo.toml
  15. 123
      rust/agents/relayer/src/msg/blacklist.rs
  16. 3
      rust/agents/relayer/src/msg/metadata/base.rs
  17. 1
      rust/agents/relayer/src/msg/mod.rs
  18. 54
      rust/agents/relayer/src/msg/op_queue.rs
  19. 58
      rust/agents/relayer/src/msg/op_submitter.rs
  20. 261
      rust/agents/relayer/src/msg/pending_message.rs
  21. 47
      rust/agents/relayer/src/msg/processor.rs
  22. 34
      rust/agents/relayer/src/relayer.rs
  23. 64
      rust/agents/relayer/src/settings/mod.rs
  24. 24
      rust/agents/scraper/migration/src/m20230309_000001_create_table_domain.rs
  25. 131
      rust/agents/validator/src/submit.rs
  26. 29
      rust/chains/hyperlane-cosmos/src/interchain_gas.rs
  27. 26
      rust/chains/hyperlane-cosmos/src/mailbox.rs
  28. 28
      rust/chains/hyperlane-cosmos/src/merkle_tree_hook.rs
  29. 2
      rust/chains/hyperlane-cosmos/src/providers/grpc.rs
  30. 15
      rust/chains/hyperlane-cosmos/src/providers/rpc.rs
  31. 31
      rust/chains/hyperlane-cosmos/src/utils.rs
  32. 210
      rust/config/mainnet_config.json
  33. 8
      rust/hyperlane-base/Cargo.toml
  34. 8
      rust/hyperlane-base/src/contract_sync/cursors/sequence_aware/forward.rs
  35. 10
      rust/hyperlane-base/src/db/rocks/hyperlane_db.rs
  36. 6
      rust/hyperlane-base/src/lib.rs
  37. 2
      rust/hyperlane-base/src/metrics/core.rs
  38. 3
      rust/hyperlane-base/src/settings/mod.rs
  39. 1
      rust/hyperlane-core/Cargo.toml
  40. 2
      rust/hyperlane-core/src/accumulator/incremental.rs
  41. 15
      rust/hyperlane-core/src/chain.rs
  42. 8
      rust/hyperlane-core/src/rpc_clients/retry.rs
  43. 163
      rust/hyperlane-core/src/traits/pending_operation.rs
  44. 8
      rust/hyperlane-core/src/types/indexing.rs
  45. 2
      rust/sealevel/libraries/account-utils/src/lib.rs
  46. 2
      rust/sealevel/libraries/ecdsa-signature/Cargo.toml
  47. 2
      rust/sealevel/libraries/message-recipient-interface/Cargo.toml
  48. 2
      rust/sealevel/programs/hyperlane-sealevel-igp/Cargo.toml
  49. 2
      rust/sealevel/programs/mailbox-test/src/functional.rs
  50. 2
      rust/sealevel/programs/mailbox/Cargo.toml
  51. 5
      rust/utils/run-locally/src/cosmos/types.rs
  52. 13
      solidity/CHANGELOG.md
  53. 5
      solidity/contracts/interfaces/avs/vendored/IDelegationManager.sol
  54. 1
      solidity/contracts/interfaces/isms/IMultisigIsm.sol
  55. 5
      solidity/contracts/isms/multisig/AbstractMultisigIsm.sol
  56. 4
      solidity/package.json
  57. 5
      typescript/.changeset/pink-mirrors-sneeze.md
  58. 5
      typescript/.changeset/tall-radios-grin.md
  59. 4
      typescript/ccip-server/CHANGELOG.md
  60. 2
      typescript/ccip-server/package.json
  61. 19
      typescript/cli/CHANGELOG.md
  62. 2
      typescript/cli/ci-advanced-test.sh
  63. 8
      typescript/cli/package.json
  64. 449
      typescript/cli/src/avs/check.ts
  65. 3
      typescript/cli/src/avs/config.ts
  66. 2
      typescript/cli/src/avs/stakeRegistry.ts
  67. 69
      typescript/cli/src/commands/avs.ts
  68. 12
      typescript/cli/src/commands/options.ts
  69. 8
      typescript/cli/src/logger.ts
  70. 69
      typescript/cli/src/validator/utils.ts
  71. 2
      typescript/cli/src/version.ts
  72. 20
      typescript/helloworld/CHANGELOG.md
  73. 8
      typescript/helloworld/package.json
  74. 28
      typescript/infra/CHANGELOG.md
  75. 60
      typescript/infra/config/environments/mainnet3/agent.ts
  76. 12
      typescript/infra/config/environments/mainnet3/aw-validators/hyperlane.json
  77. 9
      typescript/infra/config/environments/mainnet3/aw-validators/rc.json
  78. 17
      typescript/infra/config/environments/mainnet3/chains.ts
  79. 4544
      typescript/infra/config/environments/mainnet3/core/verification.json
  80. 8
      typescript/infra/config/environments/mainnet3/funding.ts
  81. 62
      typescript/infra/config/environments/mainnet3/gasPrices.json
  82. 9
      typescript/infra/config/environments/mainnet3/index.ts
  83. 186
      typescript/infra/config/environments/mainnet3/ism/verification.json
  84. 3
      typescript/infra/config/environments/mainnet3/owners.ts
  85. 4
      typescript/infra/config/environments/mainnet3/supportedChainNames.ts
  86. 48
      typescript/infra/config/environments/mainnet3/tokenPrices.json
  87. 54
      typescript/infra/config/environments/mainnet3/validators.ts
  88. 26
      typescript/infra/config/environments/mainnet3/warp/renzo-ezETH-addresses.json
  89. 37
      typescript/infra/config/environments/testnet4/agent.ts
  90. 2
      typescript/infra/config/registry.ts
  91. 10
      typescript/infra/package.json
  92. 21
      typescript/infra/scripts/agent-utils.ts
  93. 24
      typescript/infra/scripts/agents/utils.ts
  94. 4
      typescript/infra/scripts/announce-validators.ts
  95. 21
      typescript/infra/scripts/check-deploy.ts
  96. 57
      typescript/infra/scripts/deploy.ts
  97. 37
      typescript/infra/scripts/print-gas-prices.ts
  98. 12
      typescript/infra/scripts/print-token-prices.ts
  99. 14
      typescript/infra/src/agents/index.ts
  100. 43
      typescript/infra/src/config/agent/agent.ts
  101. Some files were not shown because too many files have changed in this diff Show More

@ -0,0 +1,6 @@
---
'@hyperlane-xyz/cli': minor
'@hyperlane-xyz/core': minor
---
Add CLI command to support AVS validator status check

@ -0,0 +1,7 @@
---
'@hyperlane-xyz/helloworld': minor
'@hyperlane-xyz/infra': minor
'@hyperlane-xyz/cli': minor
---
Upgrade registry to 2.1.1

@ -1,6 +0,0 @@
---
'@hyperlane-xyz/core': patch
'@hyperlane-xyz/helloworld': patch
---
fix: `TokenRouter.transferRemote` with hook overrides

@ -1,5 +0,0 @@
---
'@hyperlane-xyz/sdk': patch
---
Do not consider xERC20 a collateral standard to fix fungibility checking logic while maintaining mint limit checking

@ -1,6 +0,0 @@
---
'@hyperlane-xyz/cli': patch
'@hyperlane-xyz/sdk': patch
---
Support priorityFee fetching from RPC and some better logging

@ -10,6 +10,7 @@ on:
- 'typescript/infra/**'
- 'Dockerfile'
- '.dockerignore'
- '.github/workflows/monorepo-docker.yml'
concurrency:
group: build-push-monorepo-${{ github.ref }}
@ -47,7 +48,7 @@ jobs:
id: taggen
run: |
echo "TAG_DATE=$(date +'%Y%m%d-%H%M%S')" >> $GITHUB_OUTPUT
echo "TAG_SHA=$(echo '${{ github.sha }}' | cut -b 1-7)" >> $GITHUB_OUTPUT
echo "TAG_SHA=$(echo '${{ github.event.pull_request.head.sha || github.sha }}' | cut -b 1-7)" >> $GITHUB_OUTPUT
- name: Docker meta
id: meta
uses: docker/metadata-action@v5

@ -7,6 +7,7 @@ on:
pull_request:
paths:
- 'rust/**'
- '.github/workflows/rust-docker.yml'
concurrency:
group: build-push-agents-${{ github.ref }}
cancel-in-progress: true
@ -40,7 +41,7 @@ jobs:
id: taggen
run: |
echo "TAG_DATE=$(date +'%Y%m%d-%H%M%S')" >> $GITHUB_OUTPUT
echo "TAG_SHA=$(echo '${{ github.sha }}' | cut -b 1-7)" >> $GITHUB_OUTPUT
echo "TAG_SHA=$(echo '${{ github.event.pull_request.head.sha || github.sha }}' | cut -b 1-7)" >> $GITHUB_OUTPUT
- name: Docker meta
id: meta
uses: docker/metadata-action@v5

@ -4,6 +4,9 @@ name: rust
on:
push:
branches: [main]
paths-ignore:
- 'rust/**'
- .github/workflows/rust.yml
pull_request:
branches: [main]
paths-ignore:

@ -1,6 +1,12 @@
name: rust
on:
push:
branches: [main]
paths:
- 'rust/**'
- .github/workflows/rust.yml
- '!*.md'
pull_request:
branches: [main]
paths:

@ -3,6 +3,9 @@ name: test
on:
push:
branches: [main]
paths:
- '*.md'
- '!**/*'
pull_request:
branches:
- '*'

@ -4,6 +4,8 @@ on:
# Triggers the workflow on pushes to main & cli-2.0 branches
push:
branches: [main, cli-2.0]
paths-ignore:
- '*.md'
# Triggers on pull requests ignoring md files
pull_request:
branches:

44
rust/Cargo.lock generated

@ -1094,13 +1094,13 @@ dependencies = [
[[package]]
name = "clang-sys"
version = "1.7.0"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1"
checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"
dependencies = [
"glob",
"libc",
"libloading 0.8.1",
"libloading 0.8.4",
]
[[package]]
@ -3908,6 +3908,12 @@ dependencies = [
"unicode-segmentation",
]
[[package]]
name = "heck"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "hermit-abi"
version = "0.1.19"
@ -4244,7 +4250,8 @@ dependencies = [
"serde_json",
"sha3 0.10.8",
"solana-sdk",
"strum 0.25.0",
"strum 0.26.3",
"strum_macros 0.26.4",
"thiserror",
"tiny-keccak 2.0.2",
"tokio",
@ -5175,12 +5182,12 @@ dependencies = [
[[package]]
name = "libloading"
version = "0.8.1"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161"
checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d"
dependencies = [
"cfg-if",
"windows-sys 0.48.0",
"windows-targets 0.52.0",
]
[[package]]
@ -5266,9 +5273,9 @@ dependencies = [
[[package]]
name = "libz-sys"
version = "1.1.14"
version = "1.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "295c17e837573c8c821dbaeb3cceb3d745ad082f7572191409e69cbc1b3fd050"
checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9"
dependencies = [
"cc",
"pkg-config",
@ -6462,9 +6469,9 @@ dependencies = [
[[package]]
name = "prettyplease"
version = "0.2.16"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5"
checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7"
dependencies = [
"proc-macro2 1.0.76",
"syn 2.0.48",
@ -7037,11 +7044,12 @@ dependencies = [
"num-traits",
"once_cell",
"prometheus",
"rand 0.8.5",
"regex",
"reqwest",
"serde",
"serde_json",
"strum 0.25.0",
"strum 0.26.3",
"thiserror",
"tokio",
"tokio-metrics",
@ -9517,11 +9525,11 @@ dependencies = [
[[package]]
name = "strum"
version = "0.25.0"
version = "0.26.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125"
checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06"
dependencies = [
"strum_macros 0.25.3",
"strum_macros 0.26.4",
]
[[package]]
@ -9551,11 +9559,11 @@ dependencies = [
[[package]]
name = "strum_macros"
version = "0.25.3"
version = "0.26.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0"
checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be"
dependencies = [
"heck 0.4.1",
"heck 0.5.0",
"proc-macro2 1.0.76",
"quote 1.0.35",
"rustversion",

@ -119,6 +119,7 @@ pretty_env_logger = "0.5.0"
primitive-types = "=0.12.1"
prometheus = "0.13"
protobuf = "*"
rand = "0.8.5"
regex = "1.5"
reqwest = "0.11"
ripemd = "0.1.3"
@ -163,8 +164,8 @@ spl-token = { version = "=3.5.0", features = ["no-entrypoint"] }
spl-token-2022 = { version = "=0.5.0", features = ["no-entrypoint"] }
spl-type-length-value = "=0.1.0"
static_assertions = "1.1"
strum = "0.25.0"
strum_macros = "0.25.2"
strum = "0.26.2"
strum_macros = "0.26.2"
tempfile = "3.3"
tendermint = "0.32.2"
tendermint-rpc = { version = "0.32.0", features = ["http-client", "tokio"] }

@ -26,6 +26,7 @@ itertools.workspace = true
num-derive.workspace = true
num-traits.workspace = true
prometheus.workspace = true
rand.workspace = true
regex.workspace = true
reqwest = { workspace = true, features = ["json"] }
serde.workspace = true
@ -43,7 +44,7 @@ hyperlane-ethereum = { path = "../../chains/hyperlane-ethereum" }
[dev-dependencies]
once_cell.workspace = true
mockall.worksapce = true
mockall.workspace = true
tokio-test.workspace = true
hyperlane-test = { path = "../../hyperlane-test" }
hyperlane-base = { path = "../../hyperlane-base", features = ["test-utils"] }

@ -0,0 +1,123 @@
use hyperlane_core::HyperlaneMessage;
/// A blacklist of raw address byte strings used to block messages from
/// being relayed. Matching is done by byte-level containment, so entries
/// of any length (and any address format) are supported.
#[derive(Debug, Clone, Default)]
pub struct AddressBlacklist {
    // A list of addresses that are blocked from being relayed.
    // Addresses are any length to support different address types.
    pub blacklist: Vec<Vec<u8>>,
}
impl AddressBlacklist {
    /// Builds a blacklist from the given raw address byte strings.
    pub fn new(blacklist: Vec<Vec<u8>>) -> Self {
        Self { blacklist }
    }

    /// Returns the first blacklisted address found in the message, or `None`
    /// if the message is not blocked.
    ///
    /// An address "matches" when its bytes occur as a contiguous subslice of
    /// the message's sender, recipient, or body. The matched entry is
    /// returned (rather than a bool) so callers can report which address
    /// triggered the block.
    pub fn find_blacklisted_address(&self, message: &HyperlaneMessage) -> Option<Vec<u8>> {
        self.blacklist.iter().find_map(|address| {
            if is_subsequence(message.sender.as_bytes(), address)
                || is_subsequence(message.recipient.as_bytes(), address)
                || is_subsequence(&message.body, address)
            {
                // Return the blocked address that was found.
                Some(address.clone())
            } else {
                None
            }
        })
    }
}
/// Returns `true` when `needle` occurs as a contiguous run of elements
/// inside `haystack` — i.e. a subslice/substring match, not a scattered
/// subsequence. An empty `needle` matches any haystack.
fn is_subsequence<T: PartialEq>(haystack: &[T], needle: &[T]) -> bool {
    if needle.is_empty() {
        // Trivial match; also avoids `windows(0)`, which would panic.
        return true;
    }
    if needle.len() > haystack.len() {
        return false;
    }
    // Slide a needle-sized window across the haystack and compare.
    haystack.windows(needle.len()).any(|window| window == needle)
}
#[cfg(test)]
mod test {
    use hyperlane_core::H256;

    use super::*;

    /// `is_subsequence` performs a contiguous subslice match: prefixes,
    /// suffixes, interior runs, and the empty needle all match; anything
    /// longer than the haystack or not contiguous does not.
    #[test]
    fn test_is_subsequence() {
        assert!(is_subsequence(b"hello", b"hello"));
        assert!(is_subsequence(b"hello", b"he"));
        assert!(is_subsequence(b"hello", b"lo"));
        assert!(is_subsequence(b"hello", b""));
        assert!(is_subsequence(b"hello", b"o"));
        assert!(!is_subsequence(b"hello", b"hello world"));
        assert!(!is_subsequence(b"hello", b"world"));
        assert!(!is_subsequence(b"hello", b"world hello"));
    }

    /// A message is blocked when the blacklisted bytes appear anywhere in
    /// its sender, recipient, or body; the matched entry is returned.
    #[test]
    fn test_is_blocked() {
        let blocked = b"blocked";

        let blocklist = AddressBlacklist::new(vec![blocked.to_vec()]);

        // Builds a zeroed byte vector of `len` with `subsequence` written at `index`.
        let bytes_with_subsequence = |subsequence: &[u8], index: usize, len: usize| {
            let mut bytes = vec![0; len];
            bytes[index..index + subsequence.len()].copy_from_slice(subsequence);
            bytes
        };

        // Builds an H256 whose bytes contain `subsequence` starting at `index`.
        let h256_with_subsequence = |subsequence: &[u8], index: usize| {
            let bytes = bytes_with_subsequence(subsequence, index, H256::len_bytes());
            H256::from_slice(&bytes)
        };

        // Blocked - sender includes the blocked address
        let message = HyperlaneMessage {
            sender: h256_with_subsequence(blocked, 0),
            ..Default::default()
        };
        assert_eq!(
            blocklist.find_blacklisted_address(&message),
            Some(blocked.to_vec())
        );

        // Blocked - recipient includes the blocked address
        let message = HyperlaneMessage {
            recipient: h256_with_subsequence(blocked, 20),
            ..Default::default()
        };
        assert_eq!(
            blocklist.find_blacklisted_address(&message),
            Some(blocked.to_vec())
        );

        // Blocked - body includes the blocked address
        let message = HyperlaneMessage {
            body: bytes_with_subsequence(blocked, 100 - blocked.len(), 100),
            ..Default::default()
        };
        assert_eq!(
            blocklist.find_blacklisted_address(&message),
            Some(blocked.to_vec())
        );

        // Not blocked - sender, recipient, and body do not include the blocked address
        let message = HyperlaneMessage {
            body: vec![1; 100],
            ..Default::default()
        };
        assert!(blocklist.find_blacklisted_address(&message).is_none());
    }
}

@ -278,8 +278,9 @@ pub struct BaseMetadataBuilder {
allow_local_checkpoint_syncers: bool,
metrics: Arc<CoreMetrics>,
db: HyperlaneRocksDB,
max_depth: u32,
app_context_classifier: IsmAwareAppContextClassifier,
#[new(value = "7")]
max_depth: u32,
}
impl Debug for BaseMetadataBuilder {

@ -25,6 +25,7 @@
//! - FallbackProviderSubmitter (Serialized, but if some RPC provider sucks,
//! switch everyone to new one)
pub(crate) mod blacklist;
pub(crate) mod gas_payment;
pub(crate) mod metadata;
pub(crate) mod op_queue;

@ -1,7 +1,7 @@
use std::{cmp::Reverse, collections::BinaryHeap, sync::Arc};
use derive_new::new;
use hyperlane_core::{PendingOperation, QueueOperation};
use hyperlane_core::{PendingOperation, PendingOperationStatus, QueueOperation};
use prometheus::{IntGauge, IntGaugeVec};
use tokio::sync::{broadcast::Receiver, Mutex};
use tracing::{debug, info, instrument};
@ -21,8 +21,15 @@ pub struct OpQueue {
impl OpQueue {
/// Push an element onto the queue and update metrics
/// Arguments:
/// - `op`: the operation to push onto the queue
/// - `new_status`: optional new status to set for the operation. When an operation is added to a queue,
/// it's very likely that its status has just changed, so this forces the caller to consider the new status
#[instrument(skip(self), ret, fields(queue_label=%self.queue_metrics_label), level = "debug")]
pub async fn push(&self, op: QueueOperation) {
pub async fn push(&self, mut op: QueueOperation, new_status: Option<PendingOperationStatus>) {
if let Some(new_status) = new_status {
op.set_status(new_status);
}
// increment the metric before pushing onto the queue, because we lose ownership afterwards
self.get_operation_metric(op.as_ref()).inc();
@ -98,8 +105,12 @@ impl OpQueue {
/// Get the metric associated with this operation
fn get_operation_metric(&self, operation: &dyn PendingOperation) -> IntGauge {
let (destination, app_context) = operation.get_operation_labels();
self.metrics
.with_label_values(&[&destination, &self.queue_metrics_label, &app_context])
self.metrics.with_label_values(&[
&destination,
&self.queue_metrics_label,
&operation.status().to_string(),
&app_context,
])
}
}
@ -141,6 +152,12 @@ mod test {
self.id
}
fn status(&self) -> PendingOperationStatus {
PendingOperationStatus::FirstPrepareAttempt
}
fn set_status(&mut self, _status: PendingOperationStatus) {}
fn reset_attempts(&mut self) {
self.seconds_to_next_attempt = 0;
}
@ -149,6 +166,10 @@ mod test {
todo!()
}
fn retrieve_status_from_db(&self) -> Option<PendingOperationStatus> {
todo!()
}
fn get_operation_labels(&self) -> (String, String) {
Default::default()
}
@ -219,7 +240,12 @@ mod test {
(
IntGaugeVec::new(
prometheus::Opts::new("op_queue", "OpQueue metrics"),
&["destination", "queue_metrics_label", "app_context"],
&[
"destination",
"queue_metrics_label",
"operation_status",
"app_context",
],
)
.unwrap(),
"queue_metrics_label".to_string(),
@ -256,12 +282,22 @@ mod test {
// push to queue 1
for _ in 0..=2 {
op_queue_1.push(ops.pop_front().unwrap()).await;
op_queue_1
.push(
ops.pop_front().unwrap(),
Some(PendingOperationStatus::FirstPrepareAttempt),
)
.await;
}
// push to queue 2
for _ in 3..messages_to_send {
op_queue_2.push(ops.pop_front().unwrap()).await;
op_queue_2
.push(
ops.pop_front().unwrap(),
Some(PendingOperationStatus::FirstPrepareAttempt),
)
.await;
}
// Retry by message ids
@ -320,7 +356,9 @@ mod test {
// push to queue
for op in ops {
op_queue.push(op).await;
op_queue
.push(op, Some(PendingOperationStatus::FirstPrepareAttempt))
.await;
}
// Retry by domain

@ -5,6 +5,8 @@ use derive_new::new;
use futures::future::join_all;
use futures_util::future::try_join_all;
use hyperlane_core::total_estimated_cost;
use hyperlane_core::ConfirmReason::*;
use hyperlane_core::PendingOperationStatus;
use prometheus::{IntCounter, IntGaugeVec};
use tokio::sync::broadcast::Sender;
use tokio::sync::mpsc;
@ -184,7 +186,14 @@ async fn receive_task(
// make sure things are getting wired up correctly; if this works in testing it
// should also be valid in production.
debug_assert_eq!(*op.destination_domain(), domain);
prepare_queue.push(op).await;
let status = op.retrieve_status_from_db().unwrap_or_else(|| {
trace!(
?op,
"No status found for message, defaulting to FirstPrepareAttempt"
);
PendingOperationStatus::FirstPrepareAttempt
});
prepare_queue.push(op, Some(status)).await;
}
}
@ -220,7 +229,7 @@ async fn prepare_task(
.filter(|r| {
matches!(
r,
PendingOperationResult::NotReady | PendingOperationResult::Reprepare
PendingOperationResult::NotReady | PendingOperationResult::Reprepare(_)
)
})
.count();
@ -231,21 +240,27 @@ async fn prepare_task(
debug!(?op, "Operation prepared");
metrics.ops_prepared.inc();
// TODO: push multiple messages at once
submit_queue.push(op).await;
submit_queue
.push(op, Some(PendingOperationStatus::ReadyToSubmit))
.await;
}
PendingOperationResult::NotReady => {
prepare_queue.push(op).await;
prepare_queue.push(op, None).await;
}
PendingOperationResult::Reprepare => {
PendingOperationResult::Reprepare(reason) => {
metrics.ops_failed.inc();
prepare_queue.push(op).await;
prepare_queue
.push(op, Some(PendingOperationStatus::Retry(reason)))
.await;
}
PendingOperationResult::Drop => {
metrics.ops_dropped.inc();
}
PendingOperationResult::Confirm => {
PendingOperationResult::Confirm(reason) => {
debug!(?op, "Pushing operation to confirm queue");
confirm_queue.push(op).await;
confirm_queue
.push(op, Some(PendingOperationStatus::Confirm(reason)))
.await;
}
}
}
@ -297,7 +312,9 @@ async fn submit_single_operation(
op.submit().await;
debug!(?op, "Operation submitted");
op.set_next_attempt_after(CONFIRM_DELAY);
confirm_queue.push(op).await;
confirm_queue
.push(op, Some(PendingOperationStatus::Confirm(SubmittedBySelf)))
.await;
metrics.ops_submitted.inc();
if matches!(
@ -343,7 +360,7 @@ async fn confirm_task(
if op_results.iter().all(|op| {
matches!(
op,
PendingOperationResult::NotReady | PendingOperationResult::Confirm
PendingOperationResult::NotReady | PendingOperationResult::Confirm(_)
)
}) {
// None of the operations are ready, so wait for a little bit
@ -364,18 +381,25 @@ async fn confirm_operation(
debug_assert_eq!(*op.destination_domain(), domain);
let operation_result = op.confirm().await;
match operation_result {
match &operation_result {
PendingOperationResult::Success => {
debug!(?op, "Operation confirmed");
metrics.ops_confirmed.inc();
}
PendingOperationResult::NotReady | PendingOperationResult::Confirm => {
PendingOperationResult::NotReady => {
confirm_queue.push(op, None).await;
}
PendingOperationResult::Confirm(reason) => {
// TODO: push multiple messages at once
confirm_queue.push(op).await;
confirm_queue
.push(op, Some(PendingOperationStatus::Confirm(reason.clone())))
.await;
}
PendingOperationResult::Reprepare => {
PendingOperationResult::Reprepare(reason) => {
metrics.ops_failed.inc();
prepare_queue.push(op).await;
prepare_queue
.push(op, Some(PendingOperationStatus::Retry(reason.clone())))
.await;
}
PendingOperationResult::Drop => {
metrics.ops_dropped.inc();
@ -434,7 +458,9 @@ impl OperationBatch {
for mut op in self.operations {
op.set_operation_outcome(outcome.clone(), total_estimated_cost);
op.set_next_attempt_after(CONFIRM_DELAY);
confirm_queue.push(op).await;
confirm_queue
.push(op, Some(PendingOperationStatus::Confirm(SubmittedBySelf)))
.await;
}
return;
}

@ -9,12 +9,13 @@ use derive_new::new;
use eyre::Result;
use hyperlane_base::{db::HyperlaneRocksDB, CoreMetrics};
use hyperlane_core::{
gas_used_by_operation, make_op_try, BatchItem, ChainCommunicationError, ChainResult,
gas_used_by_operation, BatchItem, ChainCommunicationError, ChainResult, ConfirmReason,
HyperlaneChain, HyperlaneDomain, HyperlaneMessage, Mailbox, MessageSubmissionData,
PendingOperation, PendingOperationResult, TryBatchAs, TxOutcome, H256, U256,
PendingOperation, PendingOperationResult, PendingOperationStatus, ReprepareReason, TryBatchAs,
TxOutcome, H256, U256,
};
use prometheus::{IntCounter, IntGauge};
use tracing::{debug, error, info, instrument, trace, warn};
use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument};
use super::{
gas_payment::GasPaymentEnforcer,
@ -53,6 +54,7 @@ pub struct MessageContext {
pub struct PendingMessage {
pub message: HyperlaneMessage,
ctx: Arc<MessageContext>,
status: PendingOperationStatus,
app_context: Option<String>,
#[new(default)]
submitted: bool,
@ -120,6 +122,21 @@ impl PendingOperation for PendingMessage {
self.message.id()
}
fn status(&self) -> PendingOperationStatus {
self.status.clone()
}
fn set_status(&mut self, status: PendingOperationStatus) {
if let Err(e) = self
.ctx
.origin_db
.store_status_by_message_id(&self.message.id(), &self.status)
{
warn!(message_id = ?self.message.id(), err = %e, status = %self.status, "Persisting `status` failed for message");
}
self.status = status;
}
fn priority(&self) -> u32 {
self.message.nonce
}
@ -132,14 +149,22 @@ impl PendingOperation for PendingMessage {
self.ctx.destination_mailbox.domain()
}
fn retrieve_status_from_db(&self) -> Option<PendingOperationStatus> {
match self.ctx.origin_db.retrieve_status_by_message_id(&self.id()) {
Ok(status) => status,
Err(e) => {
warn!(error=?e, "Failed to retrieve status for message");
None
}
}
}
fn app_context(&self) -> Option<String> {
self.app_context.clone()
}
#[instrument(skip(self), ret, fields(id=?self.id()), level = "debug")]
async fn prepare(&mut self) -> PendingOperationResult {
make_op_try!(|| self.on_reprepare());
if !self.is_ready() {
trace!("Message is not ready to be submitted yet");
return PendingOperationResult::NotReady;
@ -148,27 +173,36 @@ impl PendingOperation for PendingMessage {
// If the message has already been processed, e.g. due to another relayer having
// already processed, then mark it as already-processed, and move on to
// the next tick.
let is_already_delivered = op_try!(
self.ctx
.destination_mailbox
.delivered(self.message.id())
.await,
"checking message delivery status"
);
let is_already_delivered = match self
.ctx
.destination_mailbox
.delivered(self.message.id())
.await
{
Ok(is_delivered) => is_delivered,
Err(err) => {
return self.on_reprepare(Some(err), ReprepareReason::ErrorCheckingDeliveryStatus);
}
};
if is_already_delivered {
debug!("Message has already been delivered, marking as submitted.");
self.submitted = true;
self.set_next_attempt_after(CONFIRM_DELAY);
return PendingOperationResult::Confirm;
return PendingOperationResult::Confirm(ConfirmReason::AlreadySubmitted);
}
let provider = self.ctx.destination_mailbox.provider();
// We cannot deliver to an address that is not a contract so check and drop if it isn't.
let is_contract = op_try!(
provider.is_contract(&self.message.recipient).await,
"checking if message recipient is a contract"
);
let is_contract = match provider.is_contract(&self.message.recipient).await {
Ok(is_contract) => is_contract,
Err(err) => {
return self.on_reprepare(
Some(err),
ReprepareReason::ErrorCheckingIfRecipientIsContract,
);
}
};
if !is_contract {
info!(
recipient=?self.message.recipient,
@ -177,56 +211,76 @@ impl PendingOperation for PendingMessage {
return PendingOperationResult::Drop;
}
let ism_address = op_try!(
self.ctx
.destination_mailbox
.recipient_ism(self.message.recipient)
.await,
"fetching ISM address. Potentially malformed recipient ISM address."
);
let ism_address = match self
.ctx
.destination_mailbox
.recipient_ism(self.message.recipient)
.await
{
Ok(ism_address) => ism_address,
Err(err) => {
return self.on_reprepare(Some(err), ReprepareReason::ErrorFetchingIsmAddress);
}
};
let message_metadata_builder = op_try!(
MessageMetadataBuilder::new(
ism_address,
&self.message,
self.ctx.metadata_builder.clone()
)
.await,
"getting the message metadata builder"
);
let message_metadata_builder = match MessageMetadataBuilder::new(
ism_address,
&self.message,
self.ctx.metadata_builder.clone(),
)
.await
{
Ok(message_metadata_builder) => message_metadata_builder,
Err(err) => {
return self.on_reprepare(Some(err), ReprepareReason::ErrorGettingMetadataBuilder);
}
};
let metadata = match message_metadata_builder
.build(ism_address, &self.message)
.await
{
Ok(metadata) => metadata,
Err(err) => {
return self.on_reprepare(Some(err), ReprepareReason::ErrorBuildingMetadata);
}
};
let Some(metadata) = op_try!(
message_metadata_builder
.build(ism_address, &self.message)
.await,
"building metadata"
) else {
info!("Could not fetch metadata");
return self.on_reprepare();
let Some(metadata) = metadata else {
return self.on_reprepare::<String>(None, ReprepareReason::CouldNotFetchMetadata);
};
// Estimate transaction costs for the process call. If there are issues, it's
// likely that gas estimation has failed because the message is
// reverting. This is defined behavior, so we just log the error and
// move onto the next tick.
let tx_cost_estimate = op_try!(
self.ctx
.destination_mailbox
.process_estimate_costs(&self.message, &metadata)
.await,
"estimating costs for process call"
);
let tx_cost_estimate = match self
.ctx
.destination_mailbox
.process_estimate_costs(&self.message, &metadata)
.await
{
Ok(metadata) => metadata,
Err(err) => {
return self.on_reprepare(Some(err), ReprepareReason::ErrorEstimatingGas);
}
};
// If the gas payment requirement hasn't been met, move to the next tick.
let Some(gas_limit) = op_try!(
self.ctx
.origin_gas_payment_enforcer
.message_meets_gas_payment_requirement(&self.message, &tx_cost_estimate)
.await,
"checking if message meets gas payment requirement"
) else {
warn!(?tx_cost_estimate, "Gas payment requirement not met yet");
return self.on_reprepare();
let gas_limit = match self
.ctx
.origin_gas_payment_enforcer
.message_meets_gas_payment_requirement(&self.message, &tx_cost_estimate)
.await
{
Ok(gas_limit) => gas_limit,
Err(err) => {
return self.on_reprepare(Some(err), ReprepareReason::ErrorCheckingGasRequirement);
}
};
let Some(gas_limit) = gas_limit else {
return self.on_reprepare::<String>(None, ReprepareReason::GasPaymentRequirementNotMet);
};
// Go ahead and attempt processing of message to destination chain.
@ -238,8 +292,8 @@ impl PendingOperation for PendingMessage {
if let Some(max_limit) = self.ctx.transaction_gas_limit {
if gas_limit > max_limit {
info!("Message delivery estimated gas exceeds max gas limit");
return self.on_reprepare();
// TODO: consider dropping instead of repreparing in this case
return self.on_reprepare::<String>(None, ReprepareReason::ExceedsMaxGasLimit);
}
}
@ -288,41 +342,41 @@ impl PendingOperation for PendingMessage {
}
async fn confirm(&mut self) -> PendingOperationResult {
make_op_try!(|| {
// Provider error; just try again later
// Note: this means that we are using `NotReady` for a retryable error case
self.inc_attempts();
PendingOperationResult::NotReady
});
if !self.is_ready() {
return PendingOperationResult::NotReady;
}
let is_delivered = op_try!(
self.ctx
.destination_mailbox
.delivered(self.message.id())
.await,
"Confirming message delivery"
);
let is_delivered = match self
.ctx
.destination_mailbox
.delivered(self.message.id())
.await
{
Ok(is_delivered) => is_delivered,
Err(err) => {
return self.on_reconfirm(Some(err), "Error confirming message delivery");
}
};
if is_delivered {
op_try!(
critical: self.record_message_process_success(),
"recording message process success"
);
if let Err(err) = self.record_message_process_success() {
return self
.on_reconfirm(Some(err), "Error when recording message process success");
}
info!(
submission=?self.submission_outcome,
"Message successfully processed"
);
PendingOperationResult::Success
} else {
warn!(
let span = info_span!(
"Error: Transaction attempting to process message either reverted or was reorged",
tx_outcome=?self.submission_outcome,
message_id=?self.message.id(),
"Transaction attempting to process message either reverted or was reorged"
message_id=?self.message.id()
);
self.on_reprepare()
self.on_reprepare::<String>(None, ReprepareReason::RevertedOrReorged)
.instrument(span)
.into_inner()
}
}
@ -395,7 +449,13 @@ impl PendingMessage {
ctx: Arc<MessageContext>,
app_context: Option<String>,
) -> Self {
let mut pm = Self::new(message, ctx, app_context);
let mut pm = Self::new(
message,
ctx,
// Since we don't persist the message status for now, assume it's the first attempt
PendingOperationStatus::FirstPrepareAttempt,
app_context,
);
match pm
.ctx
.origin_db
@ -414,10 +474,29 @@ impl PendingMessage {
pm
}
fn on_reprepare(&mut self) -> PendingOperationResult {
fn on_reprepare<E: Debug>(
&mut self,
err: Option<E>,
reason: ReprepareReason,
) -> PendingOperationResult {
self.inc_attempts();
self.submitted = false;
PendingOperationResult::Reprepare
if let Some(e) = err {
warn!(error = ?e, "Repreparing message: {}", reason.clone());
} else {
warn!("Repreparing message: {}", reason.clone());
}
PendingOperationResult::Reprepare(reason)
}
fn on_reconfirm<E: Debug>(&mut self, err: Option<E>, reason: &str) -> PendingOperationResult {
self.inc_attempts();
if let Some(e) = err {
warn!(error = ?e, id = ?self.id(), "Reconfirming message: {}", reason.clone());
} else {
warn!(id = ?self.id(), "Reconfirming message: {}", reason.clone());
}
PendingOperationResult::NotReady
}
fn is_ready(&self) -> bool {
@ -443,7 +522,6 @@ impl PendingMessage {
}
fn reset_attempts(&mut self) {
self.set_retries(0);
self.next_attempt_after = None;
self.last_attempted_at = Instant::now();
}
@ -484,8 +562,17 @@ impl PendingMessage {
i if (24..36).contains(&i) => 60 * 30,
// wait 60min for the next 12 attempts
i if (36..48).contains(&i) => 60 * 60,
// wait 3h for the next 12 attempts,
_ => 60 * 60 * 3,
// linearly increase the backoff time after 48 attempts,
// adding 1h for each additional attempt
_ => {
let hour: u64 = 60 * 60;
// To be extra safe, `max` to make sure it's at least 1 hour.
let target = hour.max((num_retries - 47) as u64 * hour);
// Schedule it at some random point in the next hour to
// avoid scheduling messages with the same # of retries
// at the exact same time.
target + (rand::random::<u64>() % hour)
}
}))
}
}

@ -8,6 +8,7 @@ use std::{
use async_trait::async_trait;
use derive_new::new;
use ethers::utils::hex;
use eyre::Result;
use hyperlane_base::{
db::{HyperlaneRocksDB, ProcessMessage},
@ -18,15 +19,19 @@ use prometheus::IntGauge;
use tokio::sync::mpsc::UnboundedSender;
use tracing::{debug, instrument, trace};
use super::{metadata::AppContextClassifier, pending_message::*};
use super::{blacklist::AddressBlacklist, metadata::AppContextClassifier, pending_message::*};
use crate::{processor::ProcessorExt, settings::matching_list::MatchingList};
/// Finds unprocessed messages from an origin and submits then through a channel
/// for to the appropriate destination.
#[allow(clippy::too_many_arguments)]
pub struct MessageProcessor {
whitelist: Arc<MatchingList>,
blacklist: Arc<MatchingList>,
/// A matching list of messages that should be whitelisted.
message_whitelist: Arc<MatchingList>,
/// A matching list of messages that should be blacklisted.
message_blacklist: Arc<MatchingList>,
/// Addresses that messages may not interact with.
address_blacklist: Arc<AddressBlacklist>,
metrics: MessageProcessorMetrics,
/// channel for each destination chain to send operations (i.e. message
/// submissions) to
@ -217,8 +222,8 @@ impl Debug for MessageProcessor {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"MessageProcessor {{ whitelist: {:?}, blacklist: {:?}, nonce_iterator: {:?}}}",
self.whitelist, self.blacklist, self.nonce_iterator
"MessageProcessor {{ message_whitelist: {:?}, message_blacklist: {:?}, address_blacklist: {:?}, nonce_iterator: {:?}}}",
self.message_whitelist, self.message_blacklist, self.address_blacklist, self.nonce_iterator
)
}
}
@ -247,14 +252,25 @@ impl ProcessorExt for MessageProcessor {
let destination = msg.destination;
// Skip if not whitelisted.
if !self.whitelist.msg_matches(&msg, true) {
debug!(?msg, whitelist=?self.whitelist, "Message not whitelisted, skipping");
if !self.message_whitelist.msg_matches(&msg, true) {
debug!(?msg, whitelist=?self.message_whitelist, "Message not whitelisted, skipping");
return Ok(());
}
// Skip if the message is blacklisted
if self.blacklist.msg_matches(&msg, false) {
debug!(?msg, blacklist=?self.blacklist, "Message blacklisted, skipping");
if self.message_whitelist.msg_matches(&msg, false) {
debug!(?msg, blacklist=?self.message_whitelist, "Message blacklisted, skipping");
return Ok(());
}
// Skip if the message involves a blacklisted address
if let Some(blacklisted_address) = self.address_blacklist.find_blacklisted_address(&msg)
{
debug!(
?msg,
blacklisted_address = hex::encode(blacklisted_address),
"Message involves blacklisted address, skipping"
);
return Ok(());
}
@ -291,18 +307,21 @@ impl ProcessorExt for MessageProcessor {
}
impl MessageProcessor {
#[allow(clippy::too_many_arguments)]
pub fn new(
db: HyperlaneRocksDB,
whitelist: Arc<MatchingList>,
blacklist: Arc<MatchingList>,
message_whitelist: Arc<MatchingList>,
message_blacklist: Arc<MatchingList>,
address_blacklist: Arc<AddressBlacklist>,
metrics: MessageProcessorMetrics,
send_channels: HashMap<u32, UnboundedSender<QueueOperation>>,
destination_ctxs: HashMap<u32, Arc<MessageContext>>,
metric_app_contexts: Vec<(MatchingList, String)>,
) -> Self {
Self {
whitelist,
blacklist,
message_whitelist,
message_blacklist,
address_blacklist,
metrics,
send_channels,
destination_ctxs,
@ -452,7 +471,6 @@ mod test {
false,
Arc::new(core_metrics),
db.clone(),
5,
IsmAwareAppContextClassifier::new(Arc::new(MockMailboxContract::default()), vec![]),
)
}
@ -478,6 +496,7 @@ mod test {
db.clone(),
Default::default(),
Default::default(),
Default::default(),
dummy_processor_metrics(origin_domain.id()),
HashMap::from([(destination_domain.id(), send_channel)]),
HashMap::from([(destination_domain.id(), message_context)]),

@ -33,6 +33,7 @@ use tracing::{error, info, info_span, instrument::Instrumented, warn, Instrument
use crate::{
merkle_tree::builder::MerkleTreeBuilder,
msg::{
blacklist::AddressBlacklist,
gas_payment::GasPaymentEnforcer,
metadata::{BaseMetadataBuilder, IsmAwareAppContextClassifier},
op_submitter::{SerialSubmitter, SerialSubmitterMetrics},
@ -70,8 +71,9 @@ pub struct Relayer {
prover_syncs: HashMap<HyperlaneDomain, Arc<RwLock<MerkleTreeBuilder>>>,
merkle_tree_hook_syncs: HashMap<HyperlaneDomain, Arc<dyn ContractSyncer<MerkleTreeInsertion>>>,
dbs: HashMap<HyperlaneDomain, HyperlaneRocksDB>,
whitelist: Arc<MatchingList>,
blacklist: Arc<MatchingList>,
message_whitelist: Arc<MatchingList>,
message_blacklist: Arc<MatchingList>,
address_blacklist: Arc<AddressBlacklist>,
transaction_gas_limit: Option<U256>,
skip_transaction_gas_limit_for: HashSet<u32>,
allow_local_checkpoint_syncers: bool,
@ -89,11 +91,12 @@ impl Debug for Relayer {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Relayer {{ origin_chains: {:?}, destination_chains: {:?}, whitelist: {:?}, blacklist: {:?}, transaction_gas_limit: {:?}, skip_transaction_gas_limit_for: {:?}, allow_local_checkpoint_syncers: {:?} }}",
"Relayer {{ origin_chains: {:?}, destination_chains: {:?}, message_whitelist: {:?}, message_blacklist: {:?}, address_blacklist: {:?}, transaction_gas_limit: {:?}, skip_transaction_gas_limit_for: {:?}, allow_local_checkpoint_syncers: {:?} }}",
self.origin_chains,
self.destination_chains,
self.whitelist,
self.blacklist,
self.message_whitelist,
self.message_blacklist,
self.address_blacklist,
self.transaction_gas_limit,
self.skip_transaction_gas_limit_for,
self.allow_local_checkpoint_syncers
@ -177,14 +180,16 @@ impl BaseAgent for Relayer {
.map(|(k, v)| (k, v as _))
.collect();
let whitelist = Arc::new(settings.whitelist);
let blacklist = Arc::new(settings.blacklist);
let message_whitelist = Arc::new(settings.whitelist);
let message_blacklist = Arc::new(settings.blacklist);
let address_blacklist = Arc::new(AddressBlacklist::new(settings.address_blacklist));
let skip_transaction_gas_limit_for = settings.skip_transaction_gas_limit_for;
let transaction_gas_limit = settings.transaction_gas_limit;
info!(
%whitelist,
%blacklist,
%message_whitelist,
%message_blacklist,
?address_blacklist,
?transaction_gas_limit,
?skip_transaction_gas_limit_for,
"Whitelist configuration"
@ -242,7 +247,6 @@ impl BaseAgent for Relayer {
settings.allow_local_checkpoint_syncers,
core.metrics.clone(),
db,
5,
IsmAwareAppContextClassifier::new(
mailboxes[destination].clone(),
settings.metric_app_contexts.clone(),
@ -276,8 +280,9 @@ impl BaseAgent for Relayer {
interchain_gas_payment_syncs,
prover_syncs,
merkle_tree_hook_syncs,
whitelist,
blacklist,
message_whitelist,
message_blacklist,
address_blacklist,
transaction_gas_limit,
skip_transaction_gas_limit_for,
allow_local_checkpoint_syncers: settings.allow_local_checkpoint_syncers,
@ -488,8 +493,9 @@ impl Relayer {
let message_processor = MessageProcessor::new(
self.dbs.get(origin).unwrap().clone(),
self.whitelist.clone(),
self.blacklist.clone(),
self.message_whitelist.clone(),
self.message_blacklist.clone(),
self.address_blacklist.clone(),
metrics,
send_channels,
destination_ctxs,

@ -8,6 +8,7 @@ use std::{collections::HashSet, path::PathBuf};
use convert_case::Case;
use derive_more::{AsMut, AsRef, Deref, DerefMut};
use ethers::utils::hex;
use eyre::{eyre, Context};
use hyperlane_base::{
impl_loadable_from_settings,
@ -46,6 +47,10 @@ pub struct RelayerSettings {
pub whitelist: MatchingList,
/// Filter for what messages to block.
pub blacklist: MatchingList,
/// Filter for what addresses to block interactions with.
/// This is intentionally not an H256 to allow for addresses of any length without
/// adding any padding.
pub address_blacklist: Vec<Vec<u8>>,
/// This is optional. If not specified, any amount of gas will be valid, otherwise this
/// is the max allowed gas in wei to relay a transaction.
pub transaction_gas_limit: Option<U256>,
@ -191,6 +196,14 @@ impl FromRawConf<RawRelayerSettings> for RelayerSettings {
.and_then(parse_matching_list)
.unwrap_or_default();
let address_blacklist = p
.chain(&mut err)
.get_opt_key("addressBlacklist")
.parse_string()
.end()
.map(|str| parse_address_list(str, &mut err, || &p.cwp + "address_blacklist"))
.unwrap_or_default();
let transaction_gas_limit = p
.chain(&mut err)
.get_opt_key("transactionGasLimit")
@ -268,6 +281,7 @@ impl FromRawConf<RawRelayerSettings> for RelayerSettings {
gas_payment_enforcement,
whitelist,
blacklist,
address_blacklist,
transaction_gas_limit,
skip_transaction_gas_limit_for,
allow_local_checkpoint_syncers,
@ -311,3 +325,53 @@ fn parse_matching_list(p: ValueParser) -> ConfigResult<MatchingList> {
err.into_result(ml)
}
/// Parses a comma-separated list of hex-encoded addresses into raw byte
/// vectors.
///
/// Each entry is trimmed and may optionally carry a `0x` prefix. Entries that
/// fail hex-decoding are recorded into `err` (via `take_err`, keyed by
/// `err_path`) and skipped, so all well-formed entries are still returned.
/// Addresses are kept as `Vec<u8>` rather than `H256` to support addresses of
/// any length without padding.
fn parse_address_list(
str: &str,
err: &mut ConfigParsingError,
err_path: impl Fn() -> ConfigPath,
) -> Vec<Vec<u8>> {
str.split(',')
.filter_map(|s| {
// Borrow-only normalization: no per-entry allocation is needed to
// strip whitespace and the optional `0x` prefix before decoding.
let s = s.trim();
let s = s.strip_prefix("0x").unwrap_or(s);
hex::decode(s).take_err(err, &err_path)
})
.collect_vec()
}
#[cfg(test)]
mod test {
use super::*;
use hyperlane_core::H160;

/// `parse_address_list` must return every well-formed address and record
/// (rather than abort on) malformed entries.
#[test]
fn test_parse_address_blacklist() {
let address_a = b"valid".to_vec();
let address_b = H160::random().as_bytes().to_vec();

// Well-formed input: both addresses parse and no error is recorded.
let input = format!("0x{}, {}", hex::encode(&address_a), hex::encode(&address_b));
let mut parse_err = ConfigParsingError::default();
let parsed = parse_address_list(&input, &mut parse_err, ConfigPath::default);
assert_eq!(parsed, vec![address_a.clone(), address_b.clone()]);
assert!(parse_err.is_ok());

// Trailing malformed entry: the valid addresses are still returned,
// but the parsing error is recorded.
let input = format!(
"0x{}, {}, 0xaazz",
hex::encode(&address_a),
hex::encode(&address_b)
);
let mut parse_err = ConfigParsingError::default();
let parsed = parse_address_list(&input, &mut parse_err, ConfigPath::default);
assert_eq!(parsed, vec![address_a, address_b]);
assert!(!parse_err.is_ok());
}
}

@ -94,6 +94,14 @@ const DOMAINS: &[RawDomain] = &[
is_test_net: false,
is_deprecated: false,
},
RawDomain {
name: "fraxtal",
token: "frxETH",
domain: 252,
chain_id: 252,
is_test_net: false,
is_deprecated: false,
},
RawDomain {
name: "fuji",
token: "AVAX",
@ -110,6 +118,14 @@ const DOMAINS: &[RawDomain] = &[
is_test_net: false,
is_deprecated: false,
},
RawDomain {
name: "linea",
token: "ETH",
domain: 59144,
chain_id: 59144,
is_test_net: false,
is_deprecated: false,
},
RawDomain {
name: "mantapacific",
token: "ETH",
@ -182,6 +198,14 @@ const DOMAINS: &[RawDomain] = &[
is_test_net: true,
is_deprecated: false,
},
RawDomain {
name: "sei",
token: "SEI",
domain: 1329,
chain_id: 1329,
is_test_net: false,
is_deprecated: false,
},
RawDomain {
name: "sepolia",
token: "ETH",

@ -4,7 +4,7 @@ use std::time::{Duration, Instant};
use std::vec;
use hyperlane_core::rpc_clients::call_and_retry_indefinitely;
use hyperlane_core::{ChainCommunicationError, ChainResult, MerkleTreeHook};
use hyperlane_core::{ChainResult, MerkleTreeHook};
use prometheus::IntGauge;
use tokio::time::sleep;
use tracing::{debug, error, info};
@ -61,17 +61,8 @@ impl ValidatorSubmitter {
/// Runs idly forever once the target checkpoint is reached to avoid exiting the task.
pub(crate) async fn backfill_checkpoint_submitter(self, target_checkpoint: Checkpoint) {
let mut tree = IncrementalMerkle::default();
call_and_retry_indefinitely(|| {
let target_checkpoint = target_checkpoint;
let self_clone = self.clone();
Box::pin(async move {
self_clone
.submit_checkpoints_until_correctness_checkpoint(&mut tree, &target_checkpoint)
.await?;
Ok(())
})
})
.await;
self.submit_checkpoints_until_correctness_checkpoint(&mut tree, &target_checkpoint)
.await;
info!(
?target_checkpoint,
@ -132,21 +123,8 @@ impl ValidatorSubmitter {
sleep(self.interval).await;
continue;
}
tree = call_and_retry_indefinitely(|| {
let mut tree = tree;
let self_clone = self.clone();
Box::pin(async move {
self_clone
.submit_checkpoints_until_correctness_checkpoint(
&mut tree,
&latest_checkpoint,
)
.await?;
Ok(tree)
})
})
.await;
self.submit_checkpoints_until_correctness_checkpoint(&mut tree, &latest_checkpoint)
.await;
self.metrics
.latest_checkpoint_processed
@ -162,7 +140,7 @@ impl ValidatorSubmitter {
&self,
tree: &mut IncrementalMerkle,
correctness_checkpoint: &Checkpoint,
) -> ChainResult<()> {
) {
// This should never be called with a tree that is ahead of the correctness checkpoint.
assert!(
!tree_exceeds_checkpoint(correctness_checkpoint, tree),
@ -182,7 +160,14 @@ impl ValidatorSubmitter {
while tree.count() as u32 <= correctness_checkpoint.index {
if let Some(insertion) = self
.message_db
.retrieve_merkle_tree_insertion_by_leaf_index(&(tree.count() as u32))?
.retrieve_merkle_tree_insertion_by_leaf_index(&(tree.count() as u32))
.unwrap_or_else(|err| {
panic!(
"Error fetching merkle tree insertion for leaf index {}: {}",
tree.count(),
err
)
})
{
debug!(
index = insertion.index(),
@ -225,9 +210,7 @@ impl ValidatorSubmitter {
?correctness_checkpoint,
"Incorrect tree root, something went wrong"
);
return Err(ChainCommunicationError::CustomError(
"Incorrect tree root, something went wrong".to_string(),
));
panic!("Incorrect tree root, something went wrong");
}
if !checkpoint_queue.is_empty() {
@ -236,57 +219,71 @@ impl ValidatorSubmitter {
queue_len = checkpoint_queue.len(),
"Reached tree consistency"
);
self.sign_and_submit_checkpoints(checkpoint_queue).await?;
self.sign_and_submit_checkpoints(checkpoint_queue).await;
info!(
index = checkpoint.index,
"Signed all queued checkpoints until index"
);
}
Ok(())
}
/// Signs and submits any previously unsubmitted checkpoints.
async fn sign_and_submit_checkpoints(
async fn sign_and_submit_checkpoint(
&self,
checkpoints: Vec<CheckpointWithMessageId>,
checkpoint: CheckpointWithMessageId,
) -> ChainResult<()> {
let last_checkpoint = checkpoints.as_slice()[checkpoints.len() - 1];
for queued_checkpoint in checkpoints {
let existing = self
.checkpoint_syncer
.fetch_checkpoint(queued_checkpoint.index)
.await?;
if existing.is_some() {
debug!(
index = queued_checkpoint.index,
"Checkpoint already submitted"
);
continue;
}
let signed_checkpoint = self.signer.sign(queued_checkpoint).await?;
self.checkpoint_syncer
.write_checkpoint(&signed_checkpoint)
.await?;
debug!(
index = queued_checkpoint.index,
"Signed and submitted checkpoint"
);
// TODO: move these into S3 implementations
// small sleep before signing next checkpoint to avoid rate limiting
sleep(Duration::from_millis(100)).await;
let existing = self
.checkpoint_syncer
.fetch_checkpoint(checkpoint.index)
.await?;
if existing.is_some() {
debug!(index = checkpoint.index, "Checkpoint already submitted");
return Ok(());
}
let signed_checkpoint = self.signer.sign(checkpoint).await?;
self.checkpoint_syncer
.update_latest_index(last_checkpoint.index)
.write_checkpoint(&signed_checkpoint)
.await?;
debug!(index = checkpoint.index, "Signed and submitted checkpoint");
// TODO: move these into S3 implementations
// small sleep before signing next checkpoint to avoid rate limiting
sleep(Duration::from_millis(100)).await;
Ok(())
}
/// Signs and submits any previously unsubmitted checkpoints.
// Signs and submits every checkpoint in `checkpoints`, then advances the
// syncer's latest-index marker to the final (highest-index) checkpoint.
// Callers guarantee `checkpoints` is non-empty, otherwise the index below
// panics. Each submission is retried indefinitely, so this only returns
// once everything has been durably written.
async fn sign_and_submit_checkpoints(&self, checkpoints: Vec<CheckpointWithMessageId>) {
// Capture the highest-index checkpoint before the Vec is consumed; it is
// used at the end to bump the syncer's latest index exactly once.
let last_checkpoint = checkpoints.as_slice()[checkpoints.len() - 1];
// Submits checkpoints to the store in reverse order. This speeds up processing historic checkpoints (those before the validator is spun up),
// since those are the most likely to make messages become processable.
// A side effect is that new checkpoints will also be submitted in reverse order.
for queued_checkpoint in checkpoints.into_iter().rev() {
// certain checkpoint stores rate limit very aggressively, so we retry indefinitely
// NOTE: the retry factory clones `self` on every attempt so each retry
// gets a fresh future; `queued_checkpoint` is captured by copy.
call_and_retry_indefinitely(|| {
let self_clone = self.clone();
Box::pin(async move {
self_clone
.sign_and_submit_checkpoint(queued_checkpoint)
.await?;
Ok(())
})
})
.await;
}
// Only after all checkpoints are stored do we advance the latest index,
// again retrying indefinitely so the marker is never silently dropped.
call_and_retry_indefinitely(|| {
let self_clone = self.clone();
Box::pin(async move {
self_clone
.checkpoint_syncer
.update_latest_index(last_checkpoint.index)
.await?;
Ok(())
})
})
.await;
}
}
/// Returns whether the tree exceeds the checkpoint.

@ -1,6 +1,5 @@
use async_trait::async_trait;
use base64::{engine::general_purpose::STANDARD as BASE64, Engine};
use futures::future;
use hyperlane_core::{
ChainCommunicationError, ChainResult, ContractLocator, HyperlaneChain, HyperlaneContract,
HyperlaneDomain, HyperlaneProvider, Indexed, Indexer, InterchainGasPaymaster,
@ -9,12 +8,15 @@ use hyperlane_core::{
use once_cell::sync::Lazy;
use std::ops::RangeInclusive;
use tendermint::abci::EventAttribute;
use tracing::{instrument, warn};
use tracing::instrument;
use crate::{
rpc::{CosmosWasmIndexer, ParsedEvent, WasmIndexer},
signers::Signer,
utils::{CONTRACT_ADDRESS_ATTRIBUTE_KEY, CONTRACT_ADDRESS_ATTRIBUTE_KEY_BASE64},
utils::{
execute_and_parse_log_futures, CONTRACT_ADDRESS_ATTRIBUTE_KEY,
CONTRACT_ADDRESS_ATTRIBUTE_KEY_BASE64,
},
ConnectionConf, CosmosProvider, HyperlaneCosmosError,
};
@ -223,26 +225,7 @@ impl Indexer<InterchainGasPayment> for CosmosInterchainGasPaymasterIndexer {
})
.collect();
// TODO: this can be refactored when we rework indexing, to be part of the block-by-block indexing
let result = future::join_all(logs_futures)
.await
.into_iter()
.flatten()
.map(|(logs, block_number)| {
if let Err(err) = &logs {
warn!(?err, ?block_number, "Failed to fetch logs for block");
}
logs
})
// Propagate errors from any of the queries. This will cause the entire range to be retried,
// including successful ones, but we don't have a way to handle partial failures in a range for now.
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.flatten()
.map(|(log, meta)| (Indexed::new(log), meta))
.collect();
Ok(result)
execute_and_parse_log_futures(logs_futures).await
}
async fn get_finalized_block_number(&self) -> ChainResult<u32> {

@ -1,5 +1,4 @@
use base64::{engine::general_purpose::STANDARD as BASE64, Engine};
use futures::future;
use std::{
fmt::{Debug, Formatter},
io::Cursor,
@ -8,14 +7,15 @@ use std::{
str::FromStr,
};
use crate::payloads::mailbox::{
GeneralMailboxQuery, ProcessMessageRequest, ProcessMessageRequestInner,
};
use crate::payloads::{general, mailbox};
use crate::rpc::{CosmosWasmIndexer, ParsedEvent, WasmIndexer};
use crate::CosmosProvider;
use crate::{address::CosmosAddress, types::tx_response_to_outcome};
use crate::{grpc::WasmProvider, HyperlaneCosmosError};
use crate::{
payloads::mailbox::{GeneralMailboxQuery, ProcessMessageRequest, ProcessMessageRequestInner},
utils::execute_and_parse_log_futures,
};
use crate::{signers::Signer, utils::get_block_height_for_lag, ConnectionConf};
use async_trait::async_trait;
use cosmrs::proto::cosmos::base::abci::v1beta1::TxResponse;
@ -371,23 +371,7 @@ impl Indexer<HyperlaneMessage> for CosmosMailboxIndexer {
})
.collect();
// TODO: this can be refactored when we rework indexing, to be part of the block-by-block indexing
let result = future::join_all(logs_futures)
.await
.into_iter()
.flatten()
.filter_map(|(logs_res, block_number)| match logs_res {
Ok(logs) => Some(logs),
Err(err) => {
warn!(?err, ?block_number, "Failed to fetch logs for block");
None
}
})
.flatten()
.map(|(log, meta)| (log.into(), meta))
.collect();
Ok(result)
execute_and_parse_log_futures(logs_futures).await
}
async fn get_finalized_block_number(&self) -> ChainResult<u32> {

@ -2,7 +2,6 @@ use std::{fmt::Debug, num::NonZeroU64, ops::RangeInclusive, str::FromStr};
use async_trait::async_trait;
use base64::{engine::general_purpose::STANDARD as BASE64, Engine};
use futures::future;
use hyperlane_core::{
accumulator::incremental::IncrementalMerkle, ChainCommunicationError, ChainResult, Checkpoint,
ContractLocator, HyperlaneChain, HyperlaneContract, HyperlaneDomain, HyperlaneProvider,
@ -10,17 +9,14 @@ use hyperlane_core::{
};
use once_cell::sync::Lazy;
use tendermint::abci::EventAttribute;
use tracing::{instrument, warn};
use tracing::instrument;
use crate::{
grpc::WasmProvider,
payloads::{
general::{self},
merkle_tree_hook,
},
payloads::{general, merkle_tree_hook},
rpc::{CosmosWasmIndexer, ParsedEvent, WasmIndexer},
utils::{
get_block_height_for_lag, CONTRACT_ADDRESS_ATTRIBUTE_KEY,
execute_and_parse_log_futures, get_block_height_for_lag, CONTRACT_ADDRESS_ATTRIBUTE_KEY,
CONTRACT_ADDRESS_ATTRIBUTE_KEY_BASE64,
},
ConnectionConf, CosmosProvider, HyperlaneCosmosError, Signer,
@ -304,23 +300,7 @@ impl Indexer<MerkleTreeInsertion> for CosmosMerkleTreeHookIndexer {
})
.collect();
// TODO: this can be refactored when we rework indexing, to be part of the block-by-block indexing
let result = future::join_all(logs_futures)
.await
.into_iter()
.flatten()
.filter_map(|(logs_res, block_number)| match logs_res {
Ok(logs) => Some(logs),
Err(err) => {
warn!(?err, ?block_number, "Failed to fetch logs for block");
None
}
})
.flatten()
.map(|(log, meta)| (log.into(), meta))
.collect();
Ok(result)
execute_and_parse_log_futures(logs_futures).await
}
/// Get the chain's latest block number that has reached finality

@ -277,7 +277,7 @@ impl WasmGrpcProvider {
let raw_tx = TxRaw {
body_bytes: sign_doc.body_bytes,
auth_info_bytes: sign_doc.auth_info_bytes,
// The poorly documented trick to simuluating a tx without a valid signature is to just pass
// The poorly documented trick to simulating a tx without a valid signature is to just pass
// in a single empty signature. Taken from cosmjs:
// https://github.com/cosmos/cosmjs/blob/44893af824f0712d1f406a8daa9fcae335422235/packages/stargate/src/modules/tx/queries.ts#L67
signatures: vec![vec![]],

@ -1,6 +1,5 @@
use async_trait::async_trait;
use cosmrs::rpc::client::Client;
use hyperlane_core::rpc_clients::call_with_retry;
use hyperlane_core::{ChainCommunicationError, ChainResult, ContractLocator, LogMeta, H256, U256};
use sha256::digest;
use std::fmt::Debug;
@ -216,9 +215,7 @@ impl CosmosWasmIndexer {
impl WasmIndexer for CosmosWasmIndexer {
#[instrument(err, skip(self))]
async fn get_finalized_block_number(&self) -> ChainResult<u32> {
let latest_block =
call_with_retry(move || Box::pin(Self::get_latest_block(self.provider.rpc().clone())))
.await?;
let latest_block = Self::get_latest_block(self.provider.rpc().clone()).await?;
let latest_height: u32 = latest_block
.block
.header
@ -242,11 +239,11 @@ impl WasmIndexer for CosmosWasmIndexer {
let client = self.provider.rpc().clone();
debug!(?block_number, cursor_label, domain=?self.provider.domain, "Getting logs in block");
let (block, block_results) = tokio::join!(
call_with_retry(|| { Box::pin(Self::get_block(client.clone(), block_number)) }),
call_with_retry(|| { Box::pin(Self::get_block_results(client.clone(), block_number)) }),
);
// The two calls below could be made in parallel, but on cosmos rate limiting is a bigger problem
// than indexing latency, so we do them sequentially.
let block = Self::get_block(client.clone(), block_number).await?;
let block_results = Self::get_block_results(client.clone(), block_number).await?;
Ok(self.handle_txs(block?, block_results?, parser, cursor_label))
Ok(self.handle_txs(block, block_results, parser, cursor_label))
}
}

@ -1,8 +1,11 @@
use std::num::NonZeroU64;
use base64::{engine::general_purpose::STANDARD as BASE64, Engine};
use hyperlane_core::ChainResult;
use futures::future;
use hyperlane_core::{ChainCommunicationError, ChainResult, Indexed, LogMeta};
use once_cell::sync::Lazy;
use tokio::task::JoinHandle;
use tracing::warn;
use crate::grpc::{WasmGrpcProvider, WasmProvider};
@ -31,6 +34,32 @@ pub(crate) async fn get_block_height_for_lag(
Ok(block_height)
}
#[allow(clippy::type_complexity)]
/// Awaits the per-block log-fetch tasks and flattens their results into a
/// single indexed log list. Any task that panicked is dropped (via the
/// `flatten` over `JoinHandle` results); the first fetch error encountered
/// aborts and is propagated to the caller.
pub(crate) async fn execute_and_parse_log_futures<T: Into<Indexed<T>>>(
    logs_futures: Vec<JoinHandle<(Result<Vec<(T, LogMeta)>, ChainCommunicationError>, u32)>>,
) -> ChainResult<Vec<(Indexed<T>, LogMeta)>> {
    // TODO: this can be refactored when we rework indexing, to be part of the block-by-block indexing
    let mut parsed = Vec::new();
    for (logs, block_number) in future::join_all(logs_futures).await.into_iter().flatten() {
        if let Err(err) = &logs {
            warn!(?err, ?block_number, "Failed to fetch logs for block");
        }
        // Propagate errors from any of the queries. This will cause the entire range to be retried,
        // including successful ones, but we don't have a way to handle partial failures in a range for now.
        // This is also why cosmos indexing should be run with small chunks (currently set to 5).
        for (log, meta) in logs? {
            parsed.push((log.into(), meta));
        }
    }
    Ok(parsed)
}
#[cfg(test)]
/// Helper function to create a Vec<EventAttribute> from a JSON string -
/// crate::payloads::general::EventAttribute has a Deserialize impl while

@ -514,6 +514,60 @@
},
"validatorAnnounce": "0xCe74905e51497b4adD3639366708b821dcBcff96"
},
"fraxtal": {
"aggregationHook": "0xD7ff06cDd83642D648baF0d36f77e79349120dA4",
"blockExplorers": [
{
"apiUrl": "https://api.fraxscan.com/api",
"family": "etherscan",
"name": "Fraxscan",
"url": "https://fraxscan.com"
}
],
"blocks": {
"confirmations": 1,
"estimateBlockTime": 2,
"reorgPeriod": 1
},
"chainId": 252,
"displayName": "Fraxtal",
"domainId": 252,
"domainRoutingIsm": "0x0CA20946c1b7367Bd47C0a35E8feD23a4Ff59B9a",
"domainRoutingIsmFactory": "0x3a464f746D23Ab22155710f44dB16dcA53e0775E",
"fallbackRoutingHook": "0xC077A0Cc408173349b1c9870C667B40FE3C01dd7",
"gasCurrencyCoinGeckoId": "frax-ether",
"index": {
"from": 5350807
},
"interchainGasPaymaster": "0x2Fca7f6eC3d4A0408900f2BB30004d4616eE985E",
"interchainSecurityModule": "0xf1465DB845d0978e74d45EF195734b43bB739094",
"mailbox": "0x2f9DB5616fa3fAd1aB06cB2C906830BA63d135e3",
"merkleTreeHook": "0x8358D8291e3bEDb04804975eEa0fe9fe0fAfB147",
"name": "fraxtal",
"nativeToken": {
"decimals": 18,
"name": "Frax Ether",
"symbol": "frxETH"
},
"pausableHook": "0x4E1c88DD261BEe2941e6c1814597e30F53330428",
"pausableIsm": "0x26f32245fCF5Ad53159E875d5Cae62aEcf19c2d4",
"protocol": "ethereum",
"protocolFee": "0xD1E267d2d7876e97E217BfE61c34AB50FEF52807",
"proxyAdmin": "0x3a867fCfFeC2B790970eeBDC9023E75B0a172aa7",
"rpcUrls": [
{
"http": "https://rpc.frax.com"
}
],
"staticAggregationHookFactory": "0xeA87ae93Fa0019a82A727bfd3eBd1cFCa8f64f1D",
"staticAggregationIsm": "0xcA26D50602efA9d835b01A142Ae218f59aa60433",
"staticAggregationIsmFactory": "0x2f2aFaE1139Ce54feFC03593FeE8AB2aDF4a85A7",
"staticMerkleRootMultisigIsmFactory": "0x0761b0827849abbf7b0cC09CE14e1C93D87f5004",
"staticMessageIdMultisigIsmFactory": "0x4Ed7d626f1E96cD1C0401607Bf70D95243E3dEd1",
"storageGasOracle": "0x5060eCD5dFAD300A90592C04e504600A7cdcF70b",
"testRecipient": "0x62B7592C1B6D1E43f4630B8e37f4377097840C05",
"validatorAnnounce": "0x1956848601549de5aa0c887892061fA5aB4f6fC4"
},
"gnosis": {
"aggregationHook": "0xdD1FA1C12496474c1dDC67a658Ba81437F818861",
"blockExplorers": [
@ -661,7 +715,7 @@
}
],
"index": {
"chunk": 25,
"chunk": 5,
"from": 58419500
},
"interchainGasPaymaster": "0x27ae52298e5b53b34b7ae0ca63e05845c31e1f59",
@ -688,6 +742,61 @@
"slip44": 118,
"validatorAnnounce": "0x1fb225b2fcfbe75e614a1d627de97ff372242eed"
},
"linea": {
"aggregationHook": "0x43fF73dF1E170D076D9Ed30d4C6922A9D34322dE",
"blockExplorers": [
{
"apiUrl": "https://api.lineascan.build/api",
"family": "etherscan",
"name": "LineaScan",
"url": "https://lineascan.build"
}
],
"blocks": {
"confirmations": 1,
"estimateBlockTime": 3,
"reorgPeriod": 1
},
"chainId": 59144,
"displayName": "Linea",
"domainId": 59144,
"domainRoutingIsm": "0x6faCF71D804964Ca62f16e56DE74d7dF38FdC3F0",
"domainRoutingIsmFactory": "0x3a464f746D23Ab22155710f44dB16dcA53e0775E",
"fallbackRoutingHook": "0x4E1c88DD261BEe2941e6c1814597e30F53330428",
"gasCurrencyCoinGeckoId": "ethereum",
"gnosisSafeTransactionServiceUrl": "https://transaction.safe.linea.build",
"index": {
"from": 5154574
},
"interchainGasPaymaster": "0x8105a095368f1a184CceA86cCe21318B5Ee5BE28",
"interchainSecurityModule": "0xF8aD4EB8aBA13ae546B8D01501c63e4543Ff0660",
"mailbox": "0x02d16BC51af6BfD153d67CA61754cF912E82C4d9",
"merkleTreeHook": "0xC077A0Cc408173349b1c9870C667B40FE3C01dd7",
"name": "linea",
"nativeToken": {
"decimals": 18,
"name": "Ether",
"symbol": "ETH"
},
"pausableHook": "0x5060eCD5dFAD300A90592C04e504600A7cdcF70b",
"pausableIsm": "0x01aA8200936B475762Ee28D38B43a6cFe9076E52",
"protocol": "ethereum",
"protocolFee": "0x7556a0E61d577D921Cba8Fca0d7D6299d36E607E",
"proxyAdmin": "0x7f50C5776722630a0024fAE05fDe8b47571D7B39",
"rpcUrls": [
{
"http": "https://rpc.linea.build"
}
],
"staticAggregationHookFactory": "0xeA87ae93Fa0019a82A727bfd3eBd1cFCa8f64f1D",
"staticAggregationIsm": "0xF8aD4EB8aBA13ae546B8D01501c63e4543Ff0660",
"staticAggregationIsmFactory": "0x2f2aFaE1139Ce54feFC03593FeE8AB2aDF4a85A7",
"staticMerkleRootMultisigIsmFactory": "0x0761b0827849abbf7b0cC09CE14e1C93D87f5004",
"staticMessageIdMultisigIsmFactory": "0x4Ed7d626f1E96cD1C0401607Bf70D95243E3dEd1",
"storageGasOracle": "0x781bE492F1232E66990d83a9D3AC3Ec26f56DAfB",
"testRecipient": "0x273Bc6b01D9E88c064b6E5e409BdF998246AEF42",
"validatorAnnounce": "0x62B7592C1B6D1E43f4630B8e37f4377097840C05"
},
"mantapacific": {
"aggregationHook": "0x8464aF853363B8d6844070F68b0AB34Cb6523d0F",
"blockExplorers": [
@ -898,7 +1007,7 @@
}
],
"index": {
"chunk": 50,
"chunk": 5,
"from": 4000000
},
"interchainGasPaymaster": "0x504ee9ac43ec5814e00c7d21869a90ec52becb489636bdf893b7df9d606b5d67",
@ -992,13 +1101,25 @@
},
"osmosis": {
"bech32Prefix": "osmo",
"blockExplorers": [
{
"apiUrl": "https://www.mintscan.io/osmosis",
"family": "other",
"name": "Mintscan",
"url": "https://www.mintscan.io/osmosis"
}
],
"blocks": {
"confirmations": 1,
"estimateBlockTime": 3,
"reorgPeriod": 1
},
"canonicalAsset": "uosmo",
"chainId": "osmosis-1",
"contractAddressBytes": 32,
"domainId": "875",
"displayName": "Osmosis",
"domainId": 875,
"gasCurrencyCoinGeckoId": "osmosis",
"gasPrice": {
"amount": "0.025",
"denom": "uosmo"
@ -1009,18 +1130,37 @@
}
],
"index": {
"chunk": 5,
"from": 14389169
},
"interchainGasPaymaster": "0xd20a9dcf61939fc2fe6ad501b9457b1029b3cc7ab12ed72675ea2e10d831ee5d",
"isTestnet": false,
"mailbox": "0x9493e39d85dd038022f97d88aba6bff98d98f9a016b4f2e498bf1d9898420172",
"merkleTreeHook": "0x8920e062ee5ed8afccbc155d13ea9049296399ee41403655864fcd243edc7388",
"name": "osmosis1",
"name": "osmosis",
"nativeToken": {
"decimals": 6,
"denom": "uosmo",
"name": "Osmosis",
"symbol": "OSMO"
},
"protocol": "cosmos",
"restUrls": [
{
"http": "https://osmosis-rest.publicnode.com"
}
],
"rpcUrls": [
{
"http": "https://osmosis-rpc.publicnode.com:443"
"http": "https://osmosis-rpc.publicnode.com"
}
],
"signer": {
"key": "0x5486418967eabc770b0fcb995f7ef6d9a72f7fc195531ef76c5109f44f51af26",
"prefix": "osmo",
"type": "cosmosKey"
},
"slip44": 118,
"validatorAnnounce": "0xaf867da5b09a20ee49161d57f99477c0c42d100f34eb53da0d2eb7fc6c257235"
},
"polygon": {
@ -1152,6 +1292,9 @@
"storageGasOracle": "0x19dc38aeae620380430C200a6E990D5Af5480117",
"testRecipient": "0xD127D4549cb4A5B2781303a4fE99a10EAd13263A",
"timelockController": "0x0000000000000000000000000000000000000000",
"transactionOverrides": {
"gasPrice": 1000000000
},
"validatorAnnounce": "0x2fa5F5C96419C222cDbCeC797D696e6cE428A7A9"
},
"redstone": {
@ -1270,6 +1413,63 @@
},
"validatorAnnounce": "0xd83A4F747fE80Ed98839e05079B1B7Fe037b1638"
},
"sei": {
"aggregationHook": "0x40514BD46C57455933Be8BAedE96C4F0Ba3507D6",
"blockExplorers": [
{
"apiUrl": "https://seitrace.com/pacific-1/api",
"family": "etherscan",
"name": "Seitrace",
"url": "https://seitrace.com"
}
],
"blocks": {
"confirmations": 1,
"estimateBlockTime": 1,
"reorgPeriod": 1
},
"chainId": 1329,
"displayName": "Sei",
"domainId": 1329,
"domainRoutingIsm": "0xBD70Ea9D599a0FC8158B026797177773C3445730",
"domainRoutingIsmFactory": "0x1052eF3419f26Bec74Ed7CEf4a4FA6812Bc09908",
"fallbackRoutingHook": "0xB3fCcD379ad66CED0c91028520C64226611A48c9",
"gasCurrencyCoinGeckoId": "sei-network",
"index": {
"from": 80809403
},
"interchainGasPaymaster": "0xFC62DeF1f08793aBf0E67f69257c6be258194F72",
"interchainSecurityModule": "0x10FF77061869714E92E574FcE9025a419f5b999d",
"mailbox": "0x2f2aFaE1139Ce54feFC03593FeE8AB2aDF4a85A7",
"merkleTreeHook": "0xca1b69fA4c4a7c7fD839bC50867c589592bcfe49",
"name": "sei",
"nativeToken": {
"decimals": 18,
"name": "Sei",
"symbol": "SEI"
},
"pausableHook": "0xea820f9BCFD5E16a0dd42071EB61A29874Ad81A4",
"pausableIsm": "0x931dFCc8c1141D6F532FD023bd87DAe0080c835d",
"protocol": "ethereum",
"protocolFee": "0x83c2DB237e93Ce52565AB110124f78fdf159E3f4",
"proxyAdmin": "0x0761b0827849abbf7b0cC09CE14e1C93D87f5004",
"rpcUrls": [
{
"http": "https://evm-rpc.sei-apis.com"
}
],
"staticAggregationHookFactory": "0xEb9FcFDC9EfDC17c1EC5E1dc085B98485da213D6",
"staticAggregationIsm": "0x596eCC936068AeBD836e79D530043b868569a61B",
"staticAggregationIsmFactory": "0x8F7454AC98228f3504Bb91eA3D8Adafe6406110A",
"staticMerkleRootMultisigIsmFactory": "0x2C1FAbEcd7bFBdEBF27CcdB67baADB38b6Df90fC",
"staticMessageIdMultisigIsmFactory": "0x8b83fefd896fAa52057798f6426E9f0B080FCCcE",
"storageGasOracle": "0x26f32245fCF5Ad53159E875d5Cae62aEcf19c2d4",
"testRecipient": "0xdB670e1a1e312BF17425b08cE55Bdf2cD8F8eD54",
"transactionOverrides": {
"gasPrice": 2000000000
},
"validatorAnnounce": "0x5332D1AC0A626D265298c14ff681c0A8D28dB86d"
},
"viction": {
"aggregationHook": "0x5c7890FAf9c99dC55926F00d624D7Bc6D7ac6834",
"blockExplorers": [

@ -23,17 +23,17 @@ ed25519-dalek.workspace = true
ethers.workspace = true
eyre.workspace = true
fuels.workspace = true
futures.worksapce = true
futures.workspace = true
futures-util.workspace = true
itertools.workspace = true
maplit.workspace = true
mockall.worksapce = true
mockall.workspace = true
paste.workspace = true
prometheus.workspace = true
rocksdb.workspace = true
serde.workspace = true
serde_json.workspace = true
solana-sdk.worksapce = true
solana-sdk.workspace = true
static_assertions.workspace = true
tempfile = { workspace = true, optional = true }
thiserror.workspace = true
@ -46,7 +46,6 @@ url.workspace = true
warp.workspace = true
ya-gcp.workspace = true
backtrace = { workspace = true, optional = true }
backtrace-oneline = { path = "../utils/backtrace-oneline", optional = true }
@ -58,7 +57,6 @@ hyperlane-sealevel = { path = "../chains/hyperlane-sealevel" }
hyperlane-cosmos = { path = "../chains/hyperlane-cosmos"}
hyperlane-test = { path = "../hyperlane-test" }
# dependency version is determined by ethers
rusoto_core = "*"
rusoto_kms = "*"

@ -546,10 +546,10 @@ pub(crate) mod test {
pub sequence: u32,
}
impl Into<Indexed<MockSequencedData>> for MockSequencedData {
fn into(self) -> Indexed<MockSequencedData> {
let sequence = self.sequence;
Indexed::new(self).with_sequence(sequence)
impl From<MockSequencedData> for Indexed<MockSequencedData> {
fn from(val: MockSequencedData) -> Self {
let sequence = val.sequence;
Indexed::new(val).with_sequence(sequence)
}
}

@ -7,7 +7,7 @@ use hyperlane_core::{
GasPaymentKey, HyperlaneDomain, HyperlaneLogStore, HyperlaneMessage,
HyperlaneSequenceAwareIndexerStoreReader, HyperlaneWatermarkedLogStore, Indexed,
InterchainGasExpenditure, InterchainGasPayment, InterchainGasPaymentMeta, LogMeta,
MerkleTreeInsertion, H256,
MerkleTreeInsertion, PendingOperationStatus, H256,
};
use super::{
@ -27,6 +27,7 @@ const HIGHEST_SEEN_MESSAGE_NONCE: &str = "highest_seen_message_nonce_";
const GAS_PAYMENT_FOR_MESSAGE_ID: &str = "gas_payment_sequence_for_message_id_v2_";
const GAS_PAYMENT_META_PROCESSED: &str = "gas_payment_meta_processed_v3_";
const GAS_EXPENDITURE_FOR_MESSAGE_ID: &str = "gas_expenditure_for_message_id_v2_";
const STATUS_BY_MESSAGE_ID: &str = "status_by_message_id_";
const PENDING_MESSAGE_RETRY_COUNT_FOR_MESSAGE_ID: &str =
"pending_message_retry_count_for_message_id_";
const MERKLE_TREE_INSERTION: &str = "merkle_tree_insertion_";
@ -501,6 +502,13 @@ make_store_and_retrieve!(pub(self), dispatched_block_number_by_nonce, MESSAGE_DI
make_store_and_retrieve!(pub, processed_by_nonce, NONCE_PROCESSED, u32, bool);
make_store_and_retrieve!(pub(self), processed_by_gas_payment_meta, GAS_PAYMENT_META_PROCESSED, InterchainGasPaymentMeta, bool);
make_store_and_retrieve!(pub(self), interchain_gas_expenditure_data_by_message_id, GAS_EXPENDITURE_FOR_MESSAGE_ID, H256, InterchainGasExpenditureData);
make_store_and_retrieve!(
pub,
status_by_message_id,
STATUS_BY_MESSAGE_ID,
H256,
PendingOperationStatus
);
make_store_and_retrieve!(pub(self), interchain_gas_payment_data_by_gas_payment_key, GAS_PAYMENT_FOR_MESSAGE_ID, GasPaymentKey, InterchainGasPaymentData);
make_store_and_retrieve!(pub(self), gas_payment_by_sequence, GAS_PAYMENT_BY_SEQUENCE, u32, InterchainGasPayment);
make_store_and_retrieve!(pub(self), gas_payment_block_by_sequence, GAS_PAYMENT_BY_SEQUENCE, u32, u64);

@ -12,6 +12,9 @@ pub mod settings;
mod agent;
pub use agent::*;
/// The local database used by agents
pub mod db;
pub mod metrics;
pub use metrics::*;
@ -28,8 +31,5 @@ pub use traits::*;
mod types;
pub use types::*;
/// Hyperlane database utils
pub mod db;
#[cfg(feature = "oneline-eyre")]
pub mod oneline_eyre;

@ -132,7 +132,7 @@ impl CoreMetrics {
"Submitter queue length",
const_labels_ref
),
&["remote", "queue_name", "app_context"],
&["remote", "queue_name", "operation_status", "app_context"],
registry
)?;

@ -65,9 +65,6 @@
pub use base::*;
pub use chains::*;
pub use checkpoint_syncer::*;
/// Export this so they don't need to import paste.
#[doc(hidden)]
pub use paste;
pub use signers::*;
pub use trace::*;

@ -37,6 +37,7 @@ serde = { workspace = true }
serde_json = { workspace = true }
sha3 = { workspace = true }
strum = { workspace = true, optional = true, features = ["derive"] }
strum_macros = { workspace = true, optional = true }
thiserror = { workspace = true }
tokio = { workspace = true, optional = true, features = ["rt", "time"] }
tracing.workspace = true

@ -7,7 +7,7 @@ use crate::accumulator::{
H256, TREE_DEPTH, ZERO_HASHES,
};
#[derive(BorshDeserialize, BorshSerialize, Debug, Clone, Copy, new, PartialEq, Eq)]
#[derive(BorshDeserialize, BorshSerialize, Debug, Clone, new, PartialEq, Eq)]
/// An incremental merkle tree, modeled on the eth2 deposit contract
pub struct IncrementalMerkle {
/// The branch of the tree

@ -80,6 +80,8 @@ pub enum KnownHyperlaneDomain {
Neutron = 1853125230,
Osmosis = 875,
Injective = 6909546,
InEvm = 2525,
@ -97,6 +99,12 @@ pub enum KnownHyperlaneDomain {
PlumeTestnet = 161221135,
Fraxtal = 252,
Linea = 59144,
Sei = 1329,
// -- Local test chains --
/// Test1 local chain
Test1 = 13371,
@ -229,7 +237,7 @@ impl KnownHyperlaneDomain {
Mainnet: [
Ethereum, Avalanche, Arbitrum, Polygon, Optimism, BinanceSmartChain, Celo,
Moonbeam, Gnosis, MantaPacific, Neutron, Injective, InEvm, Ancient8, Blast,
Mode, Redstone, Viction, Zetachain
Mode, Redstone, Viction, Zetachain, Fraxtal, Linea, Sei, Osmosis
],
Testnet: [
Alfajores, MoonbaseAlpha, Sepolia, ScrollSepolia, Chiado, PlumeTestnet, Fuji, BinanceSmartChainTestnet, Holesky
@ -247,11 +255,12 @@ impl KnownHyperlaneDomain {
Optimism, BinanceSmartChain, BinanceSmartChainTestnet, Celo, Gnosis,
Alfajores, Moonbeam, InEvm, Ancient8, Blast, Mode, Redstone, Viction,
Zetachain, MoonbaseAlpha, ScrollSepolia, Chiado, MantaPacific, PlumeTestnet,
Fraxtal, Linea, Sei,
Test1, Test2, Test3
],
HyperlaneDomainProtocol::Fuel: [FuelTest1],
HyperlaneDomainProtocol::Sealevel: [SealevelTest1, SealevelTest2],
HyperlaneDomainProtocol::Cosmos: [CosmosTest99990, CosmosTest99991, Neutron, Injective],
HyperlaneDomainProtocol::Cosmos: [CosmosTest99990, CosmosTest99991, Neutron, Osmosis, Injective],
})
}
@ -264,7 +273,7 @@ impl KnownHyperlaneDomain {
Ethereum, Sepolia, Holesky, Polygon, Avalanche, Fuji, Optimism,
BinanceSmartChain, BinanceSmartChainTestnet, Celo, Gnosis, Alfajores, Moonbeam, MoonbaseAlpha,
ScrollSepolia, Chiado, MantaPacific, Neutron, Injective, InEvm, Ancient8, Blast, Mode, Redstone,
Viction, Zetachain,
Viction, Zetachain, Fraxtal, Linea, Sei, Osmosis,
Test1, Test2, Test3, FuelTest1, SealevelTest1, SealevelTest2, CosmosTest99990, CosmosTest99991
],
})

@ -34,14 +34,6 @@ pub async fn call_and_retry_n_times<T>(
))
}
/// Retry calling a fallible async function a predefined number of times
#[instrument(err, skip(f))]
pub async fn call_with_retry<T>(
f: impl FnMut() -> Pin<Box<dyn Future<Output = ChainResult<T>> + Send>>,
) -> ChainResult<T> {
call_and_retry_n_times(f, DEFAULT_MAX_RPC_RETRIES).await
}
/// Retry calling a fallible async function indefinitely, until it succeeds
pub async fn call_and_retry_indefinitely<T>(
f: impl FnMut() -> Pin<Box<dyn Future<Output = ChainResult<T>> + Send>>,

@ -1,15 +1,18 @@
use serde::{Deserialize, Serialize};
use std::{
cmp::Ordering,
fmt::{Debug, Display},
io::Write,
time::{Duration, Instant},
};
use crate::{
ChainResult, FixedPointNumber, HyperlaneDomain, HyperlaneMessage, TryBatchAs, TxOutcome, H256,
U256,
ChainResult, Decode, Encode, FixedPointNumber, HyperlaneDomain, HyperlaneMessage,
HyperlaneProtocolError, TryBatchAs, TxOutcome, H256, U256,
};
use async_trait::async_trait;
use num::CheckedDiv;
use strum::Display;
use tracing::warn;
/// Boxed operation that can be stored in an operation queue
@ -49,12 +52,22 @@ pub trait PendingOperation: Send + Sync + Debug + TryBatchAs<HyperlaneMessage> {
/// The domain this originates from.
fn origin_domain_id(&self) -> u32;
/// Get the operation status from the local db, if there is one
fn retrieve_status_from_db(&self) -> Option<PendingOperationStatus>;
/// The domain this operation will take place on.
fn destination_domain(&self) -> &HyperlaneDomain;
/// Label to use for metrics granularity.
fn app_context(&self) -> Option<String>;
/// The status of the operation, which should explain why it is in the
/// queue.
fn status(&self) -> PendingOperationStatus;
/// Set the status of the operation.
fn set_status(&mut self, status: PendingOperationStatus);
/// Get tuple of labels for metrics.
fn get_operation_labels(&self) -> (String, String) {
let app_context = self.app_context().unwrap_or("Unknown".to_string());
@ -106,6 +119,108 @@ pub trait PendingOperation: Send + Sync + Debug + TryBatchAs<HyperlaneMessage> {
fn set_retries(&mut self, retries: u32);
}
#[derive(Debug, Display, Clone, Serialize, Deserialize, PartialEq)]
/// Status of a pending operation
/// WARNING: This enum is serialized to JSON and stored in the database, so to keep backwards compatibility, we shouldn't remove or rename any variants.
/// Adding new variants is fine.
pub enum PendingOperationStatus {
/// The operation is ready to be prepared for the first time, or has just been loaded from storage
FirstPrepareAttempt,
/// The operation is ready to be prepared again, with the given reason
#[strum(to_string = "Retry({0})")]
Retry(ReprepareReason),
/// The operation is ready to be submitted
ReadyToSubmit,
/// The operation has been submitted and is awaiting confirmation
#[strum(to_string = "Confirm({0})")]
Confirm(ConfirmReason),
}
impl Encode for PendingOperationStatus {
fn write_to<W>(&self, writer: &mut W) -> std::io::Result<usize>
where
W: Write,
{
// Serialize to JSON and write to the writer, to avoid having to implement the encoding manually
let serialized = serde_json::to_vec(self)
.map_err(|_| std::io::Error::new(std::io::ErrorKind::Other, "Failed to serialize"))?;
writer.write(&serialized)
}
}
impl Decode for PendingOperationStatus {
fn read_from<R>(reader: &mut R) -> Result<Self, HyperlaneProtocolError>
where
R: std::io::Read,
Self: Sized,
{
// Deserialize from JSON and read from the reader, to avoid having to implement the encoding / decoding manually
serde_json::from_reader(reader).map_err(|err| {
HyperlaneProtocolError::IoError(std::io::Error::new(
std::io::ErrorKind::Other,
format!("Failed to deserialize. Error: {}", err),
))
})
}
}
#[derive(Display, Debug, Clone, Serialize, Deserialize, PartialEq)]
/// Reasons for repreparing an operation
/// WARNING: This enum is serialized to JSON and stored in the database, so to keep backwards compatibility, we shouldn't remove or rename any variants.
/// Adding new variants is fine.
pub enum ReprepareReason {
#[strum(to_string = "Error checking message delivery status")]
/// Error checking message delivery status
ErrorCheckingDeliveryStatus,
#[strum(to_string = "Error checking if message recipient is a contract")]
/// Error checking if message recipient is a contract
ErrorCheckingIfRecipientIsContract,
#[strum(to_string = "Error fetching ISM address")]
/// Error fetching ISM address
ErrorFetchingIsmAddress,
#[strum(to_string = "Error getting message metadata builder")]
/// Error getting message metadata builder
ErrorGettingMetadataBuilder,
#[strum(to_string = "Error building metadata")]
/// Error building metadata
ErrorBuildingMetadata,
#[strum(to_string = "Could not fetch metadata")]
/// Could not fetch metadata
CouldNotFetchMetadata,
#[strum(to_string = "Error estimating costs for process call")]
/// Error estimating costs for process call
ErrorEstimatingGas,
#[strum(to_string = "Error checking if message meets gas payment requirement")]
/// Error checking if message meets gas payment requirement
ErrorCheckingGasRequirement,
#[strum(to_string = "Gas payment requirement not met")]
/// Gas payment requirement not met
GasPaymentRequirementNotMet,
#[strum(to_string = "Message delivery estimated gas exceeds max gas limit")]
/// Message delivery estimated gas exceeds max gas limit
ExceedsMaxGasLimit,
#[strum(to_string = "Delivery transaction reverted or reorged")]
/// Delivery transaction reverted or reorged
RevertedOrReorged,
}
#[derive(Display, Debug, Clone, Serialize, Deserialize, PartialEq)]
/// Reasons for moving an operation to the confirmation queue
/// WARNING: This enum is serialized to JSON and stored in the database, so to keep backwards compatibility, we shouldn't remove or rename any variants.
/// Adding new variants is fine.
pub enum ConfirmReason {
#[strum(to_string = "Submitted by this relayer")]
/// Operation was submitted by this relayer
SubmittedBySelf,
#[strum(to_string = "Already submitted, awaiting confirmation")]
/// Operation was already submitted (either by another relayer, or by a previous run of this relayer), awaiting confirmation
AlreadySubmitted,
/// Error checking message delivery status
ErrorConfirmingDelivery,
/// Error storing delivery outcome
ErrorRecordingProcessSuccess,
}
/// Utility fn to calculate the total estimated cost of an operation batch
pub fn total_estimated_cost(ops: &[Box<dyn PendingOperation>]) -> U256 {
ops.iter()
@ -192,40 +307,22 @@ pub enum PendingOperationResult {
/// This operation is not ready to be attempted again yet
NotReady,
/// Operation needs to be started from scratch again
Reprepare,
Reprepare(ReprepareReason),
/// Do not attempt to run the operation again, forget about it
Drop,
/// Send this message straight to the confirm queue
Confirm,
Confirm(ConfirmReason),
}
/// create a `op_try!` macro for the `on_retry` handler.
#[macro_export]
macro_rules! make_op_try {
($on_retry:expr) => {
/// Handle a result and either return early with retry or a critical failure on
/// error.
macro_rules! op_try {
(critical: $e:expr, $ctx:literal) => {
match $e {
Ok(v) => v,
Err(e) => {
error!(error=?e, concat!("Critical error when ", $ctx));
#[allow(clippy::redundant_closure_call)]
return $on_retry();
}
}
};
($e:expr, $ctx:literal) => {
match $e {
Ok(v) => v,
Err(e) => {
warn!(error=?e, concat!("Error when ", $ctx));
#[allow(clippy::redundant_closure_call)]
return $on_retry();
}
}
};
}
};
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_encoding_pending_operation_status() {
let status = PendingOperationStatus::Retry(ReprepareReason::CouldNotFetchMetadata);
let encoded = status.to_vec();
let decoded = PendingOperationStatus::read_from(&mut &encoded[..]).unwrap();
assert_eq!(status, decoded);
}
}

@ -1,6 +1,6 @@
use derive_new::new;
use crate::{HyperlaneMessage, MerkleTreeInsertion, Sequenced};
use crate::{HyperlaneMessage, InterchainGasPayment, MerkleTreeInsertion, Sequenced};
/// Wrapper struct that adds indexing information to a type
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, new)]
@ -73,3 +73,9 @@ impl From<MerkleTreeInsertion> for Indexed<MerkleTreeInsertion> {
Indexed::new(value).with_sequence(sequence as _)
}
}
impl From<InterchainGasPayment> for Indexed<InterchainGasPayment> {
fn from(value: InterchainGasPayment) -> Self {
Indexed::new(value)
}
}

@ -93,7 +93,7 @@ where
Ok(Self::from(Self::fetch_data(buf)?.unwrap_or_default()))
}
// Optimisically write then realloc on failure.
// Optimistically write then realloc on failure.
// If we serialize and calculate len before realloc we will waste heap space as there is no
// free(). Tradeoff between heap usage and compute budget.
pub fn store(

@ -11,7 +11,7 @@ thiserror.workspace = true
hyperlane-core = { path = "../../../hyperlane-core" }
# Required to allow dependencies `getrandom` but to preserve determinism required by programs, see
# https://github.com/solana-labs/solana/blob/master/docs/src/developing/on-chain-programs/developing-rust.md#depending-on-rand
# https://github.com/solana-foundation/developer-content/blob/main/docs/programs/lang-rust.md#depending-on-rand
getrandom = { workspace = true, features = ["custom"] }
[lib]

@ -12,7 +12,7 @@ spl-type-length-value.workspace = true
hyperlane-core = { path = "../../../hyperlane-core" }
# Required to allow dependencies `getrandom` but to preserve determinism required by programs, see
# https://github.com/solana-labs/solana/blob/master/docs/src/developing/on-chain-programs/developing-rust.md#depending-on-rand
# https://github.com/solana-foundation/developer-content/blob/main/docs/programs/lang-rust.md#depending-on-rand
getrandom = { workspace = true, features = ["custom"] }
[lib]

@ -21,7 +21,7 @@ num-derive.workspace = true
num-traits.workspace = true
thiserror.workspace = true
# Required to allow dependencies `getrandom` but to preserve determinism required by programs, see
# https://github.com/solana-labs/solana/blob/master/docs/src/developing/on-chain-programs/developing-rust.md#depending-on-rand
# https://github.com/solana-foundation/developer-content/blob/main/docs/programs/lang-rust.md#depending-on-rand
getrandom = { workspace = true, features = ["custom"] }
serde = { workspace = true, optional = true }

@ -212,7 +212,7 @@ async fn test_dispatch_from_eoa() {
local_domain: LOCAL_DOMAIN,
outbox_bump_seed: mailbox_accounts.outbox_bump_seed,
owner: Some(payer.pubkey()),
tree: expected_tree,
tree: expected_tree.clone(),
},
)
.await;

@ -17,7 +17,7 @@ solana-program.workspace = true
thiserror.workspace = true
spl-noop.workspace = true
# Required to allow dependencies `getrandom` but to preserve determinism required by programs, see
# https://github.com/solana-labs/solana/blob/master/docs/src/developing/on-chain-programs/developing-rust.md#depending-on-rand
# https://github.com/solana-foundation/developer-content/blob/main/docs/programs/lang-rust.md#depending-on-rand
getrandom = { workspace = true, features = ["custom"] }
proc-macro-crate = "~1.2.1" # TODO: remove this dependency once solana supports rust >=1.64

@ -177,10 +177,7 @@ impl AgentConfig {
amount: "0.05".to_string(),
},
contract_address_bytes: 32,
index: AgentConfigIndex {
from: 1,
chunk: 100,
},
index: AgentConfigIndex { from: 1, chunk: 5 },
}
}
}

@ -1,5 +1,18 @@
# @hyperlane-xyz/core
## 3.16.0
### Patch Changes
- @hyperlane-xyz/utils@3.16.0
## 3.15.1
### Patch Changes
- 6620fe636: fix: `TokenRouter.transferRemote` with hook overrides
- @hyperlane-xyz/utils@3.15.1
## 3.15.0
### Minor Changes

@ -20,6 +20,11 @@ interface IDelegationManager {
uint32 stakerOptOutWindowBlocks;
}
event OperatorMetadataURIUpdated(
address indexed operator,
string metadataURI
);
function registerAsOperator(
OperatorDetails calldata registeringOperatorDetails,
string calldata metadataURI

@ -8,6 +8,7 @@ interface IMultisigIsm is IInterchainSecurityModule {
* @notice Returns the set of validators responsible for verifying _message
* and the number of signatures required
* @dev Can change based on the content of _message
* @dev Signatures provided to `verify` must be consistent with validator ordering
* @param _message Hyperlane formatted interchain message
* @return validators The array of validator addresses
* @return threshold The number of validator signatures needed

@ -26,6 +26,7 @@ abstract contract AbstractMultisigIsm is IMultisigIsm {
* @notice Returns the set of validators responsible for verifying _message
* and the number of signatures required
* @dev Can change based on the content of _message
* @dev Signatures provided to `verify` must be consistent with validator ordering
* @param _message Hyperlane formatted interchain message
* @return validators The array of validator addresses
* @return threshold The number of validator signatures needed
@ -60,7 +61,9 @@ abstract contract AbstractMultisigIsm is IMultisigIsm {
/**
* @notice Requires that m-of-n validators verify a merkle root,
* and verifies a merkle proof of `_message` against that root.
* and verifies a merkle proof of `_message` against that root.
* @dev Optimization relies on the caller sorting signatures in the same order as validators.
* @dev Employs https://www.geeksforgeeks.org/two-pointers-technique/ to minimize gas usage.
* @param _metadata ABI encoded module metadata
* @param _message Formatted Hyperlane message (see Message.sol).
*/

@ -1,10 +1,10 @@
{
"name": "@hyperlane-xyz/core",
"description": "Core solidity contracts for Hyperlane",
"version": "3.15.0",
"version": "3.16.0",
"dependencies": {
"@eth-optimism/contracts": "^0.6.0",
"@hyperlane-xyz/utils": "3.15.0",
"@hyperlane-xyz/utils": "3.16.0",
"@layerzerolabs/lz-evm-oapp-v2": "2.0.2",
"@openzeppelin/contracts": "^4.9.3",
"@openzeppelin/contracts-upgradeable": "^v4.9.3",

@ -0,0 +1,5 @@
---
'@hyperlane-xyz/sdk': major
---
Refactor testIgpConfig function for clarity and maintainability

@ -0,0 +1,5 @@
---
'@hyperlane-xyz/sdk': major
---
fix(sdk): refactor `addVerificationArtifacts` for enhanced Artifact Deduplication in HyperlaneDeployer

@ -1,5 +1,9 @@
# @hyperlane-xyz/ccip-server
## 3.16.0
## 3.15.1
## 3.15.0
## 3.14.0

@ -1,6 +1,6 @@
{
"name": "@hyperlane-xyz/ccip-server",
"version": "3.15.0",
"version": "3.16.0",
"description": "CCIP server",
"typings": "dist/index.d.ts",
"typedocMain": "src/index.ts",

@ -1,5 +1,24 @@
# @hyperlane-xyz/cli
## 3.16.0
### Patch Changes
- Updated dependencies [f9bbdde76]
- Updated dependencies [5cc64eb09]
- @hyperlane-xyz/sdk@3.16.0
- @hyperlane-xyz/utils@3.16.0
## 3.15.1
### Patch Changes
- 921e449b4: Support priorityFee fetching from RPC and some better logging
- Updated dependencies [acaa22cd9]
- Updated dependencies [921e449b4]
- @hyperlane-xyz/sdk@3.15.1
- @hyperlane-xyz/utils@3.15.1
## 3.15.0
### Minor Changes

@ -297,7 +297,7 @@ run_validator() {
echo "Validator running, sleeping to let it sync"
# This needs to be long to allow time for the cargo build to finish
sleep 15
sleep 20
echo "Done sleeping"
for CHAIN in ${CHAIN1} ${CHAIN2}

@ -1,13 +1,13 @@
{
"name": "@hyperlane-xyz/cli",
"version": "3.15.0",
"version": "3.16.0",
"description": "A command-line utility for common Hyperlane operations",
"dependencies": {
"@aws-sdk/client-kms": "^3.577.0",
"@aws-sdk/client-s3": "^3.577.0",
"@hyperlane-xyz/registry": "^2.1.1",
"@hyperlane-xyz/sdk": "3.15.0",
"@hyperlane-xyz/utils": "3.15.0",
"@hyperlane-xyz/registry": "2.1.1",
"@hyperlane-xyz/sdk": "3.16.0",
"@hyperlane-xyz/utils": "3.16.0",
"@inquirer/prompts": "^3.0.0",
"asn1.js": "^5.4.1",
"bignumber.js": "^9.1.1",

@ -0,0 +1,449 @@
import { Wallet } from 'ethers';
import {
ECDSAStakeRegistry__factory,
IDelegationManager__factory,
MerkleTreeHook__factory,
ValidatorAnnounce__factory,
} from '@hyperlane-xyz/core';
import { ChainMap, ChainName, MultiProvider } from '@hyperlane-xyz/sdk';
import { Address, ProtocolType, isObjEmpty } from '@hyperlane-xyz/utils';
import { CommandContext } from '../context/types.js';
import {
errorRed,
log,
logBlue,
logBlueKeyValue,
logBoldBlue,
logDebug,
logGreen,
warnYellow,
} from '../logger.js';
import { indentYamlOrJson } from '../utils/files.js';
import {
getLatestMerkleTreeCheckpointIndex,
getLatestValidatorCheckpointIndexAndUrl,
getValidatorStorageLocations,
isValidatorSigningLatestCheckpoint,
} from '../validator/utils.js';
import { avsAddresses } from './config.js';
import { readOperatorFromEncryptedJson } from './stakeRegistry.js';
interface ChainInfo {
storageLocation?: string;
latestMerkleTreeCheckpointIndex?: number;
latestValidatorCheckpointIndex?: number;
validatorSynced?: boolean;
warnings?: string[];
}
interface ValidatorInfo {
operatorAddress: Address;
operatorName?: string;
chains: ChainMap<ChainInfo>;
}
/**
 * Entry point for the AVS validator check: reports, for each AVS operator on
 * `chain`, whether its Hyperlane validators are announced and signing the
 * latest checkpoints.
 *
 * @param chain - chain whose ECDSA stake registry is scanned for operators
 * @param context - CLI command context (providers, registry, chain metadata)
 * @param operatorKeyPath - optional encrypted key file; scopes the check to that operator
 * @param operatorAddress - optional operator address; takes precedence over the key file's address
 */
export const checkValidatorAvsSetup = async (
  chain: string,
  context: CommandContext,
  operatorKeyPath?: string,
  operatorAddress?: string,
) => {
  logBlue(
    `Checking AVS validator status for ${chain}, ${
      !operatorKeyPath ? 'this may take up to a minute to run' : ''
    }...`,
  );

  const { multiProvider } = context;
  const topLevelErrors: string[] = [];

  // Decrypt the operator key only when a path was supplied.
  const operatorWallet: Wallet | undefined = operatorKeyPath
    ? await readOperatorFromEncryptedJson(operatorKeyPath)
    : undefined;

  const avsOperatorRecord = await getAvsOperators(
    chain,
    multiProvider,
    topLevelErrors,
    operatorAddress ?? operatorWallet?.address,
  );

  await setOperatorName(
    chain,
    avsOperatorRecord,
    multiProvider,
    topLevelErrors,
  );

  // Only query per-chain validator state when at least one operator was found.
  if (!isObjEmpty(avsOperatorRecord)) {
    await setValidatorInfo(context, avsOperatorRecord, topLevelErrors);
  }

  logOutput(avsOperatorRecord, topLevelErrors);
};
const getAvsOperators = async (
chain: string,
multiProvider: MultiProvider,
topLevelErrors: string[],
operatorKey?: string,
): Promise<ChainMap<ValidatorInfo>> => {
const avsOperators: Record<Address, ValidatorInfo> = {};
const ecdsaStakeRegistryAddress = getEcdsaStakeRegistryAddress(
chain,
topLevelErrors,
);
if (!ecdsaStakeRegistryAddress) {
return avsOperators;
}
const ecdsaStakeRegistry = ECDSAStakeRegistry__factory.connect(
ecdsaStakeRegistryAddress,
multiProvider.getProvider(chain),
);
if (operatorKey) {
// If operator key is provided, only fetch the operator's validator info
const signingKey = await ecdsaStakeRegistry.getLastestOperatorSigningKey(
operatorKey,
);
avsOperators[signingKey] = {
operatorAddress: operatorKey,
chains: {},
};
return avsOperators;
}
const filter = ecdsaStakeRegistry.filters.SigningKeyUpdate(null, null);
const provider = multiProvider.getProvider(chain);
const latestBlock = await provider.getBlockNumber();
const blockLimit = 50000; // 50k blocks per query
let fromBlock = 1625972; // when ecdsaStakeRegistry was deployed
while (fromBlock < latestBlock) {
const toBlock = Math.min(fromBlock + blockLimit, latestBlock);
const logs = await ecdsaStakeRegistry.queryFilter(
filter,
fromBlock,
toBlock,
);
logs.forEach((log) => {
const event = ecdsaStakeRegistry.interface.parseLog(log);
const operatorKey = event.args.operator;
const signingKey = event.args.newSigningKey;
if (avsOperators[signingKey]) {
avsOperators[signingKey].operatorAddress = operatorKey;
} else {
avsOperators[signingKey] = {
operatorAddress: operatorKey,
chains: {},
};
}
});
fromBlock = toBlock + 1;
}
return avsOperators;
};
/**
 * Fetches the EigenLayer metadata URI for an operator by scanning
 * OperatorMetadataURIUpdated events on the delegation manager in paginated
 * block ranges.
 *
 * NOTE(review): the URI of the first (oldest) matching event found while
 * scanning forward is returned, not the most recent update — confirm this is
 * the intended semantics.
 *
 * @param chain - chain hosting the delegation manager
 * @param operatorAddress - operator whose metadata URI is looked up
 * @param multiProvider - provider source for the chain
 * @returns the metadata URI, or undefined when no event is found
 */
const getAVSMetadataURI = async (
  chain: string,
  operatorAddress: string,
  multiProvider: MultiProvider,
): Promise<string | undefined> => {
  const provider = multiProvider.getProvider(chain);
  const delegationManager = IDelegationManager__factory.connect(
    avsAddresses[chain]['delegationManager'],
    provider,
  );

  const filter = delegationManager.filters.OperatorMetadataURIUpdated(
    operatorAddress,
    null,
  );

  const latestBlock = await provider.getBlockNumber();
  const blockLimit = 50000; // 50k blocks per query

  for (let start = 17445563; start < latestBlock; ) {
    const end = Math.min(start + blockLimit, latestBlock);
    const matches = await delegationManager.queryFilter(filter, start, end);

    if (matches.length > 0) {
      const parsed = delegationManager.interface.parseLog(matches[0]);
      return parsed.args.metadataURI;
    }

    start = end + 1;
  }

  return undefined;
};
/**
 * Resolves and attaches a human-readable operator name to each ValidatorInfo
 * in `avsOperatorRecord` (mutated in place), by following each operator's
 * on-chain metadata URI.
 *
 * @param chain - chain whose delegation manager holds the metadata events
 * @param avsOperatorRecord - signing-key -> ValidatorInfo map to enrich
 * @param multiProvider - provider source for the chain
 * @param topLevelErrors - accumulator for name-resolution failures
 */
const setOperatorName = async (
  chain: string,
  avsOperatorRecord: Record<Address, ValidatorInfo>,
  multiProvider: MultiProvider,
  topLevelErrors: string[] = [],
) => {
  // Sequential on purpose: keeps RPC load low and error ordering stable.
  for (const validatorInfo of Object.values(avsOperatorRecord)) {
    const metadataURI = await getAVSMetadataURI(
      chain,
      validatorInfo.operatorAddress,
      multiProvider,
    );
    if (!metadataURI) continue;

    const operatorName = await fetchOperatorName(metadataURI);
    if (operatorName) {
      validatorInfo.operatorName = operatorName;
    } else {
      topLevelErrors.push(
        ` Failed to fetch operator name from metadataURI: ${metadataURI}`,
      );
    }
  }
};
/**
 * For every chain in the registry, resolves each validator's announced
 * storage location and latest signed checkpoint, and records the result on
 * the matching entry in `avsOperatorRecord` (mutated in place).
 *
 * Fix: checkpoint indices are compared with `== null` / `!= null` instead of
 * truthiness — index 0 is a valid checkpoint and must not be treated as
 * missing (previously it produced a false warning and skipped the sync check).
 *
 * @param context - CLI context providing providers, registry and chain metadata
 * @param avsOperatorRecord - signing-key -> ValidatorInfo map to enrich
 * @param topLevelErrors - accumulator for chain-level error messages
 */
const setValidatorInfo = async (
  context: CommandContext,
  avsOperatorRecord: Record<Address, ValidatorInfo>,
  topLevelErrors: string[],
) => {
  const { multiProvider, registry, chainMetadata } = context;
  const failedToReadChains: string[] = [];

  const validatorAddresses = Object.keys(avsOperatorRecord);

  const chains = await registry.getChains();
  const addresses = await registry.getAddresses();

  for (const chain of chains) {
    // skip if chain is not an Ethereum chain
    if (chainMetadata[chain].protocol !== ProtocolType.Ethereum) continue;

    const chainAddresses = addresses[chain];

    // skip if no contract addresses are found for this chain
    if (chainAddresses === undefined) continue;

    if (!chainAddresses.validatorAnnounce) {
      topLevelErrors.push(` ValidatorAnnounce is not deployed on ${chain}`);
    }

    if (!chainAddresses.merkleTreeHook) {
      topLevelErrors.push(` MerkleTreeHook is not deployed on ${chain}`);
    }

    // Both contracts are required for the checks below.
    if (!chainAddresses.validatorAnnounce || !chainAddresses.merkleTreeHook) {
      continue;
    }

    const validatorAnnounce = ValidatorAnnounce__factory.connect(
      chainAddresses.validatorAnnounce,
      multiProvider.getProvider(chain),
    );

    const merkleTreeHook = MerkleTreeHook__factory.connect(
      chainAddresses.merkleTreeHook,
      multiProvider.getProvider(chain),
    );

    const latestMerkleTreeCheckpointIndex =
      await getLatestMerkleTreeCheckpointIndex(merkleTreeHook, chain);

    const validatorStorageLocations = await getValidatorStorageLocations(
      validatorAnnounce,
      validatorAddresses,
      chain,
    );

    if (!validatorStorageLocations) {
      failedToReadChains.push(chain);
      continue;
    }

    for (let i = 0; i < validatorAddresses.length; i++) {
      const validatorAddress = validatorAddresses[i];
      const storageLocation = validatorStorageLocations[i];
      const warnings: string[] = [];

      // Skip if no storage location is found (the address is not validating
      // on this chain) or if the storage location doesn't start with s3://
      if (
        storageLocation.length === 0 ||
        !storageLocation[0].startsWith('s3://')
      ) {
        continue;
      }

      const [latestValidatorCheckpointIndex, latestCheckpointUrl] =
        (await getLatestValidatorCheckpointIndexAndUrl(storageLocation[0])) ?? [
          undefined,
          undefined,
        ];

      // `== null` (not `!`): a checkpoint index of 0 is a legitimate value.
      if (latestMerkleTreeCheckpointIndex == null) {
        warnings.push(
          ` Failed to fetch latest checkpoint index of merkleTreeHook on ${chain}.`,
        );
      }

      if (latestValidatorCheckpointIndex == null) {
        warnings.push(
          ` Failed to fetch latest signed checkpoint index of validator on ${chain}, this is likely due to failing to read an S3 bucket`,
        );
      }

      let validatorSynced = undefined;
      if (
        latestMerkleTreeCheckpointIndex != null &&
        latestValidatorCheckpointIndex != null
      ) {
        validatorSynced = isValidatorSigningLatestCheckpoint(
          latestValidatorCheckpointIndex,
          latestMerkleTreeCheckpointIndex,
        );
      }

      const chainInfo: ChainInfo = {
        storageLocation: latestCheckpointUrl,
        latestMerkleTreeCheckpointIndex,
        latestValidatorCheckpointIndex,
        validatorSynced,
        warnings,
      };

      const validatorInfo = avsOperatorRecord[validatorAddress];
      if (validatorInfo) {
        validatorInfo.chains[chain as ChainName] = chainInfo;
      }
    }
  }

  if (failedToReadChains.length > 0) {
    topLevelErrors.push(
      ` Failed to read storage locations onchain for ${failedToReadChains.join(
        ', ',
      )}`,
    );
  }
};
/**
 * Pretty-prints the collected AVS validator data: top-level errors first,
 * then one section per operator/validator with per-chain checkpoint and
 * sync details.
 */
const logOutput = (
  avsKeysRecord: Record<Address, ValidatorInfo>,
  topLevelErrors: string[],
) => {
  if (topLevelErrors.length > 0) {
    for (const error of topLevelErrors) {
      errorRed(error);
    }
  }

  for (const [validatorAddress, data] of Object.entries(avsKeysRecord)) {
    log('\n\n');

    if (data.operatorName) logBlueKeyValue('Operator name', data.operatorName);
    logBlueKeyValue('Operator address', data.operatorAddress);
    logBlueKeyValue('Validator address', validatorAddress);

    if (!isObjEmpty(data.chains)) {
      logBoldBlue(indentYamlOrJson('Validating on...', 2));
      for (const [chain, chainInfo] of Object.entries(data.chains)) {
        logBoldBlue(indentYamlOrJson(chain, 2));

        if (chainInfo.storageLocation) {
          logBlueKeyValue(
            indentYamlOrJson('Storage location', 2),
            chainInfo.storageLocation,
          );
        }

        // Index 0 is a valid checkpoint index, so compare against
        // undefined instead of relying on truthiness.
        if (chainInfo.latestMerkleTreeCheckpointIndex !== undefined) {
          logBlueKeyValue(
            indentYamlOrJson('Latest merkle tree checkpoint index', 2),
            String(chainInfo.latestMerkleTreeCheckpointIndex),
          );
        }

        if (chainInfo.latestValidatorCheckpointIndex !== undefined) {
          logBlueKeyValue(
            indentYamlOrJson('Latest validator checkpoint index', 2),
            String(chainInfo.latestValidatorCheckpointIndex),
          );

          if (chainInfo.validatorSynced) {
            logGreen(
              indentYamlOrJson('✅ Validator is signing latest checkpoint', 2),
            );
          } else {
            errorRed(
              indentYamlOrJson(
                '❌ Validator is not signing latest checkpoint',
                2,
              ),
            );
          }
        } else {
          errorRed(
            indentYamlOrJson(
              '❌ Failed to fetch latest signed checkpoint index',
              2,
            ),
          );
        }

        if (chainInfo.warnings && chainInfo.warnings.length > 0) {
          warnYellow(
            indentYamlOrJson('The following warnings were encountered:', 2),
          );
          for (const warning of chainInfo.warnings) {
            warnYellow(indentYamlOrJson(warning, 3));
          }
        }
      }
    } else {
      logBlue('Validator is not validating on any chain');
    }
  }
};
/**
 * Looks up the EcdsaStakeRegistry address for `chain`. Returns undefined
 * (and records a top-level error) when the chain has no AVS addresses.
 */
const getEcdsaStakeRegistryAddress = (
  chain: string,
  topLevelErrors: string[],
): Address | undefined => {
  // Explicit lookup instead of catching the TypeError thrown by indexing
  // into an undefined chain entry.
  const contracts = avsAddresses[chain];
  if (!contracts) {
    topLevelErrors.push(
      ` EcdsaStakeRegistry address not found for ${chain}`,
    );
    return undefined;
  }
  return contracts['ecdsaStakeRegistry'];
};
/**
 * Fetches the operator metadata JSON at `metadataURI` and returns its
 * `name` field, or undefined (after a debug log) on any failure.
 */
const fetchOperatorName = async (
  metadataURI: string,
): Promise<string | undefined> => {
  try {
    const response = await fetch(metadataURI);
    // A non-2xx body may still parse as JSON; treat it as a failure
    // rather than risk returning an error payload's fields.
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}`);
    }
    const data = await response.json();
    return data['name'];
  } catch (err) {
    logDebug(`Failed to fetch operator name from ${metadataURI}: ${err}`);
    return undefined;
  }
};

@ -3,6 +3,7 @@ import { Address } from '@hyperlane-xyz/utils';
interface AVSContracts {
avsDirectory: Address;
delegationManager: Address;
proxyAdmin: Address;
ecdsaStakeRegistry: Address;
hyperlaneServiceManager: Address;
@ -12,12 +13,14 @@ interface AVSContracts {
export const avsAddresses: ChainMap<AVSContracts> = {
holesky: {
avsDirectory: '0x055733000064333CaDDbC92763c58BF0192fFeBf',
delegationManager: '0xA44151489861Fe9e3055d95adC98FbD462B948e7',
proxyAdmin: '0x33dB966328Ea213b0f76eF96CA368AB37779F065',
ecdsaStakeRegistry: '0xFfa913705484C9BAea32Ffe9945BeA099A1DFF72',
hyperlaneServiceManager: '0xc76E477437065093D353b7d56c81ff54D167B0Ab',
},
ethereum: {
avsDirectory: '0x135dda560e946695d6f155dacafc6f1f25c1f5af',
delegationManager: '0x39053D51B77DC0d36036Fc1fCc8Cb819df8Ef37A',
proxyAdmin: '0x75EE15Ee1B4A75Fa3e2fDF5DF3253c25599cc659',
ecdsaStakeRegistry: '0x272CF0BB70D3B4f79414E0823B426d2EaFd48910',
hyperlaneServiceManager: '0xe8E59c6C8B56F2c178f63BCFC4ce5e5e2359c8fc',

@ -109,7 +109,7 @@ export async function deregisterOperator({
);
}
async function readOperatorFromEncryptedJson(
export async function readOperatorFromEncryptedJson(
operatorKeyPath: string,
): Promise<Wallet> {
const encryptedJson = readFileAtPath(resolvePath(operatorKeyPath));

@ -1,14 +1,21 @@
import { CommandModule, Options } from 'yargs';
import { ChainName } from '@hyperlane-xyz/sdk';
import { Address } from '@hyperlane-xyz/utils';
import { Address, ProtocolType } from '@hyperlane-xyz/utils';
import { checkValidatorAvsSetup } from '../avs/check.js';
import {
deregisterOperator,
registerOperatorWithSignature,
} from '../avs/stakeRegistry.js';
import { CommandModuleWithWriteContext } from '../context/types.js';
import { log } from '../logger.js';
import { errorRed, log } from '../logger.js';
import {
avsChainCommandOption,
demandOption,
operatorKeyPathCommandOption,
} from './options.js';
/**
* Parent command
@ -20,6 +27,7 @@ export const avsCommand: CommandModule = {
yargs
.command(registerCommand)
.command(deregisterCommand)
.command(checkCommand)
.version(false)
.demandCommand(),
handler: () => log('Command required'),
@ -29,17 +37,8 @@ export const avsCommand: CommandModule = {
* Registration command
*/
export const registrationOptions: { [k: string]: Options } = {
chain: {
type: 'string',
description: 'Chain to interact with the AVS on',
demandOption: true,
choices: ['holesky', 'ethereum'],
},
operatorKeyPath: {
type: 'string',
description: 'Path to the operator key file',
demandOption: true,
},
chain: avsChainCommandOption,
operatorKeyPath: demandOption(operatorKeyPathCommandOption),
avsSigningKeyAddress: {
type: 'string',
description: 'Address of the AVS signing key',
@ -87,3 +86,47 @@ const deregisterCommand: CommandModuleWithWriteContext<{
process.exit(0);
},
};
/**
 * `avs check` command: verifies a validator's AVS setup on a given chain.
 * The operator may be identified either by a key file or by an explicit
 * address.
 */
const checkCommand: CommandModuleWithWriteContext<{
  chain: ChainName;
  operatorKeyPath?: string;
  operatorAddress?: string;
}> = {
  command: 'check',
  describe: 'Check AVS',
  builder: {
    chain: avsChainCommandOption,
    // Optional here (unlike register/deregister, which demand it):
    // --operatorAddress may be used instead.
    operatorKeyPath: operatorKeyPathCommandOption,
    operatorAddress: {
      type: 'string',
      description: 'Address of the operator to check',
    },
  },
  handler: async ({ context, chain, operatorKeyPath, operatorAddress }) => {
    const { multiProvider } = context;

    // validate chain: it must exist in the configured registry metadata
    if (!multiProvider.hasChain(chain)) {
      errorRed(
        `❌ No metadata found for ${chain}. Ensure it is included in your configured registry.`,
      );
      process.exit(1);
    }

    const chainMetadata = multiProvider.getChainMetadata(chain);

    // only EVM chains are supported by the AVS check
    if (chainMetadata.protocol !== ProtocolType.Ethereum) {
      errorRed(`\n❌ Validator AVS check only supports EVM chains. Exiting.`);
      process.exit(1);
    }

    await checkValidatorAvsSetup(
      chain,
      context,
      operatorKeyPath,
      operatorAddress,
    );
    process.exit(0);
  },
};

@ -204,3 +204,15 @@ export const awsKeyIdCommandOption: Options = {
type: 'string',
describe: 'Key ID from AWS KMS',
};
export const operatorKeyPathCommandOption: Options = {
type: 'string',
description: 'Path to the operator key file',
};
export const avsChainCommandOption: Options = {
type: 'string',
description: 'Chain to interact with the AVS on',
demandOption: true,
choices: ['holesky', 'ethereum'],
};

@ -35,6 +35,9 @@ export function logColor(
}
}
export const logBlue = (...args: any) => logColor('info', chalk.blue, ...args);
export const logBlueKeyValue = (key: string, value: string) => {
logBlue(`${chalk.bold(`${key}:`)} ${value}`);
};
export const logPink = (...args: any) =>
logColor('info', chalk.magentaBright, ...args);
export const logGray = (...args: any) => logColor('info', chalk.gray, ...args);
@ -43,11 +46,16 @@ export const logGreen = (...args: any) =>
export const logRed = (...args: any) => logColor('info', chalk.red, ...args);
export const logBoldUnderlinedRed = (...args: any) =>
logColor('info', chalk.red.bold.underline, ...args);
export const logBoldBlue = (...args: any) =>
logColor('info', chalk.blue.bold, ...args);
export const logTip = (...args: any) =>
logColor('info', chalk.bgYellow, ...args);
export const warnYellow = (...args: any) =>
logColor('warn', chalk.yellow, ...args);
export const errorRed = (...args: any) => logColor('error', chalk.red, ...args);
export const logDebug = (msg: string, ...args: any) =>
logger.debug(msg, ...args);
// No support for table in pino so print directly to console
export const logTable = (...args: any) => console.table(...args);

@ -0,0 +1,69 @@
import { MerkleTreeHook, ValidatorAnnounce } from '@hyperlane-xyz/core';
import { S3Validator } from '@hyperlane-xyz/sdk';
import { logDebug } from '../logger.js';
/**
 * Reads the index of the most recent checkpoint from a MerkleTreeHook
 * contract. Returns undefined (after a debug log) when the call fails.
 */
export const getLatestMerkleTreeCheckpointIndex = async (
  merkleTreeHook: MerkleTreeHook,
  chainName?: string,
): Promise<number | undefined> => {
  try {
    // latestCheckpoint() returns [root, index]; only the index is needed.
    const checkpoint = await merkleTreeHook.latestCheckpoint();
    return checkpoint[1];
  } catch (err) {
    logDebug(
      `Failed to get latest checkpoint index from merkleTreeHook contract ${
        chainName ? `on ${chainName}` : ''
      } : ${err}`,
    );
    return undefined;
  }
};
/**
 * Queries the ValidatorAnnounce contract for the announced storage
 * locations of each validator. Returns undefined (after a debug log)
 * when the call fails.
 */
export const getValidatorStorageLocations = async (
  validatorAnnounce: ValidatorAnnounce,
  validators: string[],
  chainName?: string,
): Promise<string[][] | undefined> => {
  let locations: string[][];
  try {
    locations =
      await validatorAnnounce.getAnnouncedStorageLocations(validators);
  } catch (err) {
    logDebug(
      `Failed to get announced storage locations from validatorAnnounce contract ${
        chainName ? `on ${chainName}` : ''
      } : ${err}`,
    );
    return undefined;
  }
  return locations;
};
export const getLatestValidatorCheckpointIndexAndUrl = async (
s3StorageLocation: string,
): Promise<[number, string] | undefined> => {
let s3Validator: S3Validator;
try {
s3Validator = await S3Validator.fromStorageLocation(s3StorageLocation);
} catch (err) {
logDebug(
`Failed to instantiate S3Validator at location ${s3StorageLocation}: ${err}`,
);
return undefined;
}
try {
const latestCheckpointIndex = await s3Validator.getLatestCheckpointIndex();
return [latestCheckpointIndex, s3Validator.getLatestCheckpointUrl()];
} catch (err) {
logDebug(
`Failed to get latest checkpoint index from S3Validator at location ${s3StorageLocation}: ${err}`,
);
return undefined;
}
};
/**
 * Returns true when the validator's signed checkpoint is within 1% of
 * the merkle tree's latest checkpoint index (i.e. considered "in sync").
 */
export const isValidatorSigningLatestCheckpoint = (
  latestValidatorCheckpointIndex: number,
  latestMerkleTreeCheckpointIndex: number,
): boolean => {
  // Allow the validator to lag by strictly less than 1% of the tree's
  // latest index.
  const allowedLag = latestMerkleTreeCheckpointIndex / 100;
  const lag =
    latestMerkleTreeCheckpointIndex - latestValidatorCheckpointIndex;
  return Math.abs(lag) < allowedLag;
};

@ -1 +1 @@
export const VERSION = '3.15.0';
export const VERSION = '3.16.0';

@ -1,5 +1,25 @@
# @hyperlane-xyz/helloworld
## 3.16.0
### Patch Changes
- Updated dependencies [f9bbdde76]
- Updated dependencies [5cc64eb09]
- @hyperlane-xyz/sdk@3.16.0
- @hyperlane-xyz/core@3.16.0
## 3.15.1
### Patch Changes
- 6620fe636: fix: `TokenRouter.transferRemote` with hook overrides
- Updated dependencies [6620fe636]
- Updated dependencies [acaa22cd9]
- Updated dependencies [921e449b4]
- @hyperlane-xyz/core@3.15.1
- @hyperlane-xyz/sdk@3.15.1
## 3.15.0
### Patch Changes

@ -1,11 +1,11 @@
{
"name": "@hyperlane-xyz/helloworld",
"description": "A basic skeleton of an Hyperlane app",
"version": "3.15.0",
"version": "3.16.0",
"dependencies": {
"@hyperlane-xyz/core": "3.15.0",
"@hyperlane-xyz/registry": "^2.1.1",
"@hyperlane-xyz/sdk": "3.15.0",
"@hyperlane-xyz/core": "3.16.0",
"@hyperlane-xyz/registry": "2.1.1",
"@hyperlane-xyz/sdk": "3.16.0",
"@openzeppelin/contracts-upgradeable": "^4.9.3",
"ethers": "^5.7.2"
},

@ -1,5 +1,33 @@
# @hyperlane-xyz/infra
## 3.16.0
### Minor Changes
- 5cc64eb09: Add support for new chains: linea, fraxtal, sei.
Support osmosis remote.
Drive-by fix to always fetch explorer API keys when running deploy script.
### Patch Changes
- 5cc64eb09: Allow selecting a specific chain to govern in check-deploy script
- Updated dependencies [f9bbdde76]
- Updated dependencies [5cc64eb09]
- @hyperlane-xyz/sdk@3.16.0
- @hyperlane-xyz/helloworld@3.16.0
- @hyperlane-xyz/utils@3.16.0
## 3.15.1
### Patch Changes
- Updated dependencies [6620fe636]
- Updated dependencies [acaa22cd9]
- Updated dependencies [921e449b4]
- @hyperlane-xyz/helloworld@3.15.1
- @hyperlane-xyz/sdk@3.15.1
- @hyperlane-xyz/utils@3.15.1
## 3.15.0
### Patch Changes

@ -27,6 +27,7 @@ import inevmEthereumUsdcAddresses from './warp/inevm-USDC-addresses.json';
import inevmEthereumUsdtAddresses from './warp/inevm-USDT-addresses.json';
import injectiveInevmInjAddresses from './warp/injective-inevm-addresses.json';
import mantaTIAAddresses from './warp/manta-TIA-addresses.json';
import renzoEzEthAddresses from './warp/renzo-ezETH-addresses.json';
import victionEthereumEthAddresses from './warp/viction-ETH-addresses.json';
import victionEthereumUsdcAddresses from './warp/viction-USDC-addresses.json';
import victionEthereumUsdtAddresses from './warp/viction-USDT-addresses.json';
@ -53,18 +54,22 @@ export const hyperlaneContextAgentChainConfig: AgentChainConfig = {
bsc: true,
celo: true,
ethereum: true,
fraxtal: true,
gnosis: true,
injective: true,
inevm: true,
linea: true,
mantapacific: true,
mode: true,
moonbeam: true,
neutron: true,
optimism: true,
osmosis: true,
polygon: true,
polygonzkevm: true,
redstone: true,
scroll: true,
sei: true,
viction: true,
zetachain: true,
},
@ -77,19 +82,23 @@ export const hyperlaneContextAgentChainConfig: AgentChainConfig = {
bsc: true,
celo: true,
ethereum: true,
fraxtal: true,
gnosis: true,
injective: true,
inevm: true,
linea: true,
mantapacific: true,
mode: true,
moonbeam: true,
// At the moment, we only relay between Neutron and Manta Pacific on the neutron context.
neutron: false,
optimism: true,
osmosis: true,
polygon: true,
polygonzkevm: true,
redstone: true,
scroll: true,
sei: true,
viction: true,
zetachain: true,
},
@ -102,19 +111,26 @@ export const hyperlaneContextAgentChainConfig: AgentChainConfig = {
bsc: true,
celo: true,
ethereum: true,
fraxtal: true,
gnosis: true,
// Cannot scrape non-EVM chains
injective: false,
inevm: true,
linea: true,
mantapacific: true,
mode: true,
moonbeam: true,
// Cannot scrape non-EVM chains
neutron: false,
optimism: true,
// Cannot scrape non-EVM chains
osmosis: false,
polygon: true,
polygonzkevm: true,
redstone: true,
// Out of caution around pointer contracts (https://www.docs.sei.io/dev-interoperability/pointer-contracts) not being compatible
// and the scraper not gracefully handling txs that may not exist via the eth RPC, we don't run the scraper.
sei: false,
scroll: true,
// Has RPC non-compliance that breaks scraping.
viction: false,
@ -198,8 +214,34 @@ const metricAppContexts = [
name: 'ancient8_ethereum_usdc',
matchingList: routerMatchingList(ancient8EthereumUsdcAddresses),
},
{
name: 'renzo_ezeth',
matchingList: routerMatchingList(renzoEzEthAddresses),
},
];
// Resource requests are based on observed usage found in https://abacusworks.grafana.net/d/FSR9YWr7k
const relayerResources = {
requests: {
cpu: '14000m',
memory: '12Gi',
},
};
const validatorResources = {
requests: {
cpu: '250m',
memory: '256Mi',
},
};
const scraperResources = {
requests: {
cpu: '100m',
memory: '4Gi',
},
};
const hyperlane: RootAgentConfig = {
...contextBase,
context: Contexts.Hyperlane,
@ -209,25 +251,28 @@ const hyperlane: RootAgentConfig = {
rpcConsensusType: RpcConsensusType.Fallback,
docker: {
repo,
tag: '939fa81-20240607-194607',
tag: '9535087-20240623-174819',
},
gasPaymentEnforcement: gasPaymentEnforcement,
metricAppContexts,
resources: relayerResources,
},
validators: {
docker: {
repo,
tag: 'de8c2a7-20240515-135254',
tag: '0d12ff3-20240620-173353',
},
rpcConsensusType: RpcConsensusType.Quorum,
chains: validatorChainConfig(Contexts.Hyperlane),
resources: validatorResources,
},
scraper: {
rpcConsensusType: RpcConsensusType.Fallback,
docker: {
repo,
tag: '939fa81-20240607-194607',
tag: '0d12ff3-20240620-173353',
},
resources: scraperResources,
},
};
@ -240,21 +285,23 @@ const releaseCandidate: RootAgentConfig = {
rpcConsensusType: RpcConsensusType.Fallback,
docker: {
repo,
tag: '939fa81-20240607-194607',
tag: '9535087-20240623-174819',
},
// We're temporarily (ab)using the RC relayer as a way to increase
// message throughput.
// whitelist: releaseCandidateHelloworldMatchingList,
gasPaymentEnforcement,
metricAppContexts,
resources: relayerResources,
},
validators: {
docker: {
repo,
tag: 'c9c5d37-20240510-014327',
tag: '0d12ff3-20240620-173353',
},
rpcConsensusType: RpcConsensusType.Quorum,
chains: validatorChainConfig(Contexts.ReleaseCandidate),
resources: validatorResources,
},
};
@ -271,7 +318,7 @@ const neutron: RootAgentConfig = {
rpcConsensusType: RpcConsensusType.Fallback,
docker: {
repo,
tag: 'c9c5d37-20240510-014327',
tag: '0d12ff3-20240620-173353',
},
gasPaymentEnforcement: [
{
@ -298,6 +345,7 @@ const neutron: RootAgentConfig = {
matchingList: routerMatchingList(arbitrumNeutronEclipAddresses),
},
],
resources: relayerResources,
},
};

@ -47,6 +47,9 @@
"0x749d6e7ad949e522c92181dc77f7bbc1c5d71506"
]
},
"fraxtal": {
"validators": ["0x4bce180dac6da60d0f3a2bdf036ffe9004f944c1"]
},
"gnosis": {
"validators": [
"0xd4df66a859585678f2ea8357161d896be19cc1ca",
@ -64,6 +67,9 @@
"injective": {
"validators": ["0xbfb8911b72cfb138c7ce517c57d9c691535dc517"]
},
"linea": {
"validators": ["0xf2d5409a59e0f5ae7635aff73685624904a77d94"]
},
"mantapacific": {
"validators": [
"0x8e668c97ad76d0e28375275c41ece4972ab8a5bc",
@ -95,6 +101,9 @@
"0x779a17e035018396724a6dec8a59bda1b5adf738"
]
},
"osmosis": {
"validators": ["0xea483af11c19fa41b16c31d1534c2a486a92bcac"]
},
"polygon": {
"validators": [
"0x12ecb319c7f4e8ac5eb5226662aeb8528c5cefac",
@ -119,6 +128,9 @@
"0x7210fa0a6be39a75cb14d682ebfb37e2b53ecbe5"
]
},
"sei": {
"validators": ["0x9920d2dbf6c85ffc228fdc2e810bf895732c6aa5"]
},
"viction": {
"validators": ["0x1f87c368f8e05a85ef9126d984a980a20930cb9c"]
},

@ -47,6 +47,9 @@
"0x87cf8a85465118aff9ec728ca157798201b1e368"
]
},
"fraxtal": {
"validators": ["0x8c772b730c8deb333dded14cb462e577a06283da"]
},
"gnosis": {
"validators": [
"0xd5122daa0c3dfc94a825ae928f3ea138cdb6a2e1",
@ -61,6 +64,9 @@
"0xe83d36fd00d9ef86243d9f7147b29e98d11df0ee"
]
},
"linea": {
"validators": ["0xad4886b6f5f5088c7ae53b69d1ff5cfc2a17bec4"]
},
"mantapacific": {
"validators": [
"0x84fcb05e6e5961df2dfd9f36e8f2b3e87ede7d76",
@ -116,6 +122,9 @@
"0x07c2f32a402543badc3141f6b98969d75ef2ac28"
]
},
"sei": {
"validators": ["0x846e48a7e85e5403cc690a347e1ad3c3dca11b6e"]
},
"viction": {
"validators": [
"0xe858971cd865b11d3e8fb6b6af72db0d85881baf",

@ -1,5 +1,7 @@
import { IRegistry } from '@hyperlane-xyz/registry';
import { ChainMap, ChainMetadata } from '@hyperlane-xyz/sdk';
import { getRegistryForEnvironment } from '../../../src/config/chain.js';
import { isEthereumProtocolChain } from '../../../src/utils/utils.js';
import { supportedChainNames } from './supportedChainNames.js';
@ -44,6 +46,13 @@ export const chainMetadataOverrides: ChainMap<Partial<ChainMetadata>> = {
gasPrice: 2 * 10 ** 9, // 2 gwei
},
},
sei: {
// Sei's `eth_feeHistory` is not to spec and incompatible with ethers-rs,
// so we force legacy transactions by setting a gas price.
transactionOverrides: {
gasPrice: 2 * 10 ** 9, // 2 gwei
},
},
moonbeam: {
transactionOverrides: {
maxFeePerGas: 350 * 10 ** 9, // 350 gwei
@ -51,3 +60,11 @@ export const chainMetadataOverrides: ChainMap<Partial<ChainMetadata>> = {
},
},
};
/**
 * Builds the registry for this environment with the environment's chain
 * metadata overrides applied.
 *
 * @param useSecrets forwarded to getRegistryForEnvironment (defaults to
 *   true) — presumably controls fetching secret endpoint config; confirm
 *   against that helper.
 */
export const getRegistry = async (useSecrets = true): Promise<IRegistry> =>
  getRegistryForEnvironment(
    environment,
    supportedChainNames,
    chainMetadataOverrides,
    useSecrets,
  );

@ -7,7 +7,7 @@ import { environment } from './chains.js';
export const keyFunderConfig: KeyFunderConfig = {
docker: {
repo: 'gcr.io/abacus-labs-dev/hyperlane-monorepo',
tag: '7720875-20240531-072251',
tag: 'b134b04-20240605-133031',
},
// We're currently using the same deployer/key funder key as mainnet2.
// To minimize nonce clobbering we offset the key funder cron
@ -31,8 +31,10 @@ export const keyFunderConfig: KeyFunderConfig = {
bsc: '5',
celo: '3',
ethereum: '0.5',
fraxtal: '0.2',
gnosis: '5',
inevm: '3',
linea: '0.2',
mantapacific: '0.2',
mode: '0.2',
moonbeam: '5',
@ -41,6 +43,7 @@ export const keyFunderConfig: KeyFunderConfig = {
polygonzkevm: '0.5',
redstone: '0.2',
scroll: '0.5',
sei: '10',
viction: '3',
zetachain: '20',
},
@ -53,8 +56,10 @@ export const keyFunderConfig: KeyFunderConfig = {
bsc: '0.35',
celo: '150',
ethereum: '0.4',
fraxtal: '0',
gnosis: '100',
inevm: '0.05',
linea: '0',
mantapacific: '0',
mode: '0',
moonbeam: '250',
@ -63,6 +68,7 @@ export const keyFunderConfig: KeyFunderConfig = {
polygonzkevm: '0.05',
redstone: '0',
scroll: '0.05',
sei: '0',
viction: '0.05',
zetachain: '0',
},

@ -1,6 +1,6 @@
{
"arbitrum": {
"amount": "0.01",
"amount": "0.5",
"decimals": 9
},
"ancient8": {
@ -16,7 +16,7 @@
"decimals": 9
},
"blast": {
"amount": "0.1",
"amount": "0.5",
"decimals": 9
},
"bsc": {
@ -31,8 +31,28 @@
"amount": "20",
"decimals": 9
},
"fraxtal": {
"amount": "0.001000253",
"decimals": 9
},
"gnosis": {
"amount": "5.877696928",
"decimals": 9
},
"inevm": {
"amount": "0.1",
"decimals": 9
},
"injective": {
"amount": "700000000",
"decimals": 1
},
"linea": {
"amount": "0.110844655",
"decimals": 9
},
"mantapacific": {
"amount": "0.100163166",
"amount": "0.101967574",
"decimals": 9
},
"mode": {
@ -43,22 +63,22 @@
"amount": "125.0",
"decimals": 9
},
"neutron": {
"amount": "0.0053",
"decimals": 1
},
"optimism": {
"amount": "0.061126811",
"amount": "0.25",
"decimals": 9
},
"osmosis": {
"amount": "0.025",
"decimals": 1
},
"polygon": {
"amount": "101.76455238",
"decimals": 9
},
"gnosis": {
"amount": "3.236596353",
"decimals": 9
},
"scroll": {
"amount": "1.3231",
"decimals": 9
},
"polygonzkevm": {
"amount": "3.95",
"decimals": 9
@ -67,24 +87,20 @@
"amount": "0.0003",
"decimals": 9
},
"inevm": {
"amount": "0.1",
"scroll": {
"amount": "1.3231",
"decimals": 9
},
"sei": {
"amount": "1.0",
"decimals": 9
},
"viction": {
"amount": "0.25",
"decimals": 9
},
"neutron": {
"amount": "0.0053",
"decimals": 1
},
"injective": {
"amount": "700000000",
"decimals": 1
},
"zetachain": {
"amount": "0.0001",
"amount": "10.1",
"decimals": 9
}
}

@ -14,6 +14,7 @@ import { agents } from './agent.js';
import {
chainMetadataOverrides,
environment as environmentName,
getRegistry,
} from './chains.js';
import { core } from './core.js';
import { keyFunderConfig } from './funding.js';
@ -24,14 +25,6 @@ import { bridgeAdapterConfigs, relayerConfig } from './liquidityLayer.js';
import { owners } from './owners.js';
import { supportedChainNames } from './supportedChainNames.js';
const getRegistry = async (useSecrets = true): Promise<IRegistry> =>
getRegistryForEnvironment(
environmentName,
supportedChainNames,
chainMetadataOverrides,
useSecrets,
);
export const environment: EnvironmentConfig = {
environment: environmentName,
supportedChainNames,

@ -1767,6 +1767,68 @@
"name": "DomaingRoutingIsm"
}
],
"fraxtal": [
{
"address": "0x0761b0827849abbf7b0cC09CE14e1C93D87f5004",
"constructorArguments": "",
"isProxy": false,
"name": "StaticMerkleRootMultisigIsmFactory"
},
{
"address": "0x3b9f24fD2ecfed0d3A88fa7f0E4e5747671981D7",
"constructorArguments": "",
"isProxy": true,
"name": "StaticMerkleRootMultisigIsm"
},
{
"address": "0x4Ed7d626f1E96cD1C0401607Bf70D95243E3dEd1",
"constructorArguments": "",
"isProxy": false,
"name": "StaticMessageIdMultisigIsmFactory"
},
{
"address": "0x71DCcD21B912F7d4f636af0C9eA5DC0C10617354",
"constructorArguments": "",
"isProxy": true,
"name": "StaticMessageIdMultisigIsm"
},
{
"address": "0x2f2aFaE1139Ce54feFC03593FeE8AB2aDF4a85A7",
"constructorArguments": "",
"isProxy": false,
"name": "StaticAggregationIsmFactory"
},
{
"address": "0x7f51A658837A315134A97ff8B586d71B726B7e61",
"constructorArguments": "",
"isProxy": true,
"name": "StaticAggregationIsm"
},
{
"address": "0xeA87ae93Fa0019a82A727bfd3eBd1cFCa8f64f1D",
"constructorArguments": "",
"isProxy": false,
"name": "StaticAggregationHookFactory"
},
{
"address": "0xDFF18Bf286c9cDd0fC653a28616460Cf7443F8EF",
"constructorArguments": "",
"isProxy": true,
"name": "StaticAggregationHook"
},
{
"address": "0x3a464f746D23Ab22155710f44dB16dcA53e0775E",
"constructorArguments": "",
"isProxy": false,
"name": "DomainRoutingIsmFactory"
},
{
"address": "0x3a49EcAC1031612D66fa20D6F40f214aCeAc2B4B",
"constructorArguments": "",
"isProxy": true,
"name": "DomaingRoutingIsm"
}
],
"gnosis": [
{
"address": "0x8E273260EAd8B72A085B19346A676d355740e875",
@ -2173,6 +2235,68 @@
"name": "DomaingRoutingIsm"
}
],
"linea": [
{
"address": "0x0761b0827849abbf7b0cC09CE14e1C93D87f5004",
"constructorArguments": "",
"isProxy": false,
"name": "StaticMerkleRootMultisigIsmFactory"
},
{
"address": "0x3b9f24fD2ecfed0d3A88fa7f0E4e5747671981D7",
"constructorArguments": "",
"isProxy": true,
"name": "StaticMerkleRootMultisigIsm"
},
{
"address": "0x4Ed7d626f1E96cD1C0401607Bf70D95243E3dEd1",
"constructorArguments": "",
"isProxy": false,
"name": "StaticMessageIdMultisigIsmFactory"
},
{
"address": "0x71DCcD21B912F7d4f636af0C9eA5DC0C10617354",
"constructorArguments": "",
"isProxy": true,
"name": "StaticMessageIdMultisigIsm"
},
{
"address": "0x2f2aFaE1139Ce54feFC03593FeE8AB2aDF4a85A7",
"constructorArguments": "",
"isProxy": false,
"name": "StaticAggregationIsmFactory"
},
{
"address": "0x7f51A658837A315134A97ff8B586d71B726B7e61",
"constructorArguments": "",
"isProxy": true,
"name": "StaticAggregationIsm"
},
{
"address": "0xeA87ae93Fa0019a82A727bfd3eBd1cFCa8f64f1D",
"constructorArguments": "",
"isProxy": false,
"name": "StaticAggregationHookFactory"
},
{
"address": "0xDFF18Bf286c9cDd0fC653a28616460Cf7443F8EF",
"constructorArguments": "",
"isProxy": true,
"name": "StaticAggregationHook"
},
{
"address": "0x3a464f746D23Ab22155710f44dB16dcA53e0775E",
"constructorArguments": "",
"isProxy": false,
"name": "DomainRoutingIsmFactory"
},
{
"address": "0x3a49EcAC1031612D66fa20D6F40f214aCeAc2B4B",
"constructorArguments": "",
"isProxy": true,
"name": "DomaingRoutingIsm"
}
],
"mantapacific": [
{
"address": "0x882CD0C5D50b6dD74b36Da4BDb059507fddEDdf2",
@ -3323,6 +3447,68 @@
"name": "DomaingRoutingIsm"
}
],
"sei": [
{
"address": "0x2C1FAbEcd7bFBdEBF27CcdB67baADB38b6Df90fC",
"constructorArguments": "",
"isProxy": false,
"name": "StaticMerkleRootMultisigIsmFactory"
},
{
"address": "0x4725F7b8037513915aAf6D6CBDE2920E28540dDc",
"constructorArguments": "",
"isProxy": true,
"name": "StaticMerkleRootMultisigIsm"
},
{
"address": "0x8b83fefd896fAa52057798f6426E9f0B080FCCcE",
"constructorArguments": "",
"isProxy": false,
"name": "StaticMessageIdMultisigIsmFactory"
},
{
"address": "0xAF03386044373E2fe26C5b1dCedF5a7e854a7a3F",
"constructorArguments": "",
"isProxy": true,
"name": "StaticMessageIdMultisigIsm"
},
{
"address": "0x8F7454AC98228f3504Bb91eA3D8Adafe6406110A",
"constructorArguments": "",
"isProxy": false,
"name": "StaticAggregationIsmFactory"
},
{
"address": "0x882CD0C5D50b6dD74b36Da4BDb059507fddEDdf2",
"constructorArguments": "",
"isProxy": true,
"name": "StaticAggregationIsm"
},
{
"address": "0xEb9FcFDC9EfDC17c1EC5E1dc085B98485da213D6",
"constructorArguments": "",
"isProxy": false,
"name": "StaticAggregationHookFactory"
},
{
"address": "0x19930232E9aFC4f4F09d09fe2375680fAc2100D0",
"constructorArguments": "",
"isProxy": true,
"name": "StaticAggregationHook"
},
{
"address": "0x1052eF3419f26Bec74Ed7CEf4a4FA6812Bc09908",
"constructorArguments": "",
"isProxy": false,
"name": "DomainRoutingIsmFactory"
},
{
"address": "0x12Ed1BbA182CbC63692F813651BD493B7445C874",
"constructorArguments": "",
"isProxy": true,
"name": "DomaingRoutingIsm"
}
],
"viction": [
{
"address": "0x2C1FAbEcd7bFBdEBF27CcdB67baADB38b6Df90fC",

@ -39,6 +39,9 @@ export const safes: ChainMap<Address | undefined> = {
polygonzkevm: '0x1610f578D4d77Fc4ae7ce2DD9AA0b98A5Cd0a9b2',
// injective: 'inj1632x8j35kenryam3mkrsez064sqg2y2fr0frzt',
// solana: 'EzppBFV2taxWw8kEjxNYvby6q7W1biJEqwP3iC7YgRe3',
blast: '0xaCD1865B262C89Fb0b50dcc8fB095330ae8F35b5',
linea: '0xaCD1865B262C89Fb0b50dcc8fB095330ae8F35b5',
mode: '0xaCD1865B262C89Fb0b50dcc8fB095330ae8F35b5',
};
export const DEPLOYER = '0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba';

@ -9,18 +9,22 @@ export const supportedChainNames = [
'bsc',
'celo',
'ethereum',
'fraxtal',
'gnosis',
'inevm',
'injective',
'linea',
'mantapacific',
'mode',
'moonbeam',
'neutron',
'optimism',
'osmosis',
'polygon',
'polygonzkevm',
'redstone',
'scroll',
'sei',
'viction',
'zetachain',
];

@ -1,24 +1,28 @@
{
"arbitrum": "2919.87",
"ancient8": "2919.87",
"avalanche": "33.19",
"base": "2919.87",
"blast": "2919.87",
"bsc": "570.1",
"celo": "0.738559",
"ethereum": "2919.87",
"gnosis": "1.005",
"inevm": "21.59",
"mantapacific": "2919.87",
"mode": "2919.87",
"moonbeam": "0.253144",
"optimism": "2919.87",
"polygon": "0.663051",
"polygonzkevm": "2919.87",
"redstone": "2919.87",
"scroll": "2919.87",
"viction": "0.424231",
"zetachain": "1.53",
"injective": "21.59",
"neutron": "0.606906"
"arbitrum": "3537.12",
"ancient8": "3537.12",
"avalanche": "32.04",
"base": "3537.12",
"blast": "3537.12",
"bsc": "611.69",
"celo": "0.709793",
"ethereum": "3537.12",
"fraxtal": "3519.8",
"gnosis": "1.001",
"inevm": "28.61",
"injective": "28.61",
"linea": "3537.12",
"mantapacific": "3537.12",
"mode": "3537.12",
"moonbeam": "0.278261",
"neutron": "0.623705",
"optimism": "3537.12",
"osmosis": "0.649286",
"polygon": "0.643148",
"polygonzkevm": "3537.12",
"redstone": "3537.12",
"scroll": "3537.12",
"sei": "0.477119",
"viction": "0.404896",
"zetachain": "1.16"
}

@ -164,6 +164,18 @@ export const validatorChainConfig = (
'optimism',
),
},
osmosis: {
interval: 5,
reorgPeriod: getReorgPeriod('osmosis'),
validators: validatorsConfig(
{
[Contexts.Hyperlane]: ['0xea483af11c19fa41b16c31d1534c2a486a92bcac'],
[Contexts.ReleaseCandidate]: [],
[Contexts.Neutron]: [],
},
'osmosis',
),
},
moonbeam: {
interval: 5,
reorgPeriod: getReorgPeriod('moonbeam'),
@ -256,6 +268,48 @@ export const validatorChainConfig = (
'inevm',
),
},
fraxtal: {
interval: 5,
reorgPeriod: getReorgPeriod('fraxtal'),
validators: validatorsConfig(
{
[Contexts.Hyperlane]: ['0x4bce180dac6da60d0f3a2bdf036ffe9004f944c1'],
[Contexts.ReleaseCandidate]: [
'0x8c772b730c8deb333dded14cb462e577a06283da',
],
[Contexts.Neutron]: [],
},
'fraxtal',
),
},
linea: {
interval: 5,
reorgPeriod: getReorgPeriod('linea'),
validators: validatorsConfig(
{
[Contexts.Hyperlane]: ['0xf2d5409a59e0f5ae7635aff73685624904a77d94'],
[Contexts.ReleaseCandidate]: [
'0xad4886b6f5f5088c7ae53b69d1ff5cfc2a17bec4',
],
[Contexts.Neutron]: [],
},
'linea',
),
},
sei: {
interval: 5,
reorgPeriod: getReorgPeriod('sei'),
validators: validatorsConfig(
{
[Contexts.Hyperlane]: ['0x9920d2dbf6c85ffc228fdc2e810bf895732c6aa5'],
[Contexts.ReleaseCandidate]: [
'0x846e48a7e85e5403cc690a347e1ad3c3dca11b6e',
],
[Contexts.Neutron]: [],
},
'sei',
),
},
scroll: {
interval: 5,
reorgPeriod: getReorgPeriod('scroll'),

@ -0,0 +1,26 @@
{
"bsc": {
"router": "0x6266e803057fa68C35018C3FB0B59db7129C23BB"
},
"arbitrum": {
"router": "0xC8F280d3eC30746f77c28695827d309d16939BF1"
},
"optimism": {
"router": "0x1d1a210E71398c17FD7987eDF1dc347539bB541F"
},
"base": {
"router": "0x584BA77ec804f8B6A559D196661C0242C6844F49"
},
"blast": {
"router": "0x8C603c6BDf8a9d548fC5D2995750Cc25eF59183b"
},
"mode": {
"router": "0xcd95B8dF351400BF4cbAb340b6EfF2454aDB299E"
},
"linea": {
"router": "0xcd95B8dF351400BF4cbAb340b6EfF2454aDB299E"
},
"ethereum": {
"router": "0xdFf621F952c23972dFD3A9E5d7B9f6339e9c078B"
}
}

@ -89,6 +89,28 @@ const gasPaymentEnforcement: GasPaymentEnforcement[] = [
},
];
// Resource requests are based on observed usage found in https://abacusworks.grafana.net/d/FSR9YWr7k
// K8s resource requests for the relayer — the heaviest of the three roles here.
const relayerResources = {
  requests: {
    cpu: '1000m',
    memory: '4Gi',
  },
};
// K8s resource requests for validator pods.
const validatorResources = {
  requests: {
    cpu: '250m',
    memory: '256Mi',
  },
};
// K8s resource requests for the scraper.
const scraperResources = {
  requests: {
    cpu: '100m',
    memory: '1Gi',
  },
};
const hyperlane: RootAgentConfig = {
...contextBase,
contextChainNames: hyperlaneContextAgentChainNames,
@ -98,7 +120,7 @@ const hyperlane: RootAgentConfig = {
rpcConsensusType: RpcConsensusType.Fallback,
docker: {
repo,
tag: 'c9c5d37-20240510-014327',
tag: '0d12ff3-20240620-173353',
},
blacklist: [
...releaseCandidateHelloworldMatchingList,
@ -122,21 +144,24 @@ const hyperlane: RootAgentConfig = {
matchingList: routerMatchingList(plumetestnetSepoliaAddresses),
},
],
resources: relayerResources,
},
validators: {
rpcConsensusType: RpcConsensusType.Fallback,
docker: {
repo,
tag: 'e09a360-20240520-090014',
tag: '0d12ff3-20240620-173353',
},
chains: validatorChainConfig(Contexts.Hyperlane),
resources: validatorResources,
},
scraper: {
rpcConsensusType: RpcConsensusType.Fallback,
docker: {
repo,
tag: 'c9c5d37-20240510-014327',
tag: '0d12ff3-20240620-173353',
},
resources: scraperResources,
},
};
@ -149,19 +174,21 @@ const releaseCandidate: RootAgentConfig = {
rpcConsensusType: RpcConsensusType.Fallback,
docker: {
repo,
tag: 'c9c5d37-20240510-014327',
tag: '0d12ff3-20240620-173353',
},
whitelist: [...releaseCandidateHelloworldMatchingList],
gasPaymentEnforcement,
transactionGasLimit: 750000,
resources: relayerResources,
},
validators: {
rpcConsensusType: RpcConsensusType.Fallback,
docker: {
repo,
tag: 'c9c5d37-20240510-014327',
tag: '0d12ff3-20240620-173353',
},
chains: validatorChainConfig(Contexts.ReleaseCandidate),
resources: validatorResources,
},
};

@ -50,7 +50,7 @@ export function setRegistry(reg: FileSystemRegistry) {
export function getRegistry(): FileSystemRegistry {
if (!registry) {
const registryUri = process.env.REGISTRY_URI || DEFAULT_REGISTRY_URI;
rootLogger.info({ registryUri }, 'Using registry URI');
rootLogger.debug({ registryUri }, 'Using registry URI');
registry = new FileSystemRegistry({
uri: registryUri,
logger: rootLogger.child({ module: 'infra-registry' }),

@ -1,7 +1,7 @@
{
"name": "@hyperlane-xyz/infra",
"description": "Infrastructure utilities for the Hyperlane Network",
"version": "3.15.0",
"version": "3.16.0",
"dependencies": {
"@arbitrum/sdk": "^3.0.0",
"@aws-sdk/client-iam": "^3.74.0",
@ -13,10 +13,10 @@
"@ethersproject/hardware-wallets": "^5.7.0",
"@ethersproject/providers": "^5.7.2",
"@google-cloud/secret-manager": "^5.5.0",
"@hyperlane-xyz/helloworld": "3.15.0",
"@hyperlane-xyz/registry": "^2.1.1",
"@hyperlane-xyz/sdk": "3.15.0",
"@hyperlane-xyz/utils": "3.15.0",
"@hyperlane-xyz/helloworld": "3.16.0",
"@hyperlane-xyz/registry": "2.1.1",
"@hyperlane-xyz/sdk": "3.16.0",
"@hyperlane-xyz/utils": "3.16.0",
"@nomiclabs/hardhat-etherscan": "^3.0.3",
"@solana/web3.js": "^1.78.0",
"asn1.js": "5.4.1",

@ -4,7 +4,6 @@ import yargs, { Argv } from 'yargs';
import { ChainAddresses, IRegistry } from '@hyperlane-xyz/registry';
import {
ChainMap,
ChainMetadata,
ChainName,
CoreConfig,
MultiProtocolProvider,
@ -16,7 +15,6 @@ import {
ProtocolType,
objFilter,
objMap,
objMerge,
promiseObjAll,
rootLogger,
symmetricDifference,
@ -36,10 +34,6 @@ import { getCurrentKubernetesContext } from '../src/agents/index.js';
import { getCloudAgentKey } from '../src/agents/key-utils.js';
import { CloudAgentKey } from '../src/agents/keys.js';
import { RootAgentConfig } from '../src/config/agent/agent.js';
import {
fetchProvider,
getSecretMetadataOverrides,
} from '../src/config/chain.js';
import {
AgentEnvironment,
DeployEnvironment,
@ -50,6 +44,7 @@ import { Role } from '../src/roles.js';
import {
assertContext,
assertRole,
filterRemoteDomainMetadata,
getInfraPath,
inCIMode,
readJSONAtPath,
@ -121,6 +116,18 @@ export function withChain<T>(args: Argv<T>) {
.alias('c', 'chain');
}
/**
 * Adds a repeatable `--chains` (alias `-c`) option restricted to known chain
 * names. Duplicate values supplied by the caller are collapsed.
 */
export function withChains<T>(args: Argv<T>) {
  const chainsArgs = args
    .describe('chains', 'Set of chains to perform actions on.')
    .array('chains')
    .choices('chains', getChains());
  return chainsArgs
    .coerce('chains', (chainList: string[]) => [...new Set(chainList)])
    .alias('c', 'chains');
}
export function withProtocol<T>(args: Argv<T>) {
return args
.describe('protocol', 'protocol type')
@ -424,6 +431,8 @@ export function writeAddresses(
module: Modules,
addressesMap: ChainMap<Record<string, Address>>,
) {
addressesMap = filterRemoteDomainMetadata(addressesMap);
if (isRegistryModule(environment, module)) {
for (const [chainName, addresses] of Object.entries(addressesMap)) {
getRegistry().updateChain({ chainName, addresses });

@ -12,6 +12,7 @@ import {
assertCorrectKubeContext,
getArgs,
withAgentRole,
withChains,
withContext,
} from '../agent-utils.js';
import { getConfigsBasedOnArgs } from '../core-utils.js';
@ -22,6 +23,7 @@ export class AgentCli {
agentConfig!: RootAgentConfig;
initialized = false;
dryRun = false;
chains?: string[];
public async runHelmCommand(command: HelmCommand) {
await this.init();
@ -30,12 +32,19 @@ export class AgentCli {
// make all the managers first to ensure config validity
for (const role of this.roles) {
switch (role) {
case Role.Validator:
for (const chain of this.agentConfig.contextChainNames[role]) {
case Role.Validator: {
const contextChainNames = this.agentConfig.contextChainNames[role];
const validatorChains = !this.chains
? contextChainNames
: contextChainNames.filter((chain: string) =>
this.chains!.includes(chain),
);
for (const chain of validatorChains) {
const key = `${role}-${chain}`;
managers[key] = new ValidatorHelmManager(this.agentConfig, chain);
}
break;
}
case Role.Relayer:
managers[role] = new RelayerHelmManager(this.agentConfig);
break;
@ -61,10 +70,18 @@ export class AgentCli {
protected async init() {
if (this.initialized) return;
const argv = await withAgentRole(withContext(getArgs()))
const argv = await withChains(withAgentRole(withContext(getArgs())))
.describe('dry-run', 'Run through the steps without making any changes')
.boolean('dry-run').argv;
if (
argv.chains &&
argv.chains.length > 0 &&
!argv.role.includes(Role.Validator)
) {
console.warn('Chain argument applies to validator role only. Ignoring.');
}
const { envConfig, agentConfig } = await getConfigsBasedOnArgs(argv);
await assertCorrectKubeContext(envConfig);
this.roles = argv.role;
@ -72,5 +89,6 @@ export class AgentCli {
this.agentConfig = agentConfig;
this.dryRun = argv.dryRun || false;
this.initialized = true;
this.chains = argv.chains;
}
}

@ -50,7 +50,7 @@ async function main() {
const validator = await InfraS3Validator.fromStorageLocation(location);
announcements.push({
storageLocation: validator.storageLocation(),
announcement: await validator.getAnnouncement(),
announcement: await validator.getSignedAnnouncement(),
});
} else if (location.startsWith('file://')) {
const announcementFilepath = path.join(
@ -97,7 +97,7 @@ async function main() {
);
announcements.push({
storageLocation: validator.storageLocation(),
announcement: await validator.getAnnouncement(),
announcement: await validator.getSignedAnnouncement(),
});
chains.push(validatorChain);
}

@ -13,6 +13,7 @@ import {
} from '@hyperlane-xyz/sdk';
import { Contexts } from '../config/contexts.js';
import { DEPLOYER } from '../config/environments/mainnet3/owners.js';
import { getWarpConfig } from '../config/warp.js';
import { HyperlaneAppGovernor } from '../src/govern/HyperlaneAppGovernor.js';
import { HyperlaneCoreGovernor } from '../src/govern/HyperlaneCoreGovernor.js';
@ -25,6 +26,7 @@ import {
Modules,
getAddresses,
getArgs as getRootArgs,
withChain,
withContext,
withModuleAndFork,
} from './agent-utils.js';
@ -32,14 +34,17 @@ import { getEnvironmentConfig, getHyperlaneCore } from './core-utils.js';
import { getHelloWorldApp } from './helloworld/utils.js';
function getArgs() {
return withModuleAndFork(withContext(getRootArgs()))
return withChain(withModuleAndFork(withContext(getRootArgs())))
.boolean('asDeployer')
.default('asDeployer', false)
.boolean('govern')
.default('govern', false)
.alias('g', 'govern').argv;
}
async function check() {
const { fork, govern, module, environment, context } = await getArgs();
const { fork, govern, module, environment, context, chain, asDeployer } =
await getArgs();
const envConfig = getEnvironmentConfig(environment);
let multiProvider = await envConfig.getMultiProvider();
@ -52,14 +57,17 @@ async function check() {
[fork]: { blocks: { confirmations: 0 } },
});
const owner = envConfig.core[fork].owner;
const owner = asDeployer ? DEPLOYER : envConfig.core[fork].owner;
const signer = await impersonateAccount(owner, 1e18);
multiProvider.setSigner(fork, signer);
}
}
const { core, chainAddresses } = await getHyperlaneCore(environment);
const { core, chainAddresses } = await getHyperlaneCore(
environment,
multiProvider,
);
const ismFactory = HyperlaneIsmFactory.fromAddressesMap(
chainAddresses,
multiProvider,
@ -141,6 +149,11 @@ async function check() {
if (govern) {
await governor.govern(false, fork);
}
} else if (chain) {
await governor.checker.checkChain(chain);
if (govern) {
await governor.govern(true, chain);
}
} else {
await governor.checker.check();
if (govern) {

@ -2,6 +2,7 @@ import { ethers } from 'ethers';
import path from 'path';
import prompts from 'prompts';
import { buildArtifact as coreBuildArtifact } from '@hyperlane-xyz/core/buildArtifact.js';
import { HelloWorldDeployer } from '@hyperlane-xyz/helloworld';
import {
ChainMap,
@ -20,7 +21,7 @@ import {
LiquidityLayerDeployer,
TestRecipientDeployer,
} from '@hyperlane-xyz/sdk';
import { objMap } from '@hyperlane-xyz/utils';
import { objFilter, objMap } from '@hyperlane-xyz/utils';
import { Contexts } from '../config/contexts.js';
import { core as coreConfig } from '../config/environments/mainnet3/core.js';
@ -33,6 +34,7 @@ import {
fetchExplorerApiKeys,
} from '../src/deployment/verify.js';
import { impersonateAccount, useLocalProvider } from '../src/utils/fork.js';
import { inCIMode } from '../src/utils/utils.js';
import {
Modules,
@ -40,7 +42,7 @@ import {
getArgs,
getModuleDirectory,
withBuildArtifactPath,
withChain,
withChains,
withConcurrentDeploy,
withContext,
withModuleAndFork,
@ -53,12 +55,12 @@ async function main() {
module,
fork,
environment,
chain,
buildArtifactPath,
chains,
concurrentDeploy,
} = await withContext(
withConcurrentDeploy(
withChain(withModuleAndFork(withBuildArtifactPath(getArgs()))),
withChains(withModuleAndFork(withBuildArtifactPath(getArgs()))),
),
).argv;
const envConfig = getEnvironmentConfig(environment);
@ -82,20 +84,16 @@ async function main() {
multiProvider.setSharedSigner(signer);
}
let contractVerifier;
if (buildArtifactPath) {
// fetch explorer API keys from GCP
const apiKeys = await fetchExplorerApiKeys();
// extract build artifact contents
const buildArtifact = extractBuildArtifact(buildArtifactPath);
// instantiate verifier
contractVerifier = new ContractVerifier(
multiProvider,
apiKeys,
buildArtifact,
ExplorerLicenseType.MIT,
);
}
// if none provided, instantiate a default verifier with the default core contract build artifact
// fetch explorer API keys from GCP
const contractVerifier = new ContractVerifier(
multiProvider,
inCIMode() ? {} : await fetchExplorerApiKeys(),
buildArtifactPath
? extractBuildArtifact(buildArtifactPath)
: coreBuildArtifact,
ExplorerLicenseType.MIT,
);
let config: ChainMap<unknown> = {};
let deployer: HyperlaneDeployer<any, any>;
@ -130,7 +128,11 @@ async function main() {
);
} else if (module === Modules.INTERCHAIN_GAS_PAYMASTER) {
config = envConfig.igp;
deployer = new HyperlaneIgpDeployer(multiProvider, contractVerifier);
deployer = new HyperlaneIgpDeployer(
multiProvider,
contractVerifier,
concurrentDeploy,
);
} else if (module === Modules.INTERCHAIN_ACCOUNTS) {
const { core } = await getHyperlaneCore(environment, multiProvider);
config = core.getRouterConfig(envConfig.owners);
@ -227,7 +229,12 @@ async function main() {
// prompt for confirmation in production environments
if (environment !== 'test' && !fork) {
const confirmConfig = chain ? config[chain] : config;
const confirmConfig =
chains && chains.length > 0
? objFilter(config, (chain, _): _ is unknown =>
(chains ?? []).includes(chain),
)
: config;
console.log(JSON.stringify(confirmConfig, null, 2));
const { value: confirmed } = await prompts({
type: 'confirm',
@ -240,13 +247,15 @@ async function main() {
}
}
await deployWithArtifacts(
config,
await deployWithArtifacts({
configMap: config as ChainMap<unknown>, // TODO: fix this typing
deployer,
cache,
chain ?? fork,
// Use chains if provided, otherwise deploy to all chains
// If fork is provided, deploy to fork only
targetNetworks: chains ?? !fork ? [] : [fork],
agentConfig,
);
});
}
main()

@ -1,26 +1,26 @@
import { Provider } from '@ethersproject/providers';
import { ethers } from 'ethers';
import {
ChainMap,
MultiProtocolProvider,
ProviderType,
} from '@hyperlane-xyz/sdk';
import { ChainMap, MultiProtocolProvider } from '@hyperlane-xyz/sdk';
import { ProtocolType } from '@hyperlane-xyz/utils';
// Intentionally circumvent `mainnet3/index.ts` and `getEnvironmentConfig('mainnet3')`
// to avoid circular dependencies.
import { getRegistry as getMainnet3Registry } from '../config/environments/mainnet3/chains.js';
import { supportedChainNames as mainnet3SupportedChainNames } from '../config/environments/mainnet3/supportedChainNames.js';
import {
GasPriceConfig,
getCosmosChainGasPrice,
} from '../src/config/gas-oracle.js';
import { getEnvironmentConfig } from './core-utils.js';
async function main() {
const environmentConfig = getEnvironmentConfig('mainnet3');
const mpp = await environmentConfig.getMultiProtocolProvider();
const registry = await getMainnet3Registry();
const chainMetadata = await registry.getMetadata();
const mpp = new MultiProtocolProvider(chainMetadata);
const prices: ChainMap<GasPriceConfig> = Object.fromEntries(
await Promise.all(
environmentConfig.supportedChainNames.map(async (chain) => [
mainnet3SupportedChainNames.map(async (chain) => [
chain,
await getGasPrice(mpp, chain),
]),
@ -34,16 +34,17 @@ async function getGasPrice(
mpp: MultiProtocolProvider,
chain: string,
): Promise<GasPriceConfig> {
const provider = mpp.getProvider(chain);
switch (provider.type) {
case ProviderType.EthersV5: {
const gasPrice = await provider.provider.getGasPrice();
const protocolType = mpp.getProtocol(chain);
switch (protocolType) {
case ProtocolType.Ethereum: {
const provider = mpp.getProvider(chain);
const gasPrice = await (provider.provider as Provider).getGasPrice();
return {
amount: ethers.utils.formatUnits(gasPrice, 'gwei'),
decimals: 9,
};
}
case ProviderType.CosmJsWasm: {
case ProtocolType.Cosmos: {
const { amount } = await getCosmosChainGasPrice(chain);
return {
@ -51,14 +52,14 @@ async function getGasPrice(
decimals: 1,
};
}
case ProviderType.SolanaWeb3:
case ProtocolType.Sealevel:
// TODO get a reasonable value
return {
amount: '0.001',
decimals: 9,
};
default:
throw new Error(`Unsupported provider type: ${provider.type}`);
throw new Error(`Unsupported protocol type: ${protocolType}`);
}
}

@ -1,16 +1,18 @@
import { objMap, pick } from '@hyperlane-xyz/utils';
import { getEnvironmentConfig } from './core-utils.js';
// Intentionally circumvent `mainnet3/index.ts` and `getEnvironmentConfig('mainnet3')`
// to avoid circular dependencies.
import { getRegistry as getMainnet3Registry } from '../config/environments/mainnet3/chains.js';
import { supportedChainNames as mainnet3SupportedChainNames } from '../config/environments/mainnet3/supportedChainNames.js';
const CURRENCY = 'usd';
async function main() {
const environmentConfig = getEnvironmentConfig('mainnet3');
const registry = await environmentConfig.getRegistry();
const registry = await getMainnet3Registry();
const chainMetadata = await registry.getMetadata();
const metadata = pick(
await registry.getMetadata(),
environmentConfig.supportedChainNames,
mainnet3SupportedChainNames,
);
const ids = objMap(

@ -10,6 +10,7 @@ import {
AgentContextConfig,
DockerConfig,
HelmRootAgentValues,
KubernetesResources,
RootAgentConfig,
} from '../config/agent/agent.js';
import { RelayerConfigHelper } from '../config/agent/relayer.js';
@ -138,7 +139,7 @@ export abstract class AgentHelmManager {
rpcConsensusType: this.rpcConsensusType(chain),
protocol: metadata.protocol,
blocks: { reorgPeriod },
maxBatchSize: 4,
maxBatchSize: 32,
};
}),
},
@ -151,7 +152,7 @@ export abstract class AgentHelmManager {
return RpcConsensusType.Single;
}
return this.config.rpcConsensusType;
return this.config.agentRoleConfig.rpcConsensusType;
}
async doesAgentReleaseExist() {
@ -169,7 +170,11 @@ export abstract class AgentHelmManager {
}
dockerImage(): DockerConfig {
return this.config.docker;
return this.config.agentRoleConfig.docker;
}
// K8s resources configured for this agent role, if any were specified.
kubernetesResources(): KubernetesResources | undefined {
  return this.config.agentRoleConfig.resources;
}
}
@ -216,6 +221,7 @@ export class RelayerHelmManager extends OmniscientAgentHelmManager {
enabled: true,
aws: this.config.requiresAwsCredentials,
config: await this.config.buildConfig(),
resources: this.kubernetesResources(),
};
const signers = await this.config.signers();
@ -244,6 +250,7 @@ export class ScraperHelmManager extends OmniscientAgentHelmManager {
values.hyperlane.scraper = {
enabled: true,
config: await this.config.buildConfig(),
resources: this.kubernetesResources(),
};
// scraper never requires aws credentials
values.hyperlane.aws = false;
@ -279,6 +286,7 @@ export class ValidatorHelmManager extends MultichainAgentHelmManager {
originChainName: cfg.originChainName,
interval: cfg.interval,
})),
resources: this.kubernetesResources(),
};
// The name of the helm release for agents is `hyperlane-agent`.

@ -89,8 +89,12 @@ export interface AgentContextConfig extends AgentEnvConfig {
// incomplete common agent configuration for a role
interface AgentRoleConfig {
// K8s-specific
docker: DockerConfig;
chainDockerOverrides?: Record<ChainName, Partial<DockerConfig>>;
resources?: KubernetesResources;
// Agent-specific
rpcConsensusType: RpcConsensusType;
index?: IndexingConfig;
}
@ -119,6 +123,16 @@ export interface DockerConfig {
tag: string;
}
// Kubernetes resource configuration for an agent pod; `requests` and
// `limits` follow standard K8s semantics and either may be omitted.
export interface KubernetesResources {
  requests?: KubernetesComputeResources;
  limits?: KubernetesComputeResources;
}
// CPU/memory quantities in Kubernetes notation (e.g. '250m', '256Mi').
export interface KubernetesComputeResources {
  cpu: string;
  memory: string;
}
export class RootAgentConfigHelper implements AgentContextConfig {
readonly rawConfig: RootAgentConfig;
@ -154,21 +168,14 @@ export class RootAgentConfigHelper implements AgentContextConfig {
}
}
export abstract class AgentConfigHelper<R = unknown>
extends RootAgentConfigHelper
implements AgentRoleConfig
{
rpcConsensusType: RpcConsensusType;
docker: DockerConfig;
chainDockerOverrides?: Record<ChainName, Partial<DockerConfig>>;
index?: IndexingConfig;
protected constructor(root: RootAgentConfig, agent: AgentRoleConfig) {
export abstract class AgentConfigHelper<
R = unknown,
> extends RootAgentConfigHelper {
protected constructor(
root: RootAgentConfig,
readonly agentRoleConfig: AgentRoleConfig,
) {
super(root);
this.rpcConsensusType = agent.rpcConsensusType;
this.docker = agent.docker;
this.chainDockerOverrides = agent.chainDockerOverrides;
this.index = agent.index;
}
// role this config is for
@ -178,13 +185,13 @@ export abstract class AgentConfigHelper<R = unknown>
// If the provided chain has an override, return the override, otherwise return the default.
dockerImageForChain(chainName: ChainName): DockerConfig {
if (this.chainDockerOverrides?.[chainName]) {
if (this.agentRoleConfig.chainDockerOverrides?.[chainName]) {
return {
...this.docker,
...this.chainDockerOverrides[chainName],
...this.agentRoleConfig.docker,
...this.agentRoleConfig.chainDockerOverrides[chainName],
};
}
return this.docker;
return this.agentRoleConfig.docker;
}
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save