feat: forward-backward message processor (#3775)

### Description

- adds logic for iterating message nonces in the processor task in a
forward-backward fashion (a sketch of the idea follows this list)
- removes all manual calls that were updating the next nonce to query;
this is now handled automatically by the iterator struct
- stores the highest known message nonce in the db, which is used to
initialize the iterator
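
For illustration, here is a minimal sketch of the alternation rule. The `Status` enum and `next_nonce` helper are shorthand for this sketch only; the real types are `MessageStatus` and `ForwardBackwardIterator` in `processor.rs` below.

```rust
/// Simplified status of a nonce in the DB (shorthand for the `MessageStatus` enum in the diff).
enum Status {
    Processable(u32),
    Processed,
    Unindexed,
}

/// One step of the forward-backward scan: prefer the next "new" nonce above the
/// highest seen one, fall back to backfilling older nonces, skip anything already
/// processed, and stop when neither direction has an indexed message.
fn next_nonce(
    high: &mut u32,
    low: &mut Option<u32>,
    status_of: impl Fn(Option<u32>) -> Status,
) -> Option<u32> {
    loop {
        match (status_of(Some(*high)), status_of(*low)) {
            // Skip messages that are already processed, in either direction.
            (Status::Processed, _) => *high += 1,
            (_, Status::Processed) => *low = low.and_then(|n| n.checked_sub(1)),
            // A forward (newer) message always wins over a backward one.
            (Status::Processable(n), _) => {
                *high += 1;
                return Some(n);
            }
            (_, Status::Processable(n)) => {
                *low = low.and_then(|x| x.checked_sub(1));
                return Some(n);
            }
            // Nothing indexed in either direction yet.
            (Status::Unindexed, Status::Unindexed) => return None,
        }
    }
}

fn main() {
    // Toy "DB": nonces 0..=3 are indexed, and nonce 2 is already processed.
    let status = |nonce: Option<u32>| match nonce {
        Some(2) => Status::Processed,
        Some(n) if n <= 3 => Status::Processable(n),
        _ => Status::Unindexed,
    };

    // Highest seen nonce is 2, so the forward cursor starts at 2 and the
    // backward cursor starts at 1.
    let (mut high, mut low) = (2u32, Some(1u32));
    let mut order = vec![];
    while let Some(n) = next_nonce(&mut high, &mut low, &status) {
        order.push(n);
    }
    assert_eq!(order, vec![3, 1, 0]); // newest first, then backfill
}
```

Newer messages are always preferred; the backward cursor only backfills while the forward cursor has nothing indexed yet.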

### Drive-by changes

- Converts the concrete `HyperlaneRocksDB` type stored in the processor to a
trait object, to enable mocking DB responses in tests (sketch below)
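
A rough illustration of why the trait object helps, using a pared-down stand-in for the `ProcessMessage` trait (the real trait lives in `hyperlane_db.rs` below, returns `DbResult`, and is mocked with `mockall` in the tests):

```rust
use std::sync::Arc;

// Pared-down stand-in for the `ProcessMessage` trait; the real trait has more
// methods and returns `DbResult<...>`.
trait ProcessMessage: Send + Sync {
    fn retrieve_highest_seen_message_nonce(&self) -> Option<u32>;
}

// A hand-written fake for illustration; the actual tests generate a mock
// with `mockall::mock!` instead.
struct FakeDb {
    highest_seen: Option<u32>,
}

impl ProcessMessage for FakeDb {
    fn retrieve_highest_seen_message_nonce(&self) -> Option<u32> {
        self.highest_seen
    }
}

fn main() {
    // Because only `Arc<dyn ProcessMessage>` is stored, the processor code
    // works the same whether it is backed by RocksDB or by a test double.
    let db: Arc<dyn ProcessMessage> = Arc::new(FakeDb { highest_seen: Some(7) });
    assert_eq!(db.retrieve_highest_seen_message_nonce(), Some(7));
}
```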

### Related issues

- Fixes https://github.com/hyperlane-xyz/hyperlane-monorepo/issues/3796
- Fixes https://github.com/hyperlane-xyz/hyperlane-monorepo/issues/3816

### Backward compatibility

Yes - if there is no db entry for the new prefix, the processor starts
from nonce zero, so no migration is required
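
A minimal sketch of that fallback, using a hypothetical `starting_nonce` helper that mirrors the `unwrap_or_default()` call in `ForwardBackwardIterator::new` (error handling elided):

```rust
fn starting_nonce(highest_seen: Option<u32>) -> u32 {
    // A missing DB entry for the new prefix simply means "start at nonce 0",
    // so pre-existing relayer databases work without a migration.
    highest_seen.unwrap_or_default()
}

fn main() {
    assert_eq!(starting_nonce(None), 0); // fresh or pre-upgrade DB
    assert_eq!(starting_nonce(Some(42)), 42); // DB already tracking a nonce
}
```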

### Testing

Unit testing + e2e
Branch: cli-patches
Daniel Savu committed 6 months ago (via GitHub)
Commit 214f503e53 (parent 704675c7e1)
Changed files:

1. rust/Cargo.lock (1)
2. rust/agents/relayer/Cargo.toml (1)
3. rust/agents/relayer/src/msg/processor.rs (342)
4. rust/chains/hyperlane-ethereum/src/contracts/mailbox.rs (2)
5. rust/hyperlane-base/src/contract_sync/cursors/sequence_aware/backward.rs (26)
6. rust/hyperlane-base/src/contract_sync/cursors/sequence_aware/forward.rs (22)
7. rust/hyperlane-base/src/db/rocks/hyperlane_db.rs (58)

rust/Cargo.lock (generated)

@ -7032,6 +7032,7 @@ dependencies = [
"hyperlane-ethereum",
"hyperlane-test",
"itertools 0.12.0",
"mockall",
"num-derive 0.4.1",
"num-traits",
"once_cell",

rust/agents/relayer/Cargo.toml

@ -43,6 +43,7 @@ hyperlane-ethereum = { path = "../../chains/hyperlane-ethereum" }
[dev-dependencies]
once_cell.workspace = true
mockall.workspace = true
tokio-test.workspace = true
hyperlane-test = { path = "../../hyperlane-test" }
hyperlane-base = { path = "../../hyperlane-base", features = ["test-utils"] }

rust/agents/relayer/src/msg/processor.rs

@ -1,4 +1,5 @@
use std::{
cmp::max,
collections::HashMap,
fmt::{Debug, Formatter},
sync::Arc,
@ -8,11 +9,14 @@ use std::{
use async_trait::async_trait;
use derive_new::new;
use eyre::Result;
use hyperlane_base::{db::HyperlaneRocksDB, CoreMetrics};
use hyperlane_base::{
db::{HyperlaneRocksDB, ProcessMessage},
CoreMetrics,
};
use hyperlane_core::{HyperlaneDomain, HyperlaneMessage};
use prometheus::IntGauge;
use tokio::sync::mpsc::UnboundedSender;
use tracing::{debug, trace};
use tracing::{debug, instrument, trace};
use super::{metadata::AppContextClassifier, op_queue::QueueOperation, pending_message::*};
use crate::{processor::ProcessorExt, settings::matching_list::MatchingList};
@ -20,9 +24,7 @@ use crate::{processor::ProcessorExt, settings::matching_list::MatchingList};
/// Finds unprocessed messages from an origin and submits them through a channel
/// to the appropriate destination.
#[allow(clippy::too_many_arguments)]
#[derive(new)]
pub struct MessageProcessor {
db: HyperlaneRocksDB,
whitelist: Arc<MatchingList>,
blacklist: Arc<MatchingList>,
metrics: MessageProcessorMetrics,
@ -32,16 +34,187 @@ pub struct MessageProcessor {
/// Needed context to send a message for each destination chain
destination_ctxs: HashMap<u32, Arc<MessageContext>>,
metric_app_contexts: Vec<(MatchingList, String)>,
#[new(default)]
message_nonce: u32,
nonce_iterator: ForwardBackwardIterator,
}
#[derive(Debug)]
struct ForwardBackwardIterator {
low_nonce_iter: DirectionalNonceIterator,
high_nonce_iter: DirectionalNonceIterator,
// here for debugging purposes
_domain: String,
}
impl ForwardBackwardIterator {
#[instrument(skip(db), ret)]
fn new(db: Arc<dyn ProcessMessage>) -> Self {
let high_nonce = db.retrieve_highest_seen_message_nonce().ok().flatten();
let domain = db.domain().name().to_owned();
let high_nonce_iter = DirectionalNonceIterator::new(
// If the high nonce is None, we start from the beginning
high_nonce.unwrap_or_default().into(),
NonceDirection::High,
db.clone(),
domain.clone(),
);
let mut low_nonce_iter =
DirectionalNonceIterator::new(high_nonce, NonceDirection::Low, db, domain.clone());
// Decrement the low nonce to avoid processing the same message twice, which causes double counts in metrics
low_nonce_iter.iterate();
debug!(
?low_nonce_iter,
?high_nonce_iter,
?domain,
"Initialized ForwardBackwardIterator"
);
Self {
low_nonce_iter,
high_nonce_iter,
_domain: domain,
}
}
async fn try_get_next_message(
&mut self,
metrics: &MessageProcessorMetrics,
) -> Result<Option<HyperlaneMessage>> {
loop {
let high_nonce_message_status = self.high_nonce_iter.try_get_next_nonce(metrics)?;
let low_nonce_message_status = self.low_nonce_iter.try_get_next_nonce(metrics)?;
// Always prioritize the high nonce message
match (high_nonce_message_status, low_nonce_message_status) {
// Keep iterating if only processed messages are found
(MessageStatus::Processed, _) => {
self.high_nonce_iter.iterate();
}
(_, MessageStatus::Processed) => {
self.low_nonce_iter.iterate();
}
// Otherwise return - either a processable message or nothing to process
(MessageStatus::Processable(high_nonce_message), _) => {
self.high_nonce_iter.iterate();
return Ok(Some(high_nonce_message));
}
(_, MessageStatus::Processable(low_nonce_message)) => {
self.low_nonce_iter.iterate();
return Ok(Some(low_nonce_message));
}
(MessageStatus::Unindexed, MessageStatus::Unindexed) => return Ok(None),
}
// This loop may iterate through millions of processed messages, blocking the runtime.
// So, to avoid starving other futures in this task, yield to the runtime
// on each iteration
tokio::task::yield_now().await;
}
}
}
#[derive(Debug, Clone, Copy, Default, PartialEq)]
enum NonceDirection {
#[default]
High,
Low,
}
#[derive(new)]
struct DirectionalNonceIterator {
nonce: Option<u32>,
direction: NonceDirection,
db: Arc<dyn ProcessMessage>,
domain_name: String,
}
impl Debug for DirectionalNonceIterator {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"DirectionalNonceIterator {{ nonce: {:?}, direction: {:?}, domain: {:?} }}",
self.nonce, self.direction, self.domain_name
)
}
}
impl DirectionalNonceIterator {
#[instrument]
fn iterate(&mut self) {
match self.direction {
NonceDirection::High => self.nonce = self.nonce.map(|n| n.saturating_add(1)),
NonceDirection::Low => {
if let Some(nonce) = self.nonce {
// once the message with nonce zero is processed, we should stop going backwards
self.nonce = nonce.checked_sub(1);
}
}
}
}
fn try_get_next_nonce(
&mut self,
metrics: &MessageProcessorMetrics,
) -> Result<MessageStatus<HyperlaneMessage>> {
if let Some(message) = self.indexed_message_with_nonce()? {
Self::update_max_nonce_gauge(&message, metrics);
if !self.is_message_processed()? {
return Ok(MessageStatus::Processable(message));
} else {
return Ok(MessageStatus::Processed);
}
}
Ok(MessageStatus::Unindexed)
}
fn update_max_nonce_gauge(message: &HyperlaneMessage, metrics: &MessageProcessorMetrics) {
let current_max = metrics.max_last_known_message_nonce_gauge.get();
metrics
.max_last_known_message_nonce_gauge
.set(max(current_max, message.nonce as i64));
if let Some(metrics) = metrics.get(message.destination) {
metrics.set(message.nonce as i64);
}
}
fn indexed_message_with_nonce(&self) -> Result<Option<HyperlaneMessage>> {
match self.nonce {
Some(nonce) => {
let msg = self.db.retrieve_message_by_nonce(nonce)?;
Ok(msg)
}
None => Ok(None),
}
}
fn is_message_processed(&self) -> Result<bool> {
let Some(nonce) = self.nonce else {
return Ok(false);
};
let processed = self.db.retrieve_processed_by_nonce(nonce)?.unwrap_or(false);
if processed {
trace!(
nonce,
domain = self.db.domain().name(),
"Message already marked as processed in DB"
);
}
Ok(processed)
}
}
#[derive(Debug)]
enum MessageStatus<T> {
/// The message wasn't indexed yet so can't be processed.
Unindexed,
/// The message was indexed and is ready to be processed.
Processable(T),
/// The message was indexed and already processed.
Processed,
}
impl Debug for MessageProcessor {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"MessageProcessor {{ whitelist: {:?}, blacklist: {:?}, message_nonce: {:?} }}",
self.whitelist, self.blacklist, self.message_nonce
"MessageProcessor {{ whitelist: {:?}, blacklist: {:?}, nonce_iterator: {:?}}}",
self.whitelist, self.blacklist, self.nonce_iterator
)
}
}
@ -50,7 +223,7 @@ impl Debug for MessageProcessor {
impl ProcessorExt for MessageProcessor {
/// The domain this processor is getting messages from.
fn domain(&self) -> &HyperlaneDomain {
self.db.domain()
self.nonce_iterator.high_nonce_iter.db.domain()
}
/// One round of processing, extracted from infinite work loop for
@ -61,35 +234,31 @@ impl ProcessorExt for MessageProcessor {
// self.tx_msg and then continue the scan at the next highest
// nonce.
// Scan until we find next nonce without delivery confirmation.
if let Some(msg) = self.try_get_unprocessed_message()? {
if let Some(msg) = self.try_get_unprocessed_message().await? {
debug!(?msg, "Processor working on message");
let destination = msg.destination;
// Skip if not whitelisted.
if !self.whitelist.msg_matches(&msg, true) {
debug!(?msg, whitelist=?self.whitelist, "Message not whitelisted, skipping");
self.message_nonce += 1;
return Ok(());
}
// Skip if the message is blacklisted
if self.blacklist.msg_matches(&msg, false) {
debug!(?msg, blacklist=?self.blacklist, "Message blacklisted, skipping");
self.message_nonce += 1;
return Ok(());
}
// Skip if the message is intended for this origin
if destination == self.domain().id() {
debug!(?msg, "Message destined for self, skipping");
self.message_nonce += 1;
return Ok(());
}
// Skip if the message is intended for a destination we do not service
if !self.send_channels.contains_key(&destination) {
debug!(?msg, "Message destined for unknown domain, skipping");
self.message_nonce += 1;
return Ok(());
}
@ -106,7 +275,6 @@ impl ProcessorExt for MessageProcessor {
app_context,
);
self.send_channels[&destination].send(Box::new(pending_msg) as QueueOperation)?;
self.message_nonce += 1;
} else {
tokio::time::sleep(Duration::from_secs(1)).await;
}
@ -115,34 +283,36 @@ impl ProcessorExt for MessageProcessor {
}
impl MessageProcessor {
fn try_get_unprocessed_message(&mut self) -> Result<Option<HyperlaneMessage>> {
loop {
// First, see if we can find the message so we can update the gauge.
if let Some(message) = self.db.retrieve_message_by_nonce(self.message_nonce)? {
// Update the latest nonce gauges
self.metrics
.max_last_known_message_nonce_gauge
.set(message.nonce as i64);
if let Some(metrics) = self.metrics.get(message.destination) {
metrics.set(message.nonce as i64);
}
pub fn new(
db: HyperlaneRocksDB,
whitelist: Arc<MatchingList>,
blacklist: Arc<MatchingList>,
metrics: MessageProcessorMetrics,
send_channels: HashMap<u32, UnboundedSender<QueueOperation>>,
destination_ctxs: HashMap<u32, Arc<MessageContext>>,
metric_app_contexts: Vec<(MatchingList, String)>,
) -> Self {
Self {
whitelist,
blacklist,
metrics,
send_channels,
destination_ctxs,
metric_app_contexts,
nonce_iterator: ForwardBackwardIterator::new(Arc::new(db) as Arc<dyn ProcessMessage>),
}
}
// If this message has already been processed, on to the next one.
if !self
.db
.retrieve_processed_by_nonce(&self.message_nonce)?
.unwrap_or(false)
{
return Ok(Some(message));
} else {
debug!(nonce=?self.message_nonce, "Message already marked as processed in DB");
self.message_nonce += 1;
}
} else {
trace!(nonce=?self.message_nonce, "No message found in DB for nonce");
return Ok(None);
}
async fn try_get_unprocessed_message(&mut self) -> Result<Option<HyperlaneMessage>> {
trace!(nonce_iterator=?self.nonce_iterator, "Trying to get the next processor message");
let next_message = self
.nonce_iterator
.try_get_next_message(&self.metrics)
.await?;
if next_message.is_none() {
trace!(nonce_iterator=?self.nonce_iterator, "No message found in DB for nonce");
}
Ok(next_message)
}
}
@ -197,7 +367,7 @@ mod test {
use super::*;
use hyperlane_base::{
db::{test_utils, HyperlaneRocksDB},
db::{test_utils, DbResult, HyperlaneRocksDB},
settings::{ChainConf, ChainConnectionConf, Settings},
};
use hyperlane_test::mocks::{MockMailboxContract, MockValidatorAnnounceContract};
@ -387,6 +557,21 @@ mod test {
pending_messages
}
mockall::mock! {
pub Db {}
impl Debug for Db {
fn fmt<'a>(&self, f: &mut std::fmt::Formatter<'a>) -> std::fmt::Result;
}
impl ProcessMessage for Db {
fn retrieve_highest_seen_message_nonce(&self) -> DbResult<Option<u32>>;
fn retrieve_message_by_nonce(&self, nonce: u32) -> DbResult<Option<HyperlaneMessage>>;
fn retrieve_processed_by_nonce(&self, nonce: u32) -> DbResult<Option<bool>>;
fn domain(&self) -> &HyperlaneDomain;
}
}
#[tokio::test]
async fn test_full_pending_message_persistence_flow() {
test_utils::run_test_db(|db| async move {
@ -441,4 +626,77 @@ mod test {
})
.await;
}
#[tokio::test]
async fn test_forward_backward_iterator() {
let mut mock_db = MockDb::new();
const MAX_ONCHAIN_NONCE: u32 = 4;
const MOCK_HIGHEST_SEEN_NONCE: u32 = 2;
// How many times the db was queried for the max onchain nonce message
let mut retrieve_calls_for_max_onchain_nonce = 0;
mock_db
.expect_domain()
.return_const(dummy_domain(0, "dummy_domain"));
mock_db
.expect_retrieve_highest_seen_message_nonce()
.returning(|| Ok(Some(MOCK_HIGHEST_SEEN_NONCE)));
mock_db
.expect_retrieve_message_by_nonce()
.returning(move |nonce| {
// return `None` the first time we get a query for the last message
// (the `MAX_ONCHAIN_NONCE`th one), to simulate an ongoing indexing that hasn't finished
if nonce == MAX_ONCHAIN_NONCE && retrieve_calls_for_max_onchain_nonce == 0 {
retrieve_calls_for_max_onchain_nonce += 1;
return Ok(None);
}
// otherwise return a message for every nonce in the closed
// interval [0, MAX_ONCHAIN_NONCE]
if nonce > MAX_ONCHAIN_NONCE {
Ok(None)
} else {
Ok(Some(dummy_hyperlane_message(
&dummy_domain(1, "dummy_domain"),
nonce,
)))
}
});
// The messages must be marked as "not processed" in the db for them to be returned
// when the iterator queries them
mock_db
.expect_retrieve_processed_by_nonce()
.returning(|_| Ok(Some(false)));
let dummy_metrics = dummy_processor_metrics(0);
let db = Arc::new(mock_db);
let mut forward_backward_iterator = ForwardBackwardIterator::new(db.clone());
let mut messages = vec![];
while let Some(msg) = forward_backward_iterator
.try_get_next_message(&dummy_metrics)
.await
.unwrap()
{
messages.push(msg.nonce);
}
// we start with 2 (MOCK_HIGHEST_SEEN_NONCE) as the highest seen nonce,
// so we go forward and get 3.
// then we try going forward again but get a `None` (not indexed yet), for nonce 4 (MAX_ONCHAIN_NONCE).
// then we go backwards once and get 1.
// then retry the forward iteration, which should return a message the second time, for nonce 4.
// finally, going forward again returns None so we go backward and get 0.
assert_eq!(messages, vec![2, 3, 1, 4, 0]);
// the final bounds of the iterator are (None, MAX_ONCHAIN_NONCE + 1), where None means
// the backward iterator has reached the beginning (iterated past nonce 0)
assert_eq!(forward_backward_iterator.low_nonce_iter.nonce, None);
assert_eq!(
forward_backward_iterator.high_nonce_iter.nonce,
Some(MAX_ONCHAIN_NONCE + 1)
);
}
}

rust/chains/hyperlane-ethereum/src/contracts/mailbox.rs

@ -164,7 +164,7 @@ impl<M> SequenceAwareIndexer<HyperlaneMessage> for EthereumMailboxIndexer<M>
where
M: Middleware + 'static,
{
#[instrument(err, skip(self))]
#[instrument(err, skip(self), ret)]
async fn latest_sequence_count_and_tip(&self) -> ChainResult<(Option<u32>, u32)> {
let tip = Indexer::<HyperlaneMessage>::get_finalized_block_number(self).await?;
let sequence = self.contract.nonce().block(u64::from(tip)).call().await?;

rust/hyperlane-base/src/contract_sync/cursors/sequence_aware/backward.rs

@ -9,12 +9,11 @@ use hyperlane_core::{
HyperlaneSequenceAwareIndexerStoreReader, IndexMode, Indexed, LogMeta, SequenceIndexed,
};
use itertools::Itertools;
use tracing::{debug, warn};
use tracing::{debug, instrument, warn};
use super::{LastIndexedSnapshot, TargetSnapshot};
/// A sequence-aware cursor that syncs backward until there are no earlier logs to index.
#[derive(Debug)]
pub(crate) struct BackwardSequenceAwareSyncCursor<T> {
/// The max chunk size to query for logs.
/// If in sequence mode, this is the max number of sequences to query.
@ -34,6 +33,11 @@ pub(crate) struct BackwardSequenceAwareSyncCursor<T> {
}
impl<T: Debug> BackwardSequenceAwareSyncCursor<T> {
#[instrument(
skip(db),
fields(chunk_size, next_sequence, start_block, index_mode),
ret
)]
pub fn new(
chunk_size: u32,
db: Arc<dyn HyperlaneSequenceAwareIndexerStoreReader<T>>,
@ -61,6 +65,7 @@ impl<T: Debug> BackwardSequenceAwareSyncCursor<T> {
/// Gets the next range of logs to query.
/// If the cursor is fully synced, this returns None.
/// Otherwise, it returns the next range to query, either by block or sequence depending on the mode.
#[instrument(ret)]
pub async fn get_next_range(&mut self) -> Result<Option<RangeInclusive<u32>>> {
// Skip any already indexed logs.
self.skip_indexed().await?;
@ -129,6 +134,11 @@ impl<T: Debug> BackwardSequenceAwareSyncCursor<T> {
// If the sequence hasn't been indexed, break out of the loop.
break;
}
// We've noticed that this loop can run for a long time because the `await`
// points never yield.
// So, to avoid starving other futures in this task, yield to the runtime
// on each iteration
tokio::task::yield_now().await;
}
Ok(())
@ -299,6 +309,17 @@ impl<T: Debug> BackwardSequenceAwareSyncCursor<T> {
}
}
impl<T: Debug> Debug for BackwardSequenceAwareSyncCursor<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("BackwardSequenceAwareSyncCursor")
.field("chunk_size", &self.chunk_size)
.field("current_indexing_snapshot", &self.current_indexing_snapshot)
.field("last_indexed_snapshot", &self.last_indexed_snapshot)
.field("index_mode", &self.index_mode)
.finish()
}
}
#[async_trait]
impl<T: Send + Sync + Clone + Debug + 'static> ContractSyncCursor<T>
for BackwardSequenceAwareSyncCursor<T>
@ -329,6 +350,7 @@ impl<T: Send + Sync + Clone + Debug + 'static> ContractSyncCursor<T>
/// ## logs
/// The logs to ingest. If any logs are duplicated or their sequence is higher than the current indexing snapshot,
/// they are filtered out.
#[instrument(err, ret, skip(logs), fields(range=?range, logs=?logs.iter().map(|(log, _)| log.sequence).collect::<Vec<_>>()))]
async fn update(
&mut self,
logs: Vec<(Indexed<T>, LogMeta)>,

rust/hyperlane-base/src/contract_sync/cursors/sequence_aware/forward.rs

@ -13,12 +13,11 @@ use hyperlane_core::{
SequenceIndexed,
};
use itertools::Itertools;
use tracing::{debug, warn};
use tracing::{debug, instrument, warn};
use super::{LastIndexedSnapshot, TargetSnapshot};
/// A sequence-aware cursor that syncs forwards in perpetuity.
#[derive(Debug)]
pub(crate) struct ForwardSequenceAwareSyncCursor<T> {
/// The max chunk size to query for logs.
/// If in sequence mode, this is the max number of sequences to query.
@ -43,6 +42,11 @@ pub(crate) struct ForwardSequenceAwareSyncCursor<T> {
}
impl<T: Debug> ForwardSequenceAwareSyncCursor<T> {
#[instrument(
skip(db, latest_sequence_querier),
fields(chunk_size, next_sequence, start_block, index_mode),
ret
)]
pub fn new(
chunk_size: u32,
latest_sequence_querier: Arc<dyn SequenceAwareIndexer<T>>,
@ -76,6 +80,7 @@ impl<T: Debug> ForwardSequenceAwareSyncCursor<T> {
/// If there are no logs to index, returns `None`.
/// If there are logs to index, returns the range of logs, either by sequence or block number
/// depending on the mode.
#[instrument(ret)]
pub async fn get_next_range(&mut self) -> Result<Option<RangeInclusive<u32>>> {
// Skip any already indexed logs.
self.skip_indexed().await?;
@ -386,6 +391,18 @@ impl<T: Debug> ForwardSequenceAwareSyncCursor<T> {
}
}
impl<T: Debug> Debug for ForwardSequenceAwareSyncCursor<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("ForwardSequenceAwareSyncCursor")
.field("chunk_size", &self.chunk_size)
.field("current_indexing_snapshot", &self.current_indexing_snapshot)
.field("last_indexed_snapshot", &self.last_indexed_snapshot)
.field("target_snapshot", &self.target_snapshot)
.field("index_mode", &self.index_mode)
.finish()
}
}
#[async_trait]
impl<T: Send + Sync + Clone + Debug + 'static> ContractSyncCursor<T>
for ForwardSequenceAwareSyncCursor<T>
@ -420,6 +437,7 @@ impl<T: Send + Sync + Clone + Debug + 'static> ContractSyncCursor<T>
/// - Even if the logs include a gap, in practice these logs will have already been inserted into the DB.
/// This means that while gaps result in a rewind here, already known logs may be "fast forwarded" through,
/// and the cursor won't actually end up re-indexing already known logs.
#[instrument(err, ret, skip(logs), fields(range=?range, logs=?logs.iter().map(|(log, _)| log.sequence).collect::<Vec<_>>()))]
async fn update(
&mut self,
logs: Vec<(Indexed<T>, LogMeta)>,

rust/hyperlane-base/src/db/rocks/hyperlane_db.rs

@ -23,6 +23,7 @@ const MESSAGE_DISPATCHED_BLOCK_NUMBER: &str = "message_dispatched_block_number_"
const MESSAGE: &str = "message_";
const NONCE_PROCESSED: &str = "nonce_processed_";
const GAS_PAYMENT_BY_SEQUENCE: &str = "gas_payment_by_sequence_";
const HIGHEST_SEEN_MESSAGE_NONCE: &str = "highest_seen_message_nonce_";
const GAS_PAYMENT_FOR_MESSAGE_ID: &str = "gas_payment_sequence_for_message_id_v2_";
const GAS_PAYMENT_META_PROCESSED: &str = "gas_payment_meta_processed_v3_";
const GAS_EXPENDITURE_FOR_MESSAGE_ID: &str = "gas_expenditure_for_message_id_v2_";
@ -34,7 +35,8 @@ const MERKLE_TREE_INSERTION_BLOCK_NUMBER_BY_LEAF_INDEX: &str =
"merkle_tree_insertion_block_number_by_leaf_index_";
const LATEST_INDEXED_GAS_PAYMENT_BLOCK: &str = "latest_indexed_gas_payment_block";
type DbResult<T> = std::result::Result<T, DbError>;
/// Rocks DB result type
pub type DbResult<T> = std::result::Result<T, DbError>;
/// DB handle for storing data tied to a specific Mailbox.
#[derive(Debug, Clone)]
@ -94,6 +96,8 @@ impl HyperlaneRocksDB {
self.store_message_by_id(&id, message)?;
// - `nonce` --> `id`
self.store_message_id_by_nonce(&message.nonce, &id)?;
// Update the max seen nonce to allow forward-backward iteration in the processor
self.try_update_max_seen_message_nonce(message.nonce)?;
// - `nonce` --> `dispatched block number`
self.store_dispatched_block_number_by_nonce(&message.nonce, &dispatched_block_number)?;
Ok(true)
@ -108,6 +112,22 @@ impl HyperlaneRocksDB {
}
}
/// Update the highest message nonce we have seen (indexed) so far
pub fn try_update_max_seen_message_nonce(&self, nonce: u32) -> DbResult<()> {
let current_max = self
.retrieve_highest_seen_message_nonce()?
.unwrap_or_default();
if nonce >= current_max {
self.store_highest_seen_message_nonce_number(&Default::default(), &nonce)?;
}
Ok(())
}
/// Retrieve the highest message nonce we have seen (indexed) so far
pub fn retrieve_highest_seen_message_nonce(&self) -> DbResult<Option<u32>> {
self.retrieve_highest_seen_message_nonce_number(&Default::default())
}
/// If the provided gas payment, identified by its metadata, has not been
/// processed, processes the gas payment and records it as processed.
/// Returns whether the gas payment was processed for the first time.
@ -416,6 +436,39 @@ impl HyperlaneWatermarkedLogStore<MerkleTreeInsertion> for HyperlaneRocksDB {
}
}
/// Database interface required for processing messages
pub trait ProcessMessage: Send + Sync {
/// Retrieve the highest message nonce we have seen (indexed) so far
fn retrieve_highest_seen_message_nonce(&self) -> DbResult<Option<u32>>;
/// Retrieve a message by its nonce
fn retrieve_message_by_nonce(&self, nonce: u32) -> DbResult<Option<HyperlaneMessage>>;
/// Retrieve whether a message has been processed
fn retrieve_processed_by_nonce(&self, nonce: u32) -> DbResult<Option<bool>>;
/// Get the origin domain of the database
fn domain(&self) -> &HyperlaneDomain;
}
impl ProcessMessage for HyperlaneRocksDB {
fn retrieve_highest_seen_message_nonce(&self) -> DbResult<Option<u32>> {
self.retrieve_highest_seen_message_nonce()
}
fn retrieve_message_by_nonce(&self, nonce: u32) -> DbResult<Option<HyperlaneMessage>> {
self.retrieve_message_by_nonce(nonce)
}
fn retrieve_processed_by_nonce(&self, nonce: u32) -> DbResult<Option<bool>> {
self.retrieve_processed_by_nonce(&nonce)
}
fn domain(&self) -> &HyperlaneDomain {
self.domain()
}
}
/// Generate store and retrieve functions for the given key prefix, key type, and value type
macro_rules! make_store_and_retrieve {
($vis:vis, $name_suffix:ident, $key_prefix: ident, $key_ty:ty, $val_ty:ty$(,)?) => {
@ -479,3 +532,6 @@ make_store_and_retrieve!(
u32,
u64
);
// There's no unit struct Encode/Decode impl, so just use `bool`, have visibility be private (by omitting the first argument), and wrap
// with a function that always uses the `Default::default()` key
make_store_and_retrieve!(, highest_seen_message_nonce_number, HIGHEST_SEEN_MESSAGE_NONCE, bool, u32);
