Add relayer scaffold (#225)
* Add relayer scaffold
* Lint
* Add Outbox indexing to relayer and determine messages destined for inbox (#227)
* Add Outbox indexing to relayer and determine messages destined for inbox
* Lint
* PR review
* Support message processing in relayer (#232)
* Support message processing in relayer
* Lint
* lint
* PR review
* Add clarifying comment
* Be more specific between polling_interval and submission_latency
parent 9a7de7f117
commit d3fbd1f9f9
@@ -0,0 +1,162 @@
use std::{sync::Arc, time::Duration};

use abacus_base::{CachingInbox, CheckpointSyncer, LocalStorage};
use abacus_core::{db::AbacusDB, AbacusCommon, CommittedMessage, Inbox};
use color_eyre::Result;
use tokio::{task::JoinHandle, time::sleep};
use tracing::{debug, info_span, instrument::Instrumented, Instrument};

use crate::tip_prover::{MessageBatch, TipProver};

pub(crate) struct CheckpointRelayer {
    /// The polling interval in seconds between checks for a new signed checkpoint
    polling_interval: u64,
    /// The minimum latency in seconds between two relayed checkpoints on the inbox
    submission_latency: u64,
    db: AbacusDB,
    inbox: Arc<CachingInbox>,
    prover_sync: TipProver,
}

impl CheckpointRelayer {
    pub(crate) fn new(
        polling_interval: u64,
        submission_latency: u64,
        db: AbacusDB,
        inbox: Arc<CachingInbox>,
    ) -> Self {
        Self {
            polling_interval,
            submission_latency,
            prover_sync: TipProver::from_disk(db.clone()),
            db,
            inbox,
        }
    }

    /// Only gets the messages destined for the relayer's inbox
    async fn get_messages_between(
        &self,
        from_leaf_index: u32,
        to_leaf_index: u32,
    ) -> Result<Option<Vec<CommittedMessage>>> {
        let mut messages: Vec<CommittedMessage> = vec![];
        let mut current_leaf_index = from_leaf_index;
        while current_leaf_index <= to_leaf_index {
            // Relies on the indexer finding this message eventually
            self.db.wait_for_leaf(current_leaf_index).await?;
            let maybe_message = self
                .db
                .message_by_leaf_index(current_leaf_index)?
                .map(CommittedMessage::try_from)
                .transpose()?;
            match maybe_message {
                Some(message) => {
                    if message.message.destination == self.inbox.local_domain() {
                        messages.push(message);
                    }
                }
                // This should never happen, but if it does, retry the range
                None => return Ok(None),
            }
            current_leaf_index += 1;
        }

        Ok(Some(messages))
    }

    // Returns the newest "current" checkpoint index
    async fn submit_checkpoint_and_messages(
        &mut self,
        local_storage: &LocalStorage,
        onchain_checkpoint_index: u32,
        signed_checkpoint_index: u32,
        messages: Vec<CommittedMessage>,
    ) -> Result<u32> {
        // If the checkpoint storage is inconsistent, this arm won't match
        // and we will skip this message batch
        if let Some(latest_signed_checkpoint) = local_storage
            .fetch_checkpoint(signed_checkpoint_index)
            .await?
        {
            let batch = MessageBatch::new(
                messages,
                onchain_checkpoint_index,
                latest_signed_checkpoint.clone(),
            );
            self.prover_sync.update_from_batch(&batch)?;
            self.inbox
                .submit_checkpoint(&latest_signed_checkpoint)
                .await?;

            // TODO: sign in parallel
            for message in &batch.messages {
                if let Some(proof) = self.db.proof_by_leaf_index(message.leaf_index)? {
                    self.inbox
                        .prove_and_process(&message.message, &proof)
                        .await?;
                }
            }

            // Sleep for the latency period after submission
            sleep(Duration::from_secs(self.submission_latency)).await;
            Ok(latest_signed_checkpoint.checkpoint.index)
        } else {
            Ok(onchain_checkpoint_index)
        }
    }

    pub(crate) fn spawn(mut self) -> Instrumented<JoinHandle<Result<()>>> {
        let span = info_span!("CheckpointRelayer");
        let local_storage = LocalStorage {
            path: "/tmp/validatorsignatures".to_string(),
        };
        tokio::spawn(async move {
            let latest_inbox_checkpoint = self.inbox.latest_checkpoint(None).await?;
            let mut onchain_checkpoint_index = latest_inbox_checkpoint.index;
            // Checkpoints are 1-indexed, while leaves are 0-indexed
            let mut next_inbox_leaf_index = onchain_checkpoint_index;
            loop {
                sleep(Duration::from_secs(self.polling_interval)).await;

                if let Some(signed_checkpoint_index) = local_storage.latest_index().await? {
                    if signed_checkpoint_index <= onchain_checkpoint_index {
                        debug!(
                            onchain = onchain_checkpoint_index,
                            signed = signed_checkpoint_index,
                            "Signed checkpoint matches known checkpoint on-chain, continue"
                        );
                        continue;
                    }

                    match self
                        .get_messages_between(next_inbox_leaf_index, signed_checkpoint_index)
                        .await?
                    {
                        None => debug!("Couldn't fetch the relevant messages, retry this range"),
                        Some(messages) if messages.is_empty() => {
                            next_inbox_leaf_index = signed_checkpoint_index;
                            debug!("New checkpoint does not include messages for inbox")
                        }
                        Some(messages) => {
                            next_inbox_leaf_index = signed_checkpoint_index;
                            debug!(
                                len = messages.len(),
                                "Signed checkpoint allows for processing of new messages"
                            );

                            onchain_checkpoint_index = self
                                .submit_checkpoint_and_messages(
                                    &local_storage,
                                    onchain_checkpoint_index,
                                    signed_checkpoint_index,
                                    messages,
                                )
                                .await?;
                        }
                    }
                }
            }
        })
        .instrument(span)
    }
}
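For orientation, a minimal usage sketch (not part of this diff) of how an agent task might construct and spawn the relayer; the `db` and `inbox` arguments are assumed to come from the agent's existing setup, and the interval values are illustrative only.

// Hedged sketch: assumes `db` and `inbox` are built by the surrounding agent code.
async fn spawn_checkpoint_relayer(
    db: abacus_core::db::AbacusDB,
    inbox: std::sync::Arc<abacus_base::CachingInbox>,
) -> color_eyre::Result<()> {
    // Illustrative values: poll the checkpoint syncer every 5 seconds, and wait
    // at least 100 seconds between relayed checkpoints on the inbox.
    let relayer = CheckpointRelayer::new(5, 100, db, inbox);
    // spawn() returns Instrumented<JoinHandle<Result<()>>>; awaiting it yields
    // Result<Result<()>, JoinError>, hence the double `?`.
    relayer.spawn().await??;
    Ok(())
}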
@@ -0,0 +1,176 @@
//! Prover process: generate proofs in the tree.
//!
//! Struct responsible for syncing Prover

use ethers::core::types::H256;

use abacus_core::accumulator::{
    merkle::{merkle_root_from_branch, MerkleTree, MerkleTreeError, Proof},
    TREE_DEPTH,
};

/// A depth-32 sparse Merkle tree capable of producing proofs for arbitrary
/// elements.
#[derive(Debug)]
pub struct Prover {
    count: usize,
    tree: MerkleTree,
}

/// Prover Errors
#[derive(Debug, thiserror::Error)]
pub enum ProverError {
    /// Index is above tree max size
    #[error("Requested proof for index above u32::MAX: {0}")]
    IndexTooHigh(usize),
    /// Requested proof for a zero element
    #[error("Requested proof for a zero element. Requested: {index}. Tree has: {count}")]
    ZeroProof {
        /// The index requested
        index: usize,
        /// The number of leaves
        count: usize,
    },
    /// Bubbled up from underlying
    #[error(transparent)]
    MerkleTreeError(#[from] MerkleTreeError),
    /// Failed proof verification
    #[error("Proof verification failed. Root is {expected}, produced is {actual}")]
    #[allow(dead_code)]
    VerificationFailed {
        /// The expected root (this tree's current root)
        expected: H256,
        /// The root produced by branch evaluation
        actual: H256,
    },
}

impl Default for Prover {
    fn default() -> Self {
        let full = MerkleTree::create(&[], TREE_DEPTH);
        Self {
            count: 0,
            tree: full,
        }
    }
}

impl Prover {
    /// Push a leaf to the tree. Appends it to the first unoccupied slot
    ///
    /// This will fail if the underlying tree is full.
    pub fn ingest(&mut self, element: H256) -> Result<H256, ProverError> {
        self.count += 1;
        self.tree.push_leaf(element, TREE_DEPTH)?;
        Ok(self.tree.hash())
    }

    /// Return the current root hash of the tree
    pub fn root(&self) -> H256 {
        self.tree.hash()
    }

    /// Return the number of leaves that have been ingested
    pub fn count(&self) -> usize {
        self.count
    }

    /// Create a proof of a leaf in this tree.
    ///
    /// Note, if the tree ingests more leaves, the root will need to be recalculated.
    pub fn prove(&self, index: usize) -> Result<Proof, ProverError> {
        if index > u32::MAX as usize {
            return Err(ProverError::IndexTooHigh(index));
        }
        let count = self.count();
        if index >= count {
            return Err(ProverError::ZeroProof { index, count });
        }

        let (leaf, hashes) = self.tree.generate_proof(index, TREE_DEPTH);
        let mut path = [H256::zero(); 32];
        path.copy_from_slice(&hashes[..32]);
        Ok(Proof { leaf, index, path })
    }

    /// Verify a proof against this tree's root.
    #[allow(dead_code)]
    pub fn verify(&self, proof: &Proof) -> Result<(), ProverError> {
        let actual = merkle_root_from_branch(proof.leaf, &proof.path, TREE_DEPTH, proof.index);
        let expected = self.root();
        if expected == actual {
            Ok(())
        } else {
            Err(ProverError::VerificationFailed { expected, actual })
        }
    }
}

impl<T> From<T> for Prover
where
    T: AsRef<[H256]>,
{
    fn from(t: T) -> Self {
        let slice = t.as_ref();
        Self {
            count: slice.len(),
            tree: MerkleTree::create(slice, TREE_DEPTH),
        }
    }
}

impl std::iter::FromIterator<H256> for Prover {
    /// Will panic if the tree fills
    fn from_iter<I: IntoIterator<Item = H256>>(iter: I) -> Self {
        let mut prover = Self::default();
        prover.extend(iter);
        prover
    }
}

impl std::iter::Extend<H256> for Prover {
    /// Will panic if the tree fills
    fn extend<I: IntoIterator<Item = H256>>(&mut self, iter: I) {
        for i in iter {
            self.ingest(i).expect("!tree full");
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use abacus_core::test_utils;
    use ethers::utils::hash_message;

    #[test]
    fn it_produces_and_verifies_proofs() {
        let test_cases = test_utils::load_merkle_test_json();

        for test_case in test_cases.iter() {
            let mut tree = Prover::default();

            // insert the leaves
            for leaf in test_case.leaves.iter() {
                let hashed_leaf = hash_message(leaf);
                tree.ingest(hashed_leaf).unwrap();
            }

            // assert the tree has the proper leaf count
            assert_eq!(tree.count(), test_case.leaves.len());

            // assert the tree generates the proper root
            let root = tree.root(); // root is type H256
            assert_eq!(root, test_case.expected_root);

            for n in 0..test_case.leaves.len() {
                // assert the tree generates the proper proof for this leaf
                let proof = tree.prove(n).unwrap();
                assert_eq!(proof, test_case.proofs[n]);

                // check that the tree can verify the proof for this leaf
                tree.verify(&proof).unwrap();
            }
        }
    }
}
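Beyond the JSON-vector test above, a minimal sketch (not part of this diff) of the `Prover` API in isolation; the leaf values are arbitrary placeholders.

// Hedged sketch of the Prover API: ingest leaves, then prove and verify one of them.
fn prover_roundtrip() -> Result<(), ProverError> {
    use ethers::core::types::H256;

    let mut prover = Prover::default();
    // Each ingest appends a leaf and returns the updated root.
    prover.ingest(H256::repeat_byte(0x11))?;
    prover.ingest(H256::repeat_byte(0x22))?;
    assert_eq!(prover.count(), 2);

    // Prove the first leaf against the current root, then verify the proof.
    let proof = prover.prove(0)?;
    prover.verify(&proof)?;
    Ok(())
}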
@@ -0,0 +1,226 @@
use crate::prover::{Prover, ProverError};
use abacus_core::{
    accumulator::incremental::IncrementalMerkle,
    db::{AbacusDB, DbError},
    ChainCommunicationError, CommittedMessage, SignedCheckpoint,
};
use color_eyre::eyre::Result;
use ethers::core::types::H256;
use std::fmt::Display;

use tracing::{debug, error, info, instrument};

/// Struct to update prover
pub struct MessageBatch {
    /// Messages
    pub messages: Vec<CommittedMessage>,
    current_checkpoint_index: u32,
    signed_target_checkpoint: SignedCheckpoint,
}

impl MessageBatch {
    pub fn new(
        messages: Vec<CommittedMessage>,
        current_checkpoint_index: u32,
        signed_target_checkpoint: SignedCheckpoint,
    ) -> Self {
        Self {
            messages,
            current_checkpoint_index,
            signed_target_checkpoint,
        }
    }
}

/// Struct to sync prover.
#[derive(Debug)]
pub struct TipProver {
    db: AbacusDB,
    prover: Prover,
    incremental: IncrementalMerkle,
}

impl Display for TipProver {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "TipProver {{ ")?;
        write!(
            f,
            "incremental: {{ root: {:?}, size: {} }}, ",
            self.incremental.root(),
            self.incremental.count()
        )?;
        write!(
            f,
            "prover: {{ root: {:?}, size: {} }} ",
            self.prover.root(),
            self.prover.count()
        )?;
        write!(f, "}}")?;
        Ok(())
    }
}

/// TipProver errors
#[derive(Debug, thiserror::Error)]
pub enum TipProverError {
    /// Local tree up-to-date but root does not match signed checkpoint
    #[error("Local tree up-to-date but root does not match checkpoint. Local root: {prover_root}. checkpoint root: {checkpoint_root}. WARNING: this could indicate malicious validator and/or long reorganization process!")]
    MismatchedRoots {
        /// Root of prover's local merkle tree
        prover_root: H256,
        /// Root of the incremental merkle tree
        incremental_root: H256,
        /// New root contained in signed checkpoint
        checkpoint_root: H256,
    },
    /// Leaf index was not found in the DB, despite the batch providing messages after it
    #[error("Leaf index was not found {leaf_index:?}")]
    UnavailableLeaf {
        /// The leaf index that could not be found in the DB
        leaf_index: u32,
    },
    /// TipProver attempts Prover operation and receives ProverError
    #[error(transparent)]
    ProverError(#[from] ProverError),
    /// TipProver receives ChainCommunicationError from chain API
    #[error(transparent)]
    ChainCommunicationError(#[from] ChainCommunicationError),
    /// DB Error
    #[error("{0}")]
    DbError(#[from] DbError),
}

impl TipProver {
    fn store_proof(&self, leaf_index: u32) -> Result<(), TipProverError> {
        match self.prover.prove(leaf_index as usize) {
            Ok(proof) => {
                self.db.store_proof(leaf_index, &proof)?;
                info!(
                    leaf_index,
                    root = ?self.prover.root(),
                    "Storing proof for leaf {}",
                    leaf_index
                );
                Ok(())
            }
            // ignore the storage request if it's out of range (e.g. leaves
            // up-to-date but no update containing leaves produced yet)
            Err(ProverError::ZeroProof { index: _, count: _ }) => Ok(()),
            // bubble up any other errors
            Err(e) => Err(e.into()),
        }
    }

    /// Given rocksdb handle `db` containing merkle tree leaves,
    /// instantiates new `TipProver` and fills prover's merkle tree
    #[instrument(level = "debug", skip(db))]
    pub fn from_disk(db: AbacusDB) -> Self {
        // Ingest all leaves in db into prover tree
        let mut prover = Prover::default();
        let mut incremental = IncrementalMerkle::default();

        if let Some(root) = db.retrieve_latest_root().expect("db error") {
            for i in 0.. {
                match db.leaf_by_leaf_index(i) {
                    Ok(Some(leaf)) => {
                        debug!(leaf_index = i, "Ingesting leaf from_disk");
                        prover.ingest(leaf).expect("!tree full");
                        incremental.ingest(leaf);
                        assert_eq!(prover.root(), incremental.root());
                        if prover.root() == root {
                            break;
                        }
                    }
                    Ok(None) => break,
                    Err(e) => {
                        error!(error = %e, "Error in TipProver::from_disk");
                        panic!("Error in TipProver::from_disk");
                    }
                }
            }
            info!(target_latest_root = ?root, root = ?incremental.root(), "Reloaded TipProver from disk");
        }

        let sync = Self {
            prover,
            incremental,
            db,
        };

        // Ensure proofs exist for all leaves
        for i in 0..sync.prover.count() as u32 {
            match (
                sync.db.leaf_by_leaf_index(i).expect("db error"),
                sync.db.proof_by_leaf_index(i).expect("db error"),
            ) {
                (Some(_), None) => sync.store_proof(i).expect("db error"),
                (None, _) => break,
                _ => {}
            }
        }

        sync
    }

    fn ingest_leaf_index(&mut self, leaf_index: u32) -> Result<(), TipProverError> {
        match self.db.leaf_by_leaf_index(leaf_index) {
            Ok(Some(leaf)) => {
                debug!(leaf_index = leaf_index, "Ingesting leaf update_from_batch");
                self.prover.ingest(leaf).expect("!tree full");
                self.incremental.ingest(leaf);
                assert_eq!(self.prover.root(), self.incremental.root());
                Ok(())
            }
            Ok(None) => {
                error!("We should not arrive here");
                Err(TipProverError::UnavailableLeaf { leaf_index })
            }
            Err(e) => Err(e.into()),
        }
    }

    /// Update the prover with a message batch
    pub fn update_from_batch(&mut self, batch: &MessageBatch) -> Result<(), TipProverError> {
        // TODO: if we are ahead already, something went wrong
        // if we are somehow behind the current index, prove until then
        for i in (self.prover.count() as u32)..batch.current_checkpoint_index + 1 {
            self.ingest_leaf_index(i)?;
        }

        info!(
            count = self.prover.count(),
            "update_from_batch fast forward"
        );
        // prove until the target (checkpoints are 1-indexed)
        for i in
            (batch.current_checkpoint_index + 1)..batch.signed_target_checkpoint.checkpoint.index
        {
            self.ingest_leaf_index(i)?;
        }

        let prover_root = self.prover.root();
        let incremental_root = self.incremental.root();
        let checkpoint_root = batch.signed_target_checkpoint.checkpoint.root;
        if prover_root != incremental_root || prover_root != checkpoint_root {
            return Err(TipProverError::MismatchedRoots {
                prover_root,
                incremental_root,
                checkpoint_root,
            });
        }

        info!(
            count = self.prover.count(),
            "update_from_batch batch proving"
        );
        // store proofs in DB
        for message in &batch.messages {
            self.store_proof(message.leaf_index)?;
        }
        // TODO: push proofs to S3

        Ok(())
    }
}
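To tie the pieces together, a hedged sketch (not part of this diff) of how the relayer drives `TipProver` for one checkpoint, mirroring `CheckpointRelayer::submit_checkpoint_and_messages` above; the `messages` and `signed_checkpoint` values are assumed to come from the relayer DB and the checkpoint syncer.

// Hedged sketch: catch the prover tree up to a newly signed checkpoint and
// persist proofs for the batch's messages.
fn prove_batch(
    db: abacus_core::db::AbacusDB,
    messages: Vec<abacus_core::CommittedMessage>,
    onchain_checkpoint_index: u32,
    signed_checkpoint: abacus_core::SignedCheckpoint,
) -> Result<(), TipProverError> {
    // Rebuild the local tree from previously indexed leaves in RocksDB.
    let mut prover_sync = TipProver::from_disk(db.clone());

    // Extend the tree from the checkpoint already on the inbox up to the signed
    // target checkpoint, checking the resulting roots, and store a proof for
    // every message in the batch.
    let batch = MessageBatch::new(messages, onchain_checkpoint_index, signed_checkpoint);
    prover_sync.update_from_batch(&batch)?;

    // Proofs for the batch's messages are now in the DB and can be read back via
    // `db.proof_by_leaf_index(...)` before calling `prove_and_process` on the inbox.
    Ok(())
}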