refactor: split updater tasks for safety (#909)

* refactor: split updater tasks, refactor opticsdb

* feature: asrefs for db types

* refactor: refactor typeddb to hold entity and pair with all loads/stores

* ci: cargo +nightly

* ci: use nightly toolchain

* fix: use 1.56 stable not nightly

* fix: updater can never overwrite an existing update with a conflicting one

* fix: also check suggested update for state change guard

Co-authored-by: Luke Tchang <ltchang@stanford.edu>
Branch: buddies-main-deployment. Committed by Conner Swann via GitHub (commit 7b2c4a21df, parent ad8039d91f).
41 changed files (lines changed per file):
  1. .github/CODEOWNERS (2)
  2. .github/workflows/rust.yml (6)
  3. rust-toolchain (2)
  4. rust/Cargo.lock (575)
  5. rust/Cargo.toml (4)
  6. rust/Dockerfile (6)
  7. rust/agents/kathy/Cargo.toml (2)
  8. rust/agents/processor/Cargo.toml (2)
  9. rust/agents/processor/src/processor.rs (17)
  10. rust/agents/processor/src/prover_sync.rs (40)
  11. rust/agents/processor/src/push.rs (4)
  12. rust/agents/relayer/Cargo.toml (2)
  13. rust/agents/updater/Cargo.toml (3)
  14. rust/agents/updater/src/main.rs (2)
  15. rust/agents/updater/src/produce.rs (122)
  16. rust/agents/updater/src/submit.rs (55)
  17. rust/agents/updater/src/updater.rs (220)
  18. rust/agents/watcher/Cargo.toml (2)
  19. rust/agents/watcher/src/watcher.rs (2)
  20. rust/chains/optics-ethereum/Cargo.toml (2)
  21. rust/chains/optics-ethereum/src/home.rs (39)
  22. rust/chains/optics-ethereum/src/lib.rs (6)
  23. rust/chains/optics-ethereum/src/replica.rs (10)
  24. rust/chains/optics-ethereum/src/xapp.rs (1)
  25. rust/optics-base/Cargo.toml (2)
  26. rust/optics-core/Cargo.toml (3)
  27. rust/optics-core/src/chain.rs (6)
  28. rust/optics-core/src/db/mod.rs (12)
  29. rust/optics-core/src/db/optics_db.rs (251)
  30. rust/optics-core/src/db/typed_db.rs (43)
  31. rust/optics-core/src/lib.rs (19)
  32. rust/optics-core/src/models/replica.rs (2)
  33. rust/optics-core/src/utils.rs (1)
  34. rust/optics-test/Cargo.toml (2)
  35. rust/optics-test/src/test_utils.rs (20)
  36. rust/tools/balance-exporter/Cargo.toml (2)
  37. rust/tools/kms-cli/Cargo.toml (4)
  38. rust/tools/kms-cli/src/main.rs (20)
  39. rust/tools/optics-cli/Cargo.toml (2)
  40. rust/tools/optics-cli/src/subcommands/db_state.rs (14)
  41. rust/tools/optics-cli/src/subcommands/prove.rs (12)
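
Before the per-file diffs, a condensed sketch of the task split described in the commit message: the old in-process poller/handler pair is replaced by an UpdateProducer and an UpdateSubmitter that run independently and are raced with select_all (see rust/agents/updater/src/updater.rs below). This is a simplified stand-in, not the real agent code; it assumes only the tokio (features "macros", "rt-multi-thread", "time") and futures-util crates, and the task bodies are placeholder comments.

use futures_util::future::select_all;
use std::time::Duration;
use tokio::{task::JoinHandle, time::sleep};

// Stand-in for UpdateProducer::spawn: sign updates and store them in the DB.
fn spawn_producer() -> JoinHandle<()> {
    tokio::spawn(async {
        for _ in 0..3 {
            sleep(Duration::from_millis(10)).await;
            // produce_update(), guard checks, sign, store_produced_update(...)
        }
    })
}

// Stand-in for UpdateSubmitter::spawn: read produced updates and submit them.
fn spawn_submitter() -> JoinHandle<()> {
    tokio::spawn(async {
        for _ in 0..3 {
            sleep(Duration::from_millis(10)).await;
            // retrieve_produced_update(committed_root), then home.update(&signed)
        }
    })
}

#[tokio::main]
async fn main() {
    // Race both tasks; when the first one finishes, abort the rest
    // (mirrors the select_all wiring in updater.rs).
    let (_first, _idx, rest) = select_all(vec![spawn_producer(), spawn_submitter()]).await;
    for task in rest {
        task.abort();
    }
}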

@ -1,2 +1,2 @@
* @prestwich @anna-carroll @erinhales
rust/ @prestwich @emberian
rust/ @prestwich @emberian @ltchang

@ -20,7 +20,7 @@ jobs:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: 1.54.0
toolchain: stable
- uses: Swatinem/rust-cache@v1
with:
working-directory: ./rust
@ -35,7 +35,7 @@ jobs:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: 1.54.0
toolchain: stable
- uses: Swatinem/rust-cache@v1
with:
working-directory: ./rust
@ -50,7 +50,7 @@ jobs:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: 1.54.0
toolchain: stable
- uses: Swatinem/rust-cache@v1
with:
working-directory: ./rust

@ -1,3 +1,3 @@
[toolchain]
channel = "1.54"
channel = "1.56"
profile = "default"

rust/Cargo.lock (generated, 575 lines changed): diff suppressed because it is too large.

@ -1,3 +1,5 @@
cargo-features = ["edition2021"]
[workspace]
members = [
@ -8,7 +10,7 @@ members = [
"agents/kathy",
"agents/updater",
"agents/relayer",
"agents/watcher",
# "agents/watcher",
"agents/processor",
"tools/kms-cli",
"tools/optics-cli",

@ -1,13 +1,13 @@
# syntax=docker/dockerfile:experimental
FROM rust:1.54 as builder
FROM rust:1.56 as builder
WORKDIR /usr/src
# 1a: Prepare for static linking
RUN apt-get update && \
apt-get dist-upgrade -y && \
apt-get install -y musl-tools clang-6.0 && \
rustup target add x86_64-unknown-linux-musl
apt-get install -y musl-tools clang && \
rustup target add x86_64-unknown-linux-musl
# Add workspace to workdir
COPY agents ./agents

@ -2,7 +2,7 @@
name = "kathy"
version = "0.1.0"
authors = ["James Prestwich <james@prestwi.ch>"]
edition = "2018"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

@ -2,7 +2,7 @@
name = "processor"
version = "0.1.0"
authors = ["anna-caroll <anna.s.carroll@gmail.com>"]
edition = "2018"
edition = "2021"
[dependencies]
tokio = { version = "1.0.1", features = ["rt", "macros"] }

@ -73,7 +73,7 @@ impl Replica {
// 5. Submit the proof to the replica
let mut next_message_nonce: u32 = self
.db
.retrieve_latest_nonce(self.home.name(), domain)?
.retrieve_latest_nonce(domain)?
.map(|n: u32| n + 1)
.unwrap_or_default();
@ -108,7 +108,7 @@ impl Replica {
{
Ok(Flow::Advance) => {
self.db
.store_latest_nonce(self.home.name(), domain, next_message_nonce)?;
.store_latest_nonce(domain, next_message_nonce)?;
next_message_nonce += 1;
self.next_message_nonce
@ -199,10 +199,7 @@ impl Replica {
return Ok(Flow::Advance);
}
let proof = match self
.db
.proof_by_leaf_index(self.home.name(), message.leaf_index)
{
let proof = match self.db.proof_by_leaf_index(message.leaf_index) {
Ok(Some(p)) => p,
Ok(None) => {
info!(
@ -359,7 +356,7 @@ impl OpticsAgent for Processor {
let home = self.home();
let next_message_nonce = self.next_message_nonce.clone();
let interval = self.interval;
let db = OpticsDB::new(self.db());
let db = OpticsDB::new(home.name(), self.db());
let replica_opt = self.replica_by_name(name);
let name = name.to_owned();
@ -394,8 +391,8 @@ impl OpticsAgent for Processor {
// tree sync
info!("Starting ProverSync");
let sync =
ProverSync::from_disk(self.home().name().to_owned(), OpticsDB::new(self.db()));
let db = OpticsDB::new(self.home().name().to_owned(), self.db());
let sync = ProverSync::from_disk(db.clone());
let sync_task = sync.spawn();
info!("Starting indexer");
@ -434,7 +431,7 @@ impl OpticsAgent for Processor {
self.core.home.name(),
&config.bucket,
config.region.parse().expect("invalid s3 region"),
OpticsDB::new(self.db()),
db.clone(),
)
.spawn(),
)

@ -13,7 +13,6 @@ use tracing::{debug, error, info, info_span, instrument, instrument::Instrumente
/// Struct to sync prover.
#[derive(Debug)]
pub struct ProverSync {
home_name: String,
db: OpticsDB,
prover: Prover,
incremental: IncrementalMerkle,
@ -71,7 +70,7 @@ impl ProverSync {
fn store_proof(&self, leaf_index: u32) -> Result<(), ProverSyncError> {
match self.prover.prove(leaf_index as usize) {
Ok(proof) => {
self.db.store_proof(&self.home_name, leaf_index, &proof)?;
self.db.store_proof(leaf_index, &proof)?;
info!(
leaf_index,
root = ?self.prover.root(),
@ -91,14 +90,14 @@ impl ProverSync {
/// Given rocksdb handle `db` containing merkle tree leaves,
/// instantiates new `ProverSync` and fills prover's merkle tree
#[instrument(level = "debug", skip(db))]
pub fn from_disk(home_name: String, db: OpticsDB) -> Self {
pub fn from_disk(db: OpticsDB) -> Self {
// Ingest all leaves in db into prover tree
let mut prover = Prover::default();
let mut incremental = IncrementalMerkle::default();
if let Some(root) = db.retrieve_latest_root(&home_name).expect("db error") {
if let Some(root) = db.retrieve_latest_root().expect("db error") {
for i in 0.. {
match db.leaf_by_leaf_index(&home_name, i) {
match db.leaf_by_leaf_index(i) {
Ok(Some(leaf)) => {
debug!(leaf_index = i, "Ingesting leaf from_disk");
prover.ingest(leaf).expect("!tree full");
@ -119,7 +118,6 @@ impl ProverSync {
}
let sync = Self {
home_name: home_name.to_owned(),
prover,
incremental,
db,
@ -128,10 +126,8 @@ impl ProverSync {
// Ensure proofs exist for all leaves
for i in 0..sync.prover.count() as u32 {
match (
sync.db.leaf_by_leaf_index(&home_name, i).expect("db error"),
sync.db
.proof_by_leaf_index(&home_name, i)
.expect("db error"),
sync.db.leaf_by_leaf_index(i).expect("db error"),
sync.db.proof_by_leaf_index(i).expect("db error"),
) {
(Some(_), None) => sync.store_proof(i).expect("db error"),
(None, _) => break,
@ -158,7 +154,7 @@ impl ProverSync {
let mut leaves = vec![];
for i in range {
let leaf = self.db.wait_for_leaf(&self.home_name, i as u32).await?;
let leaf = self.db.wait_for_leaf(i as u32).await?;
if leaf.is_none() {
break;
}
@ -219,11 +215,7 @@ impl ProverSync {
// store all calculated proofs in the db
// TODO(luke): refactor prover_sync so we dont have to iterate over every leaf (match from_disk implementation)
for idx in 0..self.prover.count() {
if self
.db
.proof_by_leaf_index(&self.home_name, idx as u32)?
.is_none()
{
if self.db.proof_by_leaf_index(idx as u32)?.is_none() {
self.store_proof(idx as u32)?;
}
}
@ -270,11 +262,7 @@ impl ProverSync {
// As we fill the incremental merkle, its tree_size will always be
// equal to the index of the next leaf we want (e.g. if tree_size
// is 3, we want the 4th leaf, which is at index 3)
if let Some(leaf) = self
.db
.wait_for_leaf(&self.home_name, tree_size as u32)
.await?
{
if let Some(leaf) = self.db.wait_for_leaf(tree_size as u32).await? {
info!(
index = tree_size,
leaf = ?leaf,
@ -317,9 +305,7 @@ impl ProverSync {
tokio::spawn(async move {
loop {
let local_root = self.local_root();
let signed_update_opt = self
.db
.update_by_previous_root(&self.home_name, local_root)?;
let signed_update_opt = self.db.update_by_previous_root(local_root)?;
// This if block is somewhat ugly.
// First we check if there is a signed update with the local root.
@ -335,11 +321,7 @@ impl ProverSync {
);
self.update_full(local_root, signed_update.update.new_root)
.await?;
} else if !local_root.is_zero()
&& self
.db
.update_by_new_root(&self.home_name, local_root)?
.is_none()
} else if !local_root.is_zero() && self.db.update_by_new_root(local_root)?.is_none()
{
bail!(ProverSyncError::InvalidLocalRoot { local_root });
}

@ -112,12 +112,12 @@ impl Pusher {
tokio::spawn(async move {
let mut index = 0;
loop {
let proof = self.db.proof_by_leaf_index(&self.name, index)?;
let proof = self.db.proof_by_leaf_index(index)?;
match proof {
Some(proof) => {
let message = self
.db
.message_by_leaf_index(&self.name, index)?
.message_by_leaf_index(index)?
.ok_or_else(|| eyre!("Missing message for known proof"))?;
let proven = ProvenMessage {
proof,

@ -2,7 +2,7 @@
name = "relayer"
version = "0.1.0"
authors = ["ltchang <ltchang@stanford.edu>"]
edition = "2018"
edition = "2021"
[dependencies]
tokio = { version = "1.0.1", features = ["rt", "macros"] }

@ -2,7 +2,7 @@
name = "updater"
version = "0.1.0"
authors = ["James Prestwich <prestwich@clabs.co>"]
edition = "2018"
edition = "2021"
[dependencies]
tokio = { version = "1.0.1", features = ["rt", "macros"] }
@ -27,6 +27,7 @@ paste = "1.0.5"
prometheus = "0.12"
warp = "0.3"
hex = "0.4.3"
[dev-dependencies]
mockall = "0.9.1"

@ -7,7 +7,9 @@
#![warn(missing_docs)]
#![warn(unused_extern_crates)]
mod produce;
mod settings;
mod submit;
mod updater;
use color_eyre::Result;

@ -0,0 +1,122 @@
use ethers::core::types::H256;
use prometheus::IntCounterVec;
use std::{sync::Arc, time::Duration};
use color_eyre::Result;
use optics_base::{Homes, OpticsAgent};
use optics_core::{db::OpticsDB, Common, Home, Signers};
use tokio::{task::JoinHandle, time::sleep};
use tracing::{debug, info, info_span, instrument::Instrumented, Instrument};
use crate::updater::Updater;
#[derive(Debug)]
pub(crate) struct UpdateProducer {
home: Arc<Homes>,
db: OpticsDB,
signer: Arc<Signers>,
interval_seconds: u64,
update_pause: u64,
signed_attestation_count: IntCounterVec,
}
impl UpdateProducer {
pub(crate) fn new(
home: Arc<Homes>,
db: OpticsDB,
signer: Arc<Signers>,
interval_seconds: u64,
update_pause: u64,
signed_attestation_count: IntCounterVec,
) -> Self {
Self {
home,
db,
signer,
interval_seconds,
update_pause,
signed_attestation_count,
}
}
fn find_latest_root(&self) -> Result<H256> {
// If db latest root is empty, this will produce `H256::default()`
// which is equal to `H256::zero()`
Ok(self.db.retrieve_latest_root()?.unwrap_or_default())
}
pub(crate) fn spawn(self) -> Instrumented<JoinHandle<Result<()>>> {
let span = info_span!("UpdateProducer");
tokio::spawn(async move {
loop {
// We sleep at the top of the loop so that every `continue` still waits an interval
sleep(Duration::from_secs(self.interval_seconds)).await;
let current_root = self.find_latest_root()?;
if let Some(suggested) = self.home.produce_update().await? {
if suggested.previous_root != current_root {
// This either indicates that the indexer is catching
// up or that the chain is awaiting a new update. We
// should ignore it.
debug!(
local = ?suggested.previous_root,
remote = ?current_root,
"Local root not equal to chain root. Skipping update."
);
continue;
}
// Ensure we have not already signed a conflicting update.
// Ignore suggested if we have.
if let Some(existing) = self.db.retrieve_produced_update(suggested.previous_root)? {
if existing.update.new_root != suggested.new_root {
info!("Updater ignoring conflicting suggested update. Indicates chain awaiting already produced update. Existing update: {:?}. Suggested conflicting update: {:?}.", &existing, &suggested);
continue;
}
}
// Sleep for `update_pause` seconds so we can check for
// unwanted state changes afterwards
sleep(Duration::from_secs(self.update_pause)).await;
// If the HomeIndexer found a new root that doesn't
// match our current root, continue
if self.find_latest_root()? != current_root {
continue;
}
// If the home's produced update builds off a different root than
// our suggested update's previous root, continue
if let Some(check_suggested) = self.home.produce_update().await? {
if check_suggested.previous_root != suggested.previous_root {
continue;
}
} else {
continue;
}
// If the suggested matches our local view, sign an update
// and store it as locally produced
let signed = suggested.sign_with(self.signer.as_ref()).await?;
self.signed_attestation_count
.with_label_values(&[self.home.name(), Updater::AGENT_NAME])
.inc();
let hex_signature = format!("0x{}", hex::encode(signed.signature.to_vec()));
info!(
previous_root = ?signed.update.previous_root,
new_root = ?signed.update.new_root,
hex_signature = %hex_signature,
"Storing new update in DB for broadcast"
);
self.db.store_produced_update(&signed)?;
}
}
})
.instrument(span)
}
}

@ -0,0 +1,55 @@
use std::sync::Arc;
use optics_base::Homes;
use optics_core::{db::OpticsDB, Common};
use std::time::Duration;
use color_eyre::Result;
use tokio::{task::JoinHandle, time::sleep};
use tracing::{info, info_span, instrument::Instrumented, Instrument};
pub(crate) struct UpdateSubmitter {
home: Arc<Homes>,
db: OpticsDB,
interval_seconds: u64,
}
impl UpdateSubmitter {
pub(crate) fn new(home: Arc<Homes>, db: OpticsDB, interval_seconds: u64) -> Self {
Self {
home,
db,
interval_seconds,
}
}
pub(crate) fn spawn(self) -> Instrumented<JoinHandle<Result<()>>> {
let span = info_span!("UpdateSubmitter");
tokio::spawn(async move {
// start from the chain state
let mut committed_root = self.home.committed_root().await?;
loop {
sleep(Duration::from_secs(self.interval_seconds)).await;
// if we have produced an update building off the committed root
// submit it
if let Some(signed) = self.db.retrieve_produced_update(committed_root)? {
let hex_signature = format!("0x{}", hex::encode(signed.signature.to_vec()));
info!(
previous_root = ?signed.update.previous_root,
new_root = ?signed.update.new_root,
hex_signature = %hex_signature,
"Submitting update to chain"
);
self.home.update(&signed).await?;
// continue from local state
committed_root = signed.update.new_root;
}
}
})
.instrument(span)
}
}
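
Taken together, produce.rs and submit.rs communicate only through the database: the producer stores each signed update keyed by its previous_root, and the submitter advances by looking up the update produced for its current committed root. A minimal, synchronous illustration of that handoff, using strings and an in-memory map as stand-ins for H256 roots and OpticsDB:

use std::collections::HashMap;

fn main() {
    // produced updates: previous_root -> new_root (what UpdateProducer stores)
    let mut produced: HashMap<&str, &str> = HashMap::new();
    produced.insert("root0", "root1"); // producer signed root0 -> root1
    produced.insert("root1", "root2"); // later, root1 -> root2

    // submitter: start from the chain's committed root and walk forward
    let mut committed_root = "root0";
    while let Some(&new_root) = produced.get(committed_root) {
        println!("submitting update {} -> {}", committed_root, new_root);
        committed_root = new_root; // continue from local state
    }
    assert_eq!(committed_root, "root2");
}

Because the submitter only ever reads the entry stored for the committed root it last observed, it cannot skip ahead of, or contradict, what the producer signed.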

@ -1,200 +1,18 @@
use std::{sync::Arc, time::Duration};
use std::sync::Arc;
use async_trait::async_trait;
use color_eyre::{
eyre::{bail, ensure, Context},
Result,
};
use color_eyre::{eyre::ensure, Result};
use ethers::{signers::Signer, types::Address};
use futures_util::future::select_all;
use prometheus::IntCounterVec;
use tokio::{
sync::{
mpsc::{self, error::TrySendError, Receiver, Sender},
Mutex,
},
task::JoinHandle,
time::sleep,
};
use tracing::{error, info, instrument::Instrumented, Instrument};
use crate::settings::UpdaterSettings as Settings;
use optics_base::{AgentCore, Homes, OpticsAgent};
use optics_core::{db::OpticsDB, Common, Home, SignedUpdate, Signers, Update};
#[derive(Debug)]
struct UpdateHandler {
home: Arc<Homes>,
rx: Receiver<Update>,
update_pause: u64,
signer: Arc<Signers>,
db: OpticsDB,
mutex: Arc<Mutex<()>>,
signed_attestation_count: IntCounterVec,
}
impl std::fmt::Display for UpdateHandler {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"UpdateHandler: {{ home: {:?}, signer: {:?}, update_pause: {} }}",
self.home, self.signer, self.update_pause
)
}
}
impl UpdateHandler {
fn new(
home: Arc<Homes>,
rx: Receiver<Update>,
update_pause: u64,
signer: Arc<Signers>,
db: OpticsDB,
mutex: Arc<Mutex<()>>,
signed_attestation_count: IntCounterVec,
) -> Self {
Self {
home,
rx,
update_pause,
signer,
db,
mutex,
signed_attestation_count,
}
}
fn check_conflict(&self, update: &Update) -> Option<SignedUpdate> {
self.db
.update_by_previous_root(self.home.name(), update.previous_root)
.expect("db failure")
}
#[tracing::instrument(err, skip(self), fields(self = %self))]
async fn acceptable(&self, update: &Update) -> Result<bool> {
// Poll chain API to see if queue still contains new root
// and old root still equals home's current root
let (in_queue, committed_root) = tokio::join!(
self.home.queue_contains(update.new_root),
self.home.committed_root()
);
if in_queue.is_err() {
info!("Update no longer in queue");
}
if committed_root.is_err() {
error!("Connection gone");
}
let in_queue = in_queue?;
let committed_root = committed_root?;
let old_root = update.previous_root;
Ok(in_queue && committed_root == old_root)
}
#[tracing::instrument(err, skip(self), fields(self = %self))]
async fn handle_update(&self, update: Update) -> Result<()> {
info!("Have an update, awaiting the tick");
// We poll acceptable immediately, to prevent waiting on
// any unacceptable updates that got into the channel
// We poll it AGAIN after sleeping to ensure that the update
// is still acceptable.
// TODO(james): later refactor this to only allow acceptable
// updates into the channel?
if !self.acceptable(&update).await? {
info!("Declined to submit update. No longer current");
return Ok(());
}
// Wait `update_pause` seconds
sleep(Duration::from_secs(self.update_pause)).await;
if !self.acceptable(&update).await? {
info!("Declined to submit update. No longer current");
return Ok(());
}
use tokio::task::JoinHandle;
use tracing::{instrument::Instrumented, Instrument};
// acquire guard. If the guard can't be acquired, that
// means a tx is in flight and we should try again later.
let _guard = self
.mutex
.try_lock()
.wrap_err("Declined to submit update.")?;
// If update still valid and doesn't conflict with local
// history of signed updates, sign and submit update. Note
// that because we acquire a guard, only one task
// can check and enter the below `if` block at a time,
// protecting from races between threads.
if self.check_conflict(&update).is_some() {
bail!("Found conflicting update in DB");
}
// If we have a conflict, we grab that one instead
let signed = update.sign_with(self.signer.as_ref()).await.unwrap();
// If successfully submitted update, record in db
info!(
"Dispatching signed update to contract. Current root is {:?}, new root is {:?}",
&signed.update.previous_root, &signed.update.new_root
);
self.signed_attestation_count
.with_label_values(&[self.home.name(), Updater::AGENT_NAME])
.inc();
self.home.update(&signed).await?;
info!("Storing signed update in db");
self.db.store_latest_update(self.home.name(), &signed)?;
Ok(())
// guard dropped here
}
fn spawn(mut self) -> Instrumented<JoinHandle<Result<()>>> {
tokio::spawn(async move {
while let Some(update) = self.rx.recv().await {
self.handle_update(update).await?;
}
Ok(())
})
.in_current_span()
}
}
struct UpdatePoller {
home: Arc<Homes>,
tx: Sender<Update>,
interval_seconds: u64,
}
impl UpdatePoller {
fn new(home: Arc<Homes>, tx: Sender<Update>, interval_seconds: u64) -> Self {
Self {
home,
tx,
interval_seconds,
}
}
fn spawn(self) -> Instrumented<JoinHandle<Result<()>>> {
tokio::spawn(async move {
loop {
if let Some(update) = self.home.produce_update().await? {
if let Err(TrySendError::Closed(_)) = self.tx.try_send(update) {
bail!("UpdatePoller died");
}
}
sleep(Duration::from_secs(self.interval_seconds)).await;
}
})
.in_current_span()
}
}
use crate::{
produce::UpdateProducer, settings::UpdaterSettings as Settings, submit::UpdateSubmitter,
};
use optics_base::{AgentCore, OpticsAgent};
use optics_core::{db::OpticsDB, Common, Signers};
/// An updater agent
#[derive(Debug)]
@ -258,19 +76,19 @@ impl OpticsAgent for Updater {
// First we check that we have the correct key to sign with.
let home = self.home();
let address = self.signer.address();
let db = OpticsDB::new(self.home().name(), self.db());
let (tx, rx) = mpsc::channel(1);
let poller = UpdatePoller::new(self.home(), tx, self.interval_seconds);
let handler = UpdateHandler::new(
let produce = UpdateProducer::new(
self.home(),
rx,
self.update_pause,
db.clone(),
self.signer.clone(),
OpticsDB::new(self.db()),
Default::default(),
self.interval_seconds,
self.update_pause,
self.signed_attestation_count.clone(),
);
let submit = UpdateSubmitter::new(self.home(), db, self.interval_seconds);
tokio::spawn(async move {
let expected: Address = home.updater().await?.into();
ensure!(
@ -279,10 +97,10 @@ impl OpticsAgent for Updater {
expected,
address
);
let poller_task = poller.spawn();
let handler_task = handler.spawn();
let produce_task = produce.spawn();
let submit_task = submit.spawn();
let (res, _, rem) = select_all(vec![poller_task, handler_task]).await;
let (res, _, rem) = select_all(vec![produce_task, submit_task]).await;
for task in rem.into_iter() {
task.into_inner().abort();

@ -2,7 +2,7 @@
name = "watcher"
version = "0.1.0"
authors = ["Luke Tchang <ltchang@stanford.edu>"]
edition = "2018"
edition = "2021"
[dependencies]
tokio = { version = "1.0.1", features = ["rt", "macros"] }

@ -681,7 +681,7 @@ mod test {
let (_tx, rx) = mpsc::channel(200);
let mut handler = UpdateHandler {
rx,
db: OpticsDB::new(db),
db: OpticsDB::new("home", db),
home: Arc::new(mock_home.into()),
};

@ -2,7 +2,7 @@
name = "optics-ethereum"
version = "0.1.0"
authors = ["Erin Hales <mcbridee093@gmail.com>"]
edition = "2018"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

@ -33,17 +33,24 @@ use crate::report_tx;
static LAST_INSPECTED: &str = "homeIndexerLastInspected";
#[allow(missing_docs)]
abigen!(
EthereumHomeInternal,
"./chains/optics-ethereum/abis/Home.abi.json"
);
impl<M> std::fmt::Display for EthereumHomeInternal<M>
where
M: ethers::providers::Middleware,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
struct HomeIndexer<M>
where
M: ethers::providers::Middleware,
{
home_name: String,
contract: Arc<EthereumHomeInternal<M>>,
provider: Arc<M>,
db: OpticsDB,
@ -95,9 +102,8 @@ where
for update_with_meta in updates_with_meta {
self.db
.store_latest_update(&self.home_name, &update_with_meta.signed_update)?;
.store_latest_update(&update_with_meta.signed_update)?;
self.db.store_update_metadata(
&self.home_name,
update_with_meta.signed_update.update.new_root,
update_with_meta.metadata,
)?;
@ -130,8 +136,7 @@ where
});
for message in messages {
self.db
.store_raw_committed_message(&self.home_name, &message)?;
self.db.store_raw_committed_message(&message)?;
let committed_message: CommittedMessage = message.try_into()?;
info!(
@ -152,7 +157,7 @@ where
tokio::spawn(async move {
let mut next_height: u32 = self
.db
.retrieve_decodable(&self.home_name, "", LAST_INSPECTED)
.retrieve_decodable("", LAST_INSPECTED)
.expect("db failure")
.unwrap_or(self.from_height);
info!(
@ -180,8 +185,7 @@ where
self.sync_leaves(next_height, to)
)?;
self.db
.store_encodable(&self.home_name, "", LAST_INSPECTED, &next_height)?;
self.db.store_encodable("", LAST_INSPECTED, &next_height)?;
next_height = to;
// sleep here if we've caught up
if to == tip {
@ -225,7 +229,7 @@ where
contract: Arc::new(EthereumHomeInternal::new(address, provider.clone())),
domain: *domain,
name: name.to_owned(),
db: OpticsDB::new(db),
db: OpticsDB::new(name.to_owned(), db),
provider,
}
}
@ -278,7 +282,7 @@ where
old_root: H256,
) -> Result<Option<SignedUpdate>, ChainCommunicationError> {
loop {
if let Some(update) = self.db.update_by_previous_root(&self.name, old_root)? {
if let Some(update) = self.db.update_by_previous_root(old_root)? {
return Ok(Some(update));
}
sleep(Duration::from_millis(500)).await;
@ -291,14 +295,14 @@ where
new_root: H256,
) -> Result<Option<SignedUpdate>, ChainCommunicationError> {
loop {
if let Some(update) = self.db.update_by_new_root(&self.name, new_root)? {
if let Some(update) = self.db.update_by_new_root(new_root)? {
return Ok(Some(update));
}
sleep(Duration::from_millis(500)).await;
}
}
#[tracing::instrument(err, skip(self), fields(hexSignature = %format!("0x{}", hex::encode(update.signature.to_vec()))))]
#[tracing::instrument(err, skip(self), fields(hex_signature = %format!("0x{}", hex::encode(update.signature.to_vec()))))]
async fn update(&self, update: &SignedUpdate) -> Result<TxOutcome, ChainCommunicationError> {
let tx = self.contract.update(
update.update.previous_root.to_fixed_bytes(),
@ -346,7 +350,6 @@ where
indexed_height: prometheus::IntGauge,
) -> Instrumented<JoinHandle<Result<()>>> {
let indexer = HomeIndexer {
home_name: self.name.to_owned(),
contract: self.contract.clone(),
db: self.db.clone(),
from_height,
@ -364,7 +367,7 @@ where
nonce: u32,
) -> Result<Option<RawCommittedMessage>, ChainCommunicationError> {
loop {
if let Some(update) = self.db.message_by_nonce(&self.name, destination, nonce)? {
if let Some(update) = self.db.message_by_nonce(destination, nonce)? {
return Ok(Some(update));
}
sleep(Duration::from_millis(500)).await;
@ -377,7 +380,7 @@ where
leaf: H256,
) -> Result<Option<RawCommittedMessage>, ChainCommunicationError> {
loop {
if let Some(update) = self.db.message_by_leaf(&self.name, leaf)? {
if let Some(update) = self.db.message_by_leaf(leaf)? {
return Ok(Some(update));
}
sleep(Duration::from_millis(500)).await;
@ -389,7 +392,7 @@ where
tree_index: usize,
) -> Result<Option<H256>, ChainCommunicationError> {
loop {
if let Some(update) = self.db.leaf_by_leaf_index(&self.name, tree_index as u32)? {
if let Some(update) = self.db.leaf_by_leaf_index(tree_index as u32)? {
return Ok(Some(update));
}
sleep(Duration::from_millis(500)).await;
@ -416,7 +419,7 @@ where
Ok(self.contract.queue_contains(root.into()).call().await?)
}
#[tracing::instrument(err, skip(self), fields(hexSignature = %format!("0x{}", hex::encode(update.signature.to_vec()))))]
#[tracing::instrument(err, skip(self), fields(hex_signature = %format!("0x{}", hex::encode(update.signature.to_vec()))))]
async fn improper_update(
&self,
update: &SignedUpdate,

@ -4,6 +4,7 @@
#![warn(missing_docs)]
#![warn(unused_extern_crates)]
use color_eyre::eyre::Result;
use ethers::prelude::*;
use num::Num;
use optics_core::*;
@ -69,10 +70,7 @@ contract!(
#[async_trait::async_trait]
impl optics_core::Chain for Chain {
async fn query_balance(
&self,
addr: optics_core::Address,
) -> anyhow::Result<optics_core::Balance> {
async fn query_balance(&self, addr: optics_core::Address) -> Result<optics_core::Balance> {
let balance = format!(
"{:x}",
self.ethers

@ -10,7 +10,6 @@ use std::{convert::TryFrom, error::Error as StdError, sync::Arc};
use crate::report_tx;
#[allow(missing_docs)]
abigen!(
EthereumReplicaInternal,
"./chains/optics-ethereum/abis/Replica.abi.json",
@ -20,6 +19,15 @@ abigen!(
},
);
impl<M> std::fmt::Display for EthereumReplicaInternal<M>
where
M: ethers::providers::Middleware,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
/// A struct that provides access to an Ethereum replica contract
#[derive(Debug)]
pub struct EthereumReplica<M>

@ -7,7 +7,6 @@ use std::sync::Arc;
use crate::report_tx;
#[allow(missing_docs)]
abigen!(
EthereumConnectionManagerInternal,
"./chains/optics-ethereum/abis/XAppConnectionManager.abi.json"

@ -2,7 +2,7 @@
name = "optics-base"
version = "0.1.0"
authors = ["James Prestwich <prestwich@clabs.co>"]
edition = "2018"
edition = "2021"
[dependencies]

@ -2,7 +2,7 @@
name = "optics-core"
version = "0.1.0"
authors = ["James Prestwich <prestwich@clabs.co>"]
edition = "2018"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@ -25,7 +25,6 @@ rocksdb = { git = "https://github.com/rust-rocksdb/rust-rocksdb" }
prometheus = "0.12.0"
bytes = { version = "1", features = ["serde"]}
num = {version="0", features=["serde"]}
anyhow = "1"
[dev-dependencies]
tokio = {version = "1.0.1", features = ["rt", "time"]}

@ -1,11 +1,12 @@
#![allow(missing_docs)]
use color_eyre::eyre::Result;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct Address(pub bytes::Bytes);
#[derive(Debug, Clone, Deserialize, Serialize)]
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct Balance(pub num::BigInt);
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -26,7 +27,8 @@ impl std::fmt::Display for ContractLocator {
#[async_trait::async_trait]
pub trait Chain {
async fn query_balance(&self, addr: Address) -> anyhow::Result<Balance>;
/// Query the balance on a chain
async fn query_balance(&self, addr: Address) -> Result<Balance>;
}
impl From<Address> for ethers::types::H160 {

@ -14,7 +14,7 @@ pub use typed_db::*;
mod optics_db;
pub use optics_db::*;
use crate::{Decode, Encode, OpticsError};
use crate::{Decode, Encode, OpticsError, Update};
#[derive(Debug, Clone)]
/// A KV Store
@ -35,6 +35,16 @@ pub enum DbError {
/// Optics Error
#[error("{0}")]
OpticsError(#[from] OpticsError),
/// UpdaterConflictError
///
/// TODO(luke): move this agent-related stuff into optics-base
#[error("Updater attempted to store conflicting signed update. Existing: {existing:?}. New conflicting: {conflicting:?}.")]
UpdaterConflictError {
/// Existing signed update
existing: Update,
/// Conflicting signed update
conflicting: Update,
},
}
type Result<T> = std::result::Result<T, DbError>;

@ -1,13 +1,13 @@
use crate::db::{DbError, TypedDB, DB};
use crate::UpdateMeta;
use crate::{
accumulator::merkle::Proof, traits::RawCommittedMessage, utils, Decode, Encode, OpticsMessage,
accumulator::merkle::Proof, traits::RawCommittedMessage, utils, Decode, OpticsMessage,
SignedUpdate,
};
use color_eyre::Result;
use ethers::core::types::H256;
use tokio::time::sleep;
use tracing::{debug, warn};
use tracing::{debug, error, warn};
use std::future::Future;
use std::time::Duration;
@ -24,59 +24,38 @@ static UPDATE_META: &str = "update_metadata_";
static LATEST_ROOT: &str = "update_latest_root_";
static LATEST_NONCE: &str = "latest_nonce_";
static LATEST_LEAF_INDEX: &str = "latest_known_leaf_index_";
static UPDATER_PRODUCED_UPDATE: &str = "updater_produced_update_";
/// DB handle for storing data tied to a specific home.
///
/// Key structure: ```<home_name>_<additional_prefix(es)>_<key>```
/// Key structure: ```<entity>_<additional_prefix(es)>_<key>```
#[derive(Debug, Clone)]
pub struct OpticsDB(TypedDB);
impl OpticsDB {
/// Instantiated new `OpticsDB`
pub fn new(db: DB) -> Self {
Self(TypedDB::new(db))
}
impl std::ops::Deref for OpticsDB {
type Target = TypedDB;
/// Store encodable value
pub fn store_encodable<V: Encode>(
&self,
entity: impl AsRef<[u8]>,
prefix: impl AsRef<[u8]>,
key: impl AsRef<[u8]>,
value: &V,
) -> Result<(), DbError> {
self.0.store_encodable(entity, prefix, key, value)
fn deref(&self) -> &Self::Target {
&self.0
}
}
/// Retrieve decodable value
pub fn retrieve_decodable<V: Decode>(
&self,
entity: impl AsRef<[u8]>,
prefix: impl AsRef<[u8]>,
key: impl AsRef<[u8]>,
) -> Result<Option<V>, DbError> {
self.0.retrieve_decodable(entity, prefix, key)
impl AsRef<TypedDB> for OpticsDB {
fn as_ref(&self) -> &TypedDB {
&self.0
}
}
/// Store encodable kv pair
pub fn store_keyed_encodable<K: Encode, V: Encode>(
&self,
entity: impl AsRef<[u8]>,
prefix: impl AsRef<[u8]>,
key: &K,
value: &V,
) -> Result<(), DbError> {
self.0.store_keyed_encodable(entity, prefix, key, value)
impl AsRef<DB> for OpticsDB {
fn as_ref(&self) -> &DB {
self.0.as_ref()
}
}
/// Retrieve decodable value given encodable key
pub fn retrieve_keyed_decodable<K: Encode, V: Decode>(
&self,
entity: impl AsRef<[u8]>,
prefix: impl AsRef<[u8]>,
key: &K,
) -> Result<Option<V>, DbError> {
self.0.retrieve_keyed_decodable(entity, prefix, key)
impl OpticsDB {
/// Instantiates a new `OpticsDB` scoped to the given entity
pub fn new(entity: impl AsRef<str>, db: DB) -> Self {
Self(TypedDB::new(entity.as_ref().to_owned(), db))
}
/// Store a raw committed message
@ -85,11 +64,7 @@ impl OpticsDB {
/// - `destination_and_nonce` --> `leaf`
/// - `leaf_index` --> `leaf`
/// - `leaf` --> `message`
pub fn store_raw_committed_message(
&self,
home_name: impl AsRef<[u8]>,
message: &RawCommittedMessage,
) -> Result<()> {
pub fn store_raw_committed_message(&self, message: &RawCommittedMessage) -> Result<()> {
let parsed = OpticsMessage::read_from(&mut message.message.clone().as_slice())?;
let destination_and_nonce = parsed.destination_and_nonce();
@ -104,39 +79,31 @@ impl OpticsDB {
leaf_index = message.leaf_index,
"storing raw committed message in db"
);
self.store_leaf(&home_name, message.leaf_index, destination_and_nonce, leaf)?;
self.store_keyed_encodable(&home_name, MESSAGE, &leaf, message)?;
self.store_leaf(message.leaf_index, destination_and_nonce, leaf)?;
self.store_keyed_encodable(MESSAGE, &leaf, message)?;
Ok(())
}
/// Store the latest known leaf_index
///
/// Key --> value: `LATEST_LEAF_INDEX` --> `leaf_index`
pub fn update_latest_leaf_index(
&self,
home_name: impl AsRef<[u8]>,
leaf_index: u32,
) -> Result<(), DbError> {
if let Ok(Some(idx)) = self.retrieve_latest_leaf_index(&home_name) {
pub fn update_latest_leaf_index(&self, leaf_index: u32) -> Result<(), DbError> {
if let Ok(Some(idx)) = self.retrieve_latest_leaf_index() {
if leaf_index <= idx {
return Ok(());
}
}
self.store_encodable(&home_name, "", LATEST_LEAF_INDEX, &leaf_index)
self.store_encodable("", LATEST_LEAF_INDEX, &leaf_index)
}
/// Retrieve the highest known leaf_index
pub fn retrieve_latest_leaf_index(
&self,
home_name: impl AsRef<[u8]>,
) -> Result<Option<u32>, DbError> {
self.retrieve_decodable(home_name, "", LATEST_LEAF_INDEX)
pub fn retrieve_latest_leaf_index(&self) -> Result<Option<u32>, DbError> {
self.retrieve_decodable("", LATEST_LEAF_INDEX)
}
/// Store the leaf keyed by leaf_index
fn store_leaf(
&self,
home_name: impl AsRef<[u8]>,
leaf_index: u32,
destination_and_nonce: u64,
leaf: H256,
@ -146,64 +113,49 @@ impl OpticsDB {
leaf = ?leaf,
"storing leaf hash keyed by index and dest+nonce"
);
self.store_keyed_encodable(&home_name, LEAF, &destination_and_nonce, &leaf)?;
self.store_keyed_encodable(&home_name, LEAF, &leaf_index, &leaf)?;
self.update_latest_leaf_index(&home_name, leaf_index)
self.store_keyed_encodable(LEAF, &destination_and_nonce, &leaf)?;
self.store_keyed_encodable(LEAF, &leaf_index, &leaf)?;
self.update_latest_leaf_index(leaf_index)
}
/// Retrieve a raw committed message by its leaf hash
pub fn message_by_leaf(
&self,
home_name: impl AsRef<[u8]>,
leaf: H256,
) -> Result<Option<RawCommittedMessage>, DbError> {
self.retrieve_keyed_decodable(home_name, MESSAGE, &leaf)
pub fn message_by_leaf(&self, leaf: H256) -> Result<Option<RawCommittedMessage>, DbError> {
self.retrieve_keyed_decodable(MESSAGE, &leaf)
}
/// Retrieve the leaf hash keyed by leaf index
pub fn leaf_by_leaf_index(
&self,
home_name: impl AsRef<[u8]>,
leaf_index: u32,
) -> Result<Option<H256>, DbError> {
self.retrieve_keyed_decodable(home_name, LEAF, &leaf_index)
pub fn leaf_by_leaf_index(&self, leaf_index: u32) -> Result<Option<H256>, DbError> {
self.retrieve_keyed_decodable(LEAF, &leaf_index)
}
/// Retrieve the leaf hash keyed by destination and nonce
pub fn leaf_by_nonce(
&self,
home_name: impl AsRef<[u8]>,
destination: u32,
nonce: u32,
) -> Result<Option<H256>, DbError> {
pub fn leaf_by_nonce(&self, destination: u32, nonce: u32) -> Result<Option<H256>, DbError> {
let dest_and_nonce = utils::destination_and_nonce(destination, nonce);
self.retrieve_keyed_decodable(home_name, LEAF, &dest_and_nonce)
self.retrieve_keyed_decodable(LEAF, &dest_and_nonce)
}
/// Retrieve a raw committed message by its leaf hash
pub fn message_by_nonce(
&self,
home_name: impl AsRef<[u8]>,
destination: u32,
nonce: u32,
) -> Result<Option<RawCommittedMessage>, DbError> {
let leaf = self.leaf_by_nonce(&home_name, destination, nonce)?;
let leaf = self.leaf_by_nonce(destination, nonce)?;
match leaf {
None => Ok(None),
Some(leaf) => self.message_by_leaf(&home_name, leaf),
Some(leaf) => self.message_by_leaf(leaf),
}
}
/// Retrieve a raw committed message by its leaf index
pub fn message_by_leaf_index(
&self,
home_name: impl AsRef<[u8]>,
index: u32,
) -> Result<Option<RawCommittedMessage>, DbError> {
let leaf: Option<H256> = self.leaf_by_leaf_index(&home_name, index)?;
let leaf: Option<H256> = self.leaf_by_leaf_index(index)?;
match leaf {
None => Ok(None),
Some(leaf) => self.message_by_leaf(&home_name, leaf),
Some(leaf) => self.message_by_leaf(leaf),
}
}
@ -211,35 +163,26 @@ impl OpticsDB {
///
/// Keys --> Values:
/// - `replica_domain` --> `nonce`
pub fn store_latest_nonce(
&self,
home_name: impl AsRef<[u8]>,
replica_domain: u32,
nonce: u32,
) -> Result<(), DbError> {
self.store_keyed_encodable(home_name, LATEST_NONCE, &replica_domain, &nonce)?;
pub fn store_latest_nonce(&self, replica_domain: u32, nonce: u32) -> Result<(), DbError> {
self.store_keyed_encodable(LATEST_NONCE, &replica_domain, &nonce)?;
Ok(())
}
/// Retrieves the latest inspected nonce for a given replica domain
pub fn retrieve_latest_nonce(
&self,
home_name: impl AsRef<[u8]>,
replica_domain: u32,
) -> Result<Option<u32>, DbError> {
self.retrieve_keyed_decodable(home_name, LATEST_NONCE, &replica_domain)
pub fn retrieve_latest_nonce(&self, replica_domain: u32) -> Result<Option<u32>, DbError> {
self.retrieve_keyed_decodable(LATEST_NONCE, &replica_domain)
}
/// Store the latest committed root
fn store_latest_root(&self, entity: impl AsRef<[u8]>, root: H256) -> Result<(), DbError> {
fn store_latest_root(&self, root: H256) -> Result<(), DbError> {
debug!(root = ?root, "storing new latest root in DB");
self.store_encodable(entity, "", LATEST_ROOT, &root)
self.store_encodable("", LATEST_ROOT, &root)
}
/// Retrieve the latest committed root
pub fn retrieve_latest_root(&self, entity: impl AsRef<[u8]>) -> Result<Option<H256>, DbError> {
self.retrieve_decodable(entity, "", LATEST_ROOT)
pub fn retrieve_latest_root(&self) -> Result<Option<H256>, DbError> {
self.retrieve_decodable("", LATEST_ROOT)
}
/// Store update metadata (by update's new root)
@ -248,22 +191,17 @@ impl OpticsDB {
/// - `update_new_root` --> `update_metadata`
pub fn store_update_metadata(
&self,
entity: impl AsRef<[u8]>,
new_root: H256,
metadata: UpdateMeta,
) -> Result<(), DbError> {
debug!(new_root = ?new_root, metadata = ?metadata, "storing update metadata in DB");
self.store_keyed_encodable(entity, UPDATE_META, &new_root, &metadata)
self.store_keyed_encodable(UPDATE_META, &new_root, &metadata)
}
/// Retrieve update metadata (by update's new root)
pub fn retrieve_update_metadata(
&self,
entity: impl AsRef<[u8]>,
new_root: H256,
) -> Result<Option<UpdateMeta>, DbError> {
self.retrieve_keyed_decodable(entity, UPDATE_META, &new_root)
pub fn retrieve_update_metadata(&self, new_root: H256) -> Result<Option<UpdateMeta>, DbError> {
self.retrieve_keyed_decodable(UPDATE_META, &new_root)
}
/// Store a signed update building off latest root
@ -272,11 +210,7 @@ impl OpticsDB {
/// - `LATEST_ROOT` --> `root`
/// - `new_root` --> `prev_root`
/// - `prev_root` --> `update`
pub fn store_latest_update(
&self,
entity: impl AsRef<[u8]>,
update: &SignedUpdate,
) -> Result<(), DbError> {
pub fn store_latest_update(&self, update: &SignedUpdate) -> Result<(), DbError> {
debug!(
previous_root = ?update.update.previous_root,
new_root = ?update.update.new_root,
@ -285,10 +219,10 @@ impl OpticsDB {
// If there is no latest root, or if this update is on the latest root
// update latest root
match self.retrieve_latest_root(&entity)? {
match self.retrieve_latest_root()? {
Some(root) => {
if root == update.update.previous_root {
self.store_latest_root(&entity, update.update.new_root)?;
self.store_latest_root(update.update.new_root)?;
} else {
warn!(
"Attempted to store update not building off latest root: {:?}",
@ -296,12 +230,11 @@ impl OpticsDB {
)
}
}
None => self.store_latest_root(&entity, update.update.new_root)?,
None => self.store_latest_root(update.update.new_root)?,
}
self.store_keyed_encodable(&entity, UPDATE, &update.update.previous_root, update)?;
self.store_keyed_encodable(UPDATE, &update.update.previous_root, update)?;
self.store_keyed_encodable(
&entity,
PREV_ROOT,
&update.update.new_root,
&update.update.previous_root,
@ -311,70 +244,86 @@ impl OpticsDB {
/// Retrieve an update by its previous root
pub fn update_by_previous_root(
&self,
entity: impl AsRef<[u8]>,
previous_root: H256,
) -> Result<Option<SignedUpdate>, DbError> {
self.retrieve_keyed_decodable(entity, UPDATE, &previous_root)
self.retrieve_keyed_decodable(UPDATE, &previous_root)
}
/// Retrieve an update by its new root
pub fn update_by_new_root(
&self,
entity: impl AsRef<[u8]>,
new_root: H256,
) -> Result<Option<SignedUpdate>, DbError> {
let prev_root: Option<H256> =
self.retrieve_keyed_decodable(&entity, PREV_ROOT, &new_root)?;
pub fn update_by_new_root(&self, new_root: H256) -> Result<Option<SignedUpdate>, DbError> {
let prev_root: Option<H256> = self.retrieve_keyed_decodable(PREV_ROOT, &new_root)?;
match prev_root {
Some(prev_root) => self.update_by_previous_root(&entity, prev_root),
Some(prev_root) => self.update_by_previous_root(prev_root),
None => Ok(None),
}
}
/// Iterate over all leaves
pub fn leaf_iterator(&self) -> PrefixIterator<H256> {
PrefixIterator::new(self.0.db().prefix_iterator(LEAF_IDX), LEAF_IDX.as_ref())
PrefixIterator::new(self.0.as_ref().prefix_iterator(LEAF_IDX), LEAF_IDX.as_ref())
}
/// Store a proof by its leaf index
///
/// Keys --> Values:
/// - `leaf_index` --> `proof`
pub fn store_proof(
&self,
home_name: impl AsRef<[u8]>,
leaf_index: u32,
proof: &Proof,
) -> Result<(), DbError> {
pub fn store_proof(&self, leaf_index: u32, proof: &Proof) -> Result<(), DbError> {
debug!(leaf_index, "storing proof in DB");
self.store_keyed_encodable(home_name, PROOF, &leaf_index, proof)
self.store_keyed_encodable(PROOF, &leaf_index, proof)
}
/// Retrieve a proof by its leaf index
pub fn proof_by_leaf_index(
&self,
home_name: impl AsRef<[u8]>,
leaf_index: u32,
) -> Result<Option<Proof>, DbError> {
self.retrieve_keyed_decodable(home_name, PROOF, &leaf_index)
pub fn proof_by_leaf_index(&self, leaf_index: u32) -> Result<Option<Proof>, DbError> {
self.retrieve_keyed_decodable(PROOF, &leaf_index)
}
// TODO(james): this is a quick-fix for the prover_sync and I don't like it
/// Poll the db every 100 milliseconds waiting for a leaf.
pub fn wait_for_leaf(
&self,
home_name: impl AsRef<[u8]>,
leaf_index: u32,
) -> impl Future<Output = Result<Option<H256>, DbError>> {
let slf = self.clone();
async move {
loop {
if let Some(leaf) = slf.leaf_by_leaf_index(&home_name, leaf_index)? {
if let Some(leaf) = slf.leaf_by_leaf_index(leaf_index)? {
return Ok(Some(leaf));
}
sleep(Duration::from_millis(100)).await
}
}
}
/// Store a pending update in the DB for potential submission.
///
/// This does not produce update meta or update the latest update db value.
/// It is used by update production and submission.
pub fn store_produced_update(&self, update: &SignedUpdate) -> Result<(), DbError> {
let existing_opt = self.retrieve_produced_update(update.update.previous_root)?;
if let Some(existing) = existing_opt {
if existing.update.new_root != update.update.new_root {
error!("Updater attempted to store conflicting update. Existing update: {:?}. New conflicting update: {:?}.", &existing, &update);
return Err(DbError::UpdaterConflictError {
existing: existing.update,
conflicting: update.update,
});
}
}
self.store_keyed_encodable(
UPDATER_PRODUCED_UPDATE,
&update.update.previous_root,
update,
)
}
/// Retrieve a pending update from the DB (if one exists).
pub fn retrieve_produced_update(
&self,
previous_root: H256,
) -> Result<Option<SignedUpdate>, DbError> {
self.retrieve_keyed_decodable(UPDATER_PRODUCED_UPDATE, &previous_root)
}
}
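
The store_produced_update guard above is the DB-level enforcement of the "never overwrite with a conflicting update" rule from the commit message: at most one new_root may ever be recorded for a given previous_root. A self-contained sketch of the same rule, with strings standing in for H256 roots and a HashMap standing in for the produced-update store:

use std::collections::HashMap;

#[derive(Debug)]
struct Conflict {
    existing: String,
    conflicting: String,
}

fn store_produced(
    db: &mut HashMap<String, String>,
    previous_root: &str,
    new_root: &str,
) -> Result<(), Conflict> {
    // reject a second, different new_root for the same previous_root
    if let Some(existing) = db.get(previous_root) {
        if existing.as_str() != new_root {
            return Err(Conflict {
                existing: existing.clone(),
                conflicting: new_root.to_owned(),
            });
        }
    }
    db.insert(previous_root.to_owned(), new_root.to_owned());
    Ok(())
}

fn main() {
    let mut db = HashMap::new();
    assert!(store_produced(&mut db, "root0", "root1").is_ok());
    assert!(store_produced(&mut db, "root0", "root1").is_ok()); // re-storing the same update is fine
    let err = store_produced(&mut db, "root0", "rootX").unwrap_err(); // conflicting update rejected
    println!("rejected: {:?}", err);
}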

@ -6,22 +6,26 @@ use color_eyre::Result;
///
/// Key structure: ```<type_prefix>_<additional_prefix(es)>_<key>```
#[derive(Debug, Clone)]
pub struct TypedDB(DB);
pub struct TypedDB {
entity: String,
db: DB,
}
impl TypedDB {
/// Instantiate new `TypedDB`
pub fn new(db: DB) -> Self {
Self(db)
impl AsRef<DB> for TypedDB {
fn as_ref(&self) -> &DB {
&self.db
}
}
/// Return reference to raw db
pub fn db(&self) -> &DB {
&self.0
impl TypedDB {
/// Instantiate new `TypedDB`
pub fn new(entity: String, db: DB) -> Self {
Self { entity, db }
}
fn full_prefix(entity: impl AsRef<[u8]>, prefix: impl AsRef<[u8]>) -> Vec<u8> {
fn full_prefix(&self, prefix: impl AsRef<[u8]>) -> Vec<u8> {
let mut full_prefix = vec![];
full_prefix.extend(entity.as_ref());
full_prefix.extend(self.entity.as_ref() as &[u8]);
full_prefix.extend("_".as_bytes());
full_prefix.extend(prefix.as_ref());
full_prefix
@ -30,46 +34,41 @@ impl TypedDB {
/// Store encodable value
pub fn store_encodable<V: Encode>(
&self,
entity: impl AsRef<[u8]>,
prefix: impl AsRef<[u8]>,
key: impl AsRef<[u8]>,
value: &V,
) -> Result<(), DbError> {
self.0
.store_encodable(TypedDB::full_prefix(entity, prefix), key, value)
self.db
.store_encodable(self.full_prefix(prefix), key, value)
}
/// Retrieve decodable value
pub fn retrieve_decodable<V: Decode>(
&self,
entity: impl AsRef<[u8]>,
prefix: impl AsRef<[u8]>,
key: impl AsRef<[u8]>,
) -> Result<Option<V>, DbError> {
self.0
.retrieve_decodable(TypedDB::full_prefix(entity, prefix), key)
self.db.retrieve_decodable(self.full_prefix(prefix), key)
}
/// Store encodable kv pair
pub fn store_keyed_encodable<K: Encode, V: Encode>(
&self,
entity: impl AsRef<[u8]>,
prefix: impl AsRef<[u8]>,
key: &K,
value: &V,
) -> Result<(), DbError> {
self.0
.store_keyed_encodable(TypedDB::full_prefix(entity, prefix), key, value)
self.db
.store_keyed_encodable(self.full_prefix(prefix), key, value)
}
/// Retrieve decodable value given encodable key
pub fn retrieve_keyed_decodable<K: Encode, V: Decode>(
&self,
entity: impl AsRef<[u8]>,
prefix: impl AsRef<[u8]>,
key: &K,
) -> Result<Option<V>, DbError> {
self.0
.retrieve_keyed_decodable(TypedDB::full_prefix(entity, prefix), key)
self.db
.retrieve_keyed_decodable(self.full_prefix(prefix), key)
}
}
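
With the entity now fixed at construction, every TypedDB key is namespaced as <entity>_<prefix><key>. A small sketch of that key layout; the real full_prefix is a method on TypedDB, and the "ethereum" entity and "proof_" prefix here are illustrative values, not taken from the diff:

// Free-function mirror of TypedDB::full_prefix for illustration only.
fn full_prefix(entity: &str, prefix: &str) -> Vec<u8> {
    let mut out = Vec::new();
    out.extend_from_slice(entity.as_bytes());
    out.extend_from_slice(b"_");
    out.extend_from_slice(prefix.as_bytes());
    out
}

fn main() {
    let prefix = full_prefix("ethereum", "proof_");
    assert_eq!(prefix, b"ethereum_proof_".to_vec());
    // the per-record key (e.g. an encoded leaf index) is appended after this prefix
}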

@ -24,6 +24,7 @@ pub mod models {
/// Async Traits for Homes & Replicas for use in applications
mod traits;
use ethers_signers::WalletError;
pub use traits::*;
/// Utilities to match contract values
@ -53,7 +54,10 @@ pub use identifiers::OpticsIdentifier;
use async_trait::async_trait;
use ethers::{
core::types::{Address as EthAddress, Signature, SignatureError, H256},
prelude::{transaction::eip2718::TypedTransaction, AwsSigner},
prelude::{
transaction::{eip2718::TypedTransaction, eip712::Eip712},
AwsSigner,
},
signers::{AwsSignerError, LocalWallet, Signer},
};
@ -86,6 +90,9 @@ pub enum SignersError {
/// AWS Signer Error
#[error("{0}")]
AwsSignerError(#[from] AwsSignerError),
/// Wallet Signer Error
#[error("{0}")]
WalletError(#[from] WalletError),
}
impl From<Infallible> for SignersError {
@ -157,6 +164,16 @@ impl Signer for Signers {
Signers::Aws(signer) => signer.chain_id(),
}
}
async fn sign_typed_data<T: Eip712 + Send + Sync>(
&self,
payload: &T,
) -> Result<Signature, Self::Error> {
match self {
Signers::Local(signer) => Ok(signer.sign_typed_data(payload).await?),
Signers::Aws(signer) => Ok(signer.sign_typed_data(payload).await?),
}
}
}
#[async_trait]

@ -8,6 +8,7 @@ pub struct Waiting {
}
/// Pending update state
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
pub struct Pending {
root: H256,
@ -103,6 +104,7 @@ impl Replica<Waiting> {
update: &SignedUpdate,
now: impl FnOnce() -> U256,
) -> Result<Replica<Pending>, Self> {
#[allow(clippy::question_mark)]
if self.check_sig(update).is_err() {
return Err(self);
}

@ -58,6 +58,7 @@ impl<const N: usize> HexString<N> {
}
// Lazy. Should do the check as a cheaper action
#[allow(clippy::question_mark)]
if hex::decode(s).is_err() {
bail!("String is not hex");
}

@ -2,7 +2,7 @@
name = "optics-test"
version = "0.1.0"
authors = ["Luke Tchang <ltchang@stanford.edu>"]
edition = "2018"
edition = "2021"
[dependencies]
tokio = { version = "1.0.1", features = ["rt", "macros"] }

@ -49,7 +49,7 @@ mod test {
async fn db_stores_and_retrieves_messages() {
run_test_db(|db| async move {
let home_name = "home_1".to_owned();
let db = OpticsDB::new(db);
let db = OpticsDB::new(home_name, db);
let m = OpticsMessage {
origin: 10,
@ -67,23 +67,19 @@ mod test {
};
assert_eq!(m.to_leaf(), message.leaf());
db.store_raw_committed_message(&home_name, &message)
.unwrap();
db.store_raw_committed_message(&message).unwrap();
let by_nonce = db
.message_by_nonce(&home_name, m.destination, m.nonce)
.message_by_nonce(m.destination, m.nonce)
.unwrap()
.unwrap();
assert_eq!(by_nonce, message);
let by_leaf = db
.message_by_leaf(&home_name, message.leaf())
.unwrap()
.unwrap();
let by_leaf = db.message_by_leaf(message.leaf()).unwrap().unwrap();
assert_eq!(by_leaf, message);
let by_index = db
.message_by_leaf_index(&home_name, message.leaf_index)
.message_by_leaf_index(message.leaf_index)
.unwrap()
.unwrap();
assert_eq!(by_index, message);
@ -95,16 +91,16 @@ mod test {
async fn db_stores_and_retrieves_proofs() {
run_test_db(|db| async move {
let home_name = "home_1".to_owned();
let db = OpticsDB::new(db);
let db = OpticsDB::new(home_name, db);
let proof = Proof {
leaf: H256::from_low_u64_be(15),
index: 32,
path: Default::default(),
};
db.store_proof(&home_name, 13, &proof).unwrap();
db.store_proof(13, &proof).unwrap();
let by_index = db.proof_by_leaf_index(&home_name, 13).unwrap().unwrap();
let by_index = db.proof_by_leaf_index(13).unwrap().unwrap();
assert_eq!(by_index, proof);
})
.await;

@ -1,7 +1,7 @@
[package]
name = "balance-exporter"
version = "0.1.0"
edition = "2018"
edition = "2021"
description = "Polls chains for optics contract wallet balances and reports them in OpenMetrics format"
authors = ["ember arlynx <ember.arlynx@clabs.co>"]
license = "Apache-2.0"

@ -1,12 +1,12 @@
[package]
name = "kms-cli"
version = "0.1.0"
edition = "2018"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
clap = "3.0.0-beta.4"
clap = "3.0.0-beta.5"
color-eyre = "0.5.11"
ethers = { git = "https://github.com/gakonst/ethers-rs", branch = "master" }
ethers-signers = { git = "https://github.com/gakonst/ethers-rs", branch = "master", features = ["aws"] }

@ -11,12 +11,11 @@ use once_cell::sync::OnceCell;
use rusoto_core::{credential::EnvironmentProvider, HttpClient};
use rusoto_kms::KmsClient;
use clap::Clap;
use clap::Parser;
static KMS_CLIENT: OnceCell<KmsClient> = OnceCell::new();
fn init_kms(region: String) {
// setup KMS
let client =
rusoto_core::Client::new_with(EnvironmentProvider::default(), HttpClient::new().unwrap());
if KMS_CLIENT
@ -30,7 +29,7 @@ fn init_kms(region: String) {
}
}
#[derive(Clap)]
#[derive(Parser)]
pub struct Tx {
// TX
/// The TX value (in wei)
@ -61,19 +60,20 @@ pub struct Tx {
rpc: String,
}
#[derive(Clap)]
#[derive(Parser)]
pub struct Info {}
#[derive(Clap)]
#[derive(Parser)]
/// Subcommands
#[allow(clippy::large_enum_variant)]
pub enum SubCommands {
/// Send a tx signed by the KMS key
Tx(Tx),
Transaction(Tx),
/// Print the key info (region, id, address)
Info(Info),
}
#[derive(Clap)]
#[derive(Parser)]
#[clap(version = "0.1", author = "James Prestwich")]
pub struct Opts {
#[clap(subcommand)]
@ -141,7 +141,7 @@ fn prep_tx_request(opts: &Tx) -> TransactionRequest {
async fn _send_tx(signer: &AwsSigner<'_>, opts: &Opts) -> Result<()> {
let tx: &Tx = match opts.sub {
SubCommands::Tx(ref tx) => tx,
SubCommands::Transaction(ref tx) => tx,
SubCommands::Info(_) => unreachable!(),
};
@ -192,7 +192,7 @@ async fn _main() -> Result<()> {
let opts: Opts = Opts::parse();
init_kms(opts.region.to_owned());
let chain_id = match opts.sub {
SubCommands::Tx(ref tx) => tx.chain_id.unwrap_or(1),
SubCommands::Transaction(ref tx) => tx.chain_id.unwrap_or(1),
SubCommands::Info(_) => 1,
};
@ -201,7 +201,7 @@ async fn _main() -> Result<()> {
.with_chain_id(chain_id);
match opts.sub {
SubCommands::Tx(_) => _send_tx(&signer, &opts).await,
SubCommands::Transaction(_) => _send_tx(&signer, &opts).await,
SubCommands::Info(_) => _print_info(&signer, &opts).await,
}
}

@ -1,7 +1,7 @@
[package]
name = "optics-cli"
version = "0.1.0"
edition = "2018"
edition = "2021"
[dependencies]
color-eyre = "0.5.11"

@ -29,7 +29,7 @@ type OutputVec = Vec<((H256, u64), Vec<CommittedMessage>)>;
impl DbStateCommand {
pub async fn run(&self) -> Result<()> {
let db = OpticsDB::new(DB::from_path(&self.db_path)?);
let db = OpticsDB::new(self.home_name.to_owned(), DB::from_path(&self.db_path)?);
let messages_by_committed_roots = self.create_comitted_root_to_message_map(&db)?;
@ -50,9 +50,9 @@ impl DbStateCommand {
) -> Result<HashMap<H256, Vec<CommittedMessage>>> {
let mut messages_by_committed_roots: HashMap<H256, Vec<CommittedMessage>> = HashMap::new();
for index in 0.. {
match db.message_by_leaf_index(&self.home_name, index)? {
match db.message_by_leaf_index(index)? {
Some(message) => {
if db.proof_by_leaf_index(&self.home_name, index)?.is_none() {
if db.proof_by_leaf_index(index)?.is_none() {
println!("Failed to find proof for leaf index {}!", index);
}
@ -89,15 +89,13 @@ impl DbStateCommand {
// Create mapping of (update root, block_number) to [messages]
let mut output_map: HashMap<(H256, u64), Vec<CommittedMessage>> = HashMap::new();
for (committed_root, bucket) in messages_by_committed_roots {
let containing_update_opt =
db.update_by_previous_root(&self.home_name, committed_root)?;
let containing_update_opt = db.update_by_previous_root(committed_root)?;
match containing_update_opt {
Some(containing_update) => {
let new_root = containing_update.update.new_root;
let update_metadata = db
.retrieve_update_metadata(&self.home_name, new_root)?
.unwrap_or_else(|| {
let update_metadata =
db.retrieve_update_metadata(new_root)?.unwrap_or_else(|| {
panic!("Couldn't find metadata for update {:?}", containing_update)
});

@ -110,23 +110,19 @@ impl ProveCommand {
}
fn fetch_proof(&self) -> Result<(OpticsMessage, Proof)> {
let db = OpticsDB::new(DB::from_path(&self.db_path)?);
let db = OpticsDB::new(self.home_name.to_owned(), DB::from_path(&self.db_path)?);
let idx = match (self.leaf_index, self.leaf) {
(Some(idx), _) => idx,
(None, Some(digest)) => match db.message_by_leaf(&self.home_name, digest)? {
(None, Some(digest)) => match db.message_by_leaf(digest)? {
Some(leaf) => leaf.leaf_index,
None => bail!("No leaf index or "),
},
(None, None) => bail!("Must provide leaf index or leaf hash"),
};
let proof = db
.proof_by_leaf_index(&self.home_name, idx)?
.expect("no proof");
let message = db
.message_by_leaf_index(&self.home_name, idx)?
.expect("no message");
let proof = db.proof_by_leaf_index(idx)?.expect("no proof");
let message = db.message_by_leaf_index(idx)?.expect("no message");
let message = OpticsMessage::read_from(&mut message.message.as_slice())?;
Ok((message, proof))
