Merge branch 'main' of github.com:abacus-network/abacus-monorepo into trevor/multisig-validator-manager

pull/334/head
Trevor Porter 3 years ago
commit a792402b1f
  1. 1
      .gitignore
  2. 182
      README.md
  3. 59
      docs/README.md
  4. 227
      docs/agents/agent-operations.md
  5. 78
      docs/agents/developing.md
  6. 86
      docs/architecture.md
  7. 70
      docs/contributing.md
  8. 109
      docs/failure-cases.md
  9. 154
      docs/faq.md
  10. 170
      docs/governance/cross-chain-governance.md
  11. BIN
      docs/images/Governance-XApp.jpeg
  12. BIN
      docs/images/Optics-Architecture.png
  13. BIN
      docs/images/Upgrade-Setup-1.png
  14. BIN
      docs/images/Upgrade-Setup-2.png
  15. BIN
      docs/images/Upgrade-Setup-DEPRECATED.png
  16. 53
      docs/upgrade-setup.md
  17. 11
      docs/versioning.md
  18. 87
      docs/xapps/developing.md
  19. 121
      docs/xapps/token-bridge.md
  20. 64
      package-lock.json
  21. 2
      rust/.gitignore
  22. 144
      rust/Cargo.lock
  23. 3
      rust/Cargo.toml
  24. 11
      rust/Dockerfile
  25. 8
      rust/abacus-base/bin/example.rs
  26. 143
      rust/abacus-base/src/agent.rs
  27. 57
      rust/abacus-base/src/contract_sync/last_update.rs
  28. 6
      rust/abacus-base/src/contract_sync/metrics.rs
  29. 599
      rust/abacus-base/src/contract_sync/mod.rs
  30. 21
      rust/abacus-base/src/contract_sync/schema.rs
  31. 433
      rust/abacus-base/src/home.rs
  32. 4
      rust/abacus-base/src/inbox.rs
  33. 90
      rust/abacus-base/src/indexer.rs
  34. 13
      rust/abacus-base/src/lib.rs
  35. 15
      rust/abacus-base/src/macros.rs
  36. 4
      rust/abacus-base/src/metrics.rs
  37. 12
      rust/abacus-base/src/outbox.rs
  38. 366
      rust/abacus-base/src/replica.rs
  39. 74
      rust/abacus-base/src/settings/chains.rs
  40. 163
      rust/abacus-base/src/settings/mod.rs
  41. 191
      rust/abacus-base/src/xapp.rs
  42. 3
      rust/abacus-core/bin/lib_test_output.rs
  43. 4
      rust/abacus-core/src/accumulator/mod.rs
  44. 160
      rust/abacus-core/src/db/abacus_db.rs
  45. 14
      rust/abacus-core/src/db/mod.rs
  46. 39
      rust/abacus-core/src/lib.rs
  47. 187
      rust/abacus-core/src/models/home.rs
  48. 0
      rust/abacus-core/src/models/mod.rs
  49. 131
      rust/abacus-core/src/models/replica.rs
  50. 74
      rust/abacus-core/src/test_output.rs
  51. 2
      rust/abacus-core/src/traits/common.rs
  52. 196
      rust/abacus-core/src/traits/home.rs
  53. 32
      rust/abacus-core/src/traits/indexer.rs
  54. 98
      rust/abacus-core/src/traits/message.rs
  55. 71
      rust/abacus-core/src/traits/mod.rs
  56. 2
      rust/abacus-core/src/traits/outbox.rs
  57. 43
      rust/abacus-core/src/traits/replica.rs
  58. 53
      rust/abacus-core/src/traits/xapp.rs
  59. 71
      rust/abacus-core/src/types/failure.rs
  60. 2
      rust/abacus-core/src/types/messages.rs
  61. 4
      rust/abacus-core/src/types/mod.rs
  62. 184
      rust/abacus-core/src/types/update.rs
  63. 139
      rust/abacus-test/src/mocks/home.rs
  64. 20
      rust/abacus-test/src/mocks/indexer.rs
  65. 12
      rust/abacus-test/src/mocks/mod.rs
  66. 2
      rust/abacus-test/src/mocks/outbox.rs
  67. 123
      rust/abacus-test/src/mocks/replica.rs
  68. 105
      rust/abacus-test/src/mocks/xapp.rs
  69. 8
      rust/abacus-test/src/test_utils.rs
  70. 57
      rust/agents/kathy/src/kathy.rs
  71. 4
      rust/agents/kathy/src/main.rs
  72. 32
      rust/agents/processor/Cargo.toml
  73. 46
      rust/agents/processor/src/main.rs
  74. 503
      rust/agents/processor/src/processor.rs
  75. 176
      rust/agents/processor/src/prover.rs
  76. 328
      rust/agents/processor/src/prover_sync.rs
  77. 147
      rust/agents/processor/src/push.rs
  78. 25
      rust/agents/processor/src/settings.rs
  79. 6
      rust/agents/relayer/src/main.rs
  80. 12
      rust/agents/relayer/src/relayer.rs
  81. 34
      rust/agents/updater/Cargo.toml
  82. 67
      rust/agents/updater/src/main.rs
  83. 139
      rust/agents/updater/src/produce.rs
  84. 13
      rust/agents/updater/src/settings.rs
  85. 71
      rust/agents/updater/src/submit.rs
  86. 136
      rust/agents/updater/src/updater.rs
  87. 31
      rust/agents/watcher/Cargo.toml
  88. 44
      rust/agents/watcher/src/main.rs
  89. 13
      rust/agents/watcher/src/settings.rs
  90. 980
      rust/agents/watcher/src/watcher.rs
  91. 601
      rust/chains/abacus-ethereum/abis/Home.abi.json
  92. 518
      rust/chains/abacus-ethereum/abis/Replica.abi.json
  93. 319
      rust/chains/abacus-ethereum/abis/XAppConnectionManager.abi.json
  94. 315
      rust/chains/abacus-ethereum/src/home.rs
  95. 2
      rust/chains/abacus-ethereum/src/inbox.rs
  96. 36
      rust/chains/abacus-ethereum/src/lib.rs
  97. 300
      rust/chains/abacus-ethereum/src/replica.rs
  98. 140
      rust/chains/abacus-ethereum/src/xapp.rs
  99. 4
      rust/config/test/alfajores_config.json
  100. 4
      rust/config/test/fuji_config.json
  101. Some files were not shown because too many files have changed in this diff.

.gitignore (1 change)

@@ -18,4 +18,3 @@ typescript/*/.env
 typescript/*/node_modules
 typescript/**/tsconfig.tsbuildinfo
 **/**/tsconfig.tsbuildinfo
-typescript/optics-provider/src/tmp.ts

README.md
@@ -1,132 +1,94 @@
-# Optics
+# Abacus
-OPTimistic Interchain Communication
 ## Overview
-Optics is a cross-chain communication system. It handles passing raw buffers
+Abacus is a cross-chain communication system. It handles passing raw buffers
 between blockchains cheaply, and with minimal fuss. Like IBC and other
-cross-chain communication systems, Optics creates channels between chains, and
+cross-chain communication systems, Abacus creates channels between chains, and
 then passes its messages over the channel. Once a channel is established, any
 application on the chain can use it to send messages to any other chain.
-Compared to IBC and PoS light client based cross-chain communication, Optics
-has weaker security guarantees, and a longer latency period. However, Optics
-may be implemented on any smart contract chain, with no bespoke light client
-engineering. Because it does not run a light client, Optics does not spend
-extra gas verifying remote chain block headers.
+Compared to IBC and PoS light client based cross-chain communication, Abacus
+has weaker security guarantees. However, Abacus may be implemented on any smart
+contract chain, with no bespoke light client engineering. Because it does not run
+a light client, Abacus does not spend extra gas verifying remote chain block headers.
-In other words, Optics is designed to prioritize:
+In other words, Abacus is designed to prioritize:
 - Cost: No header verification or state management.
 - Speed of implementation: Requires only simple smart contracts, no complex
   cryptography.
 - Ease of use: Simple interface for maintaining xApp connections.
-You can read more about Optics' architecture [at Celo's main documentation site](https://docs.celo.org/celo-codebase/protocol/optics) or [within the docs folder of this repository](./docs/README.md).
+You can read more about Abacus' architecture in the [documentation](https://docs.useabacus.network/).
-## Integrating with Optics
+## Integrating with Abacus
-Optics establishes communication channels with other chains, but it's up to xApp (pronounced "zap", and short for "cross-chain applications")
+Abacus establishes communication channels with other chains, but it's up to app
 developers to use those. This repo provides a standard pattern for integrating
-Optics channels, and ensuring that communication is safe and secure.
+Abacus channels, and ensuring that communication is safe and secure.
 Integrations require a few key components:
-- A `Home` and any number of `Replica` contracts deployed on the chain already.
-  These contracts manage Optics communication channels. and will be used by the
-  xApp to send and receive messages.
-- A `XAppConnectionManager` (in `solidity/optics-core/contracts`). This
-  contract connects the xApp to Optics by allowing the xApp admin to enroll new
-  `Home` and `Replica` contracts. Enrolling and unenrolling channels is the
-  primary way to ensure that your xApp handles messages correctly. xApps may
-  deploy their own connection manager, or share one with other xApps.
-- A `Message` library. Optics sends raw byte arrays between chains. The xApp
+- A `Outbox` and any number of `Inbox` contracts deployed on the chain already.
+  These contracts manage Abacus communication channels and will be used by the
+  app to send and receive messages.
+- A `XAppConnectionManager` (in `solidity/core/contracts`). This
+  contract connects the app to Abacus by allowing the app admin to enroll new
+  `Outbox` and `Inbox` contracts. Enrolling and unenrolling channels is the
+  primary way to ensure that your app handles messages correctly. Apps may
+  deploy their own connection manager, or share one with other apps.
+- A `Message` library. Abacus sends raw byte arrays between chains. The app
   must define a message specification that can be serialized for sending, and
   deserialized for handling on the remote chain
-- A `Router` contract. The router translates between the Optics cross-chain
+- A `Router` contract. The router translates between the Abacus cross-chain
   message format, and the local chain's call contract. It also implements the
-  business logic of the xApp. It exposes the user-facing interface, handles
+  business logic of the app. It exposes the user-facing interface, handles
   messages coming in from other chains, and dispatches messages being sent to
   other chains.
 Solidity developers interested in implementing their own `Message` library and
-`Router` contract should check out the [optics-xapps](https://github.com/celo-org/optics-monorepo/tree/main/solidity/optics-xapps)
-package. It contains several example xApps.
+`Router` contract should check out the [apps](./solidity/apps/) package. It contains several example xApps.
 You can find current testnet deploy configurations in the `rust/config/`
 directory. These deployments happen frequently and are unstable. Please feel
 free to try out integrations using the deployed contracts in the LATEST config.
-It is **Strongly Recommended** that xApp admins run a `watcher` daemon to
-maintain their `XAppConnectionManager` and guard from fraud. Please see the
-documentation in the `rust/` directory and the
-[Optics architecture documentation](https://docs.celo.org/celo-codebase/protocol/optics)
-for more details.
-## Working on Optics
+## Working on Abacus
 ### Commit signature verification
-Commits (and tags) for this repo require [signature verification](https://docs.github.com/en/github/authenticating-to-github/managing-commit-signature-verification/about-commit-signature-verification). If you'd like to contribute to Optics, make sure that your commits are signed locally.
+Commits (and tags) for this repo require [signature verification](https://docs.github.com/en/github/authenticating-to-github/managing-commit-signature-verification/about-commit-signature-verification). If you'd like to contribute to Abacus, make sure that your commits are signed locally.
-### Pre-commit hooks
-Set up your pre-commit hook:
-```bash
-echo "./pre-commit.sh" > .git/hooks/pre-commit
-chmod +x .git/hooks/pre-commit
-```
-Note: In the event you need to bypass the pre-commit hooks, pass the
-`--no-verify` flag to your `git commit` command
-### Solidity
-1. Install dependencies
-```bash
-cd solidity/optics-core
-npm i
-cd ../optics-xapps
-npm i
-```
-2. Setup your `.env` files
-```bash
-cd typescript/abacus-deploy
-touch .env && cat .env.example > .env
-cd ../../solidity/optics-core
-touch .env && cat .env.example > .env
-cd ../optics-xapps
-touch .env && cat .env.example > .env
-```
-Then, add values to the keys in the newly created `.env` files.
-3. Install jq
-```bash
-brew install jq
-```
-  OR
-```bash
-sudo apt-get install jq
-```
-4. Install solhint
-```bash
-npm install -g solhint
-// to check it is installed:
-solhint --version
-```
+### Workspaces
+This monorepo uses [NPM Workspaces](https://docs.npmjs.com/cli/v7/using-npm/workspaces/). Installing dependencies, building, testing, and running prettier for all packages can be done from the root directory of the repository.
+- Installing dependencies
+```bash
+npm install
+```
+- Building
+```bash
+npm run build
+```
+- Testing
+```bash
+npm run test
+```
+- Running prettier
+```bash
+npm run prettier
+```
 ### Rust
@@ -144,12 +106,12 @@ cd rust
 ./release.sh <image_tag>
 ```
-# What is Optics?
+# What is Abacus?
-We present Optics — a system for sending messages between consensus systems
+We present Abacus — a system for sending messages between consensus systems
 without paying header validation costs by creating the illusion of cross-chain
-communication. Similar to an atomic swap, Optics uses non-global protocol
-validation to simulate cross-chain communication. Optics can carry arbitrary
+communication. Similar to an atomic swap, Abacus uses non-global protocol
+validation to simulate cross-chain communication. Abacus can carry arbitrary
 messages (raw byte vectors), uses a single-producer multi-consumer model, and
 has protocol overhead sublinear in the number of messages being sent.
@@ -157,60 +119,36 @@ has protocol overhead sublinear in the number of messages being sent.
 System sketch:
-1. A "home" chain commits messages in a merkle tree
-2. A bonded "updater" attests to the commitment
-3. The home chain ensures the attestation is accurate, and slashes if not
-4. Attested updates are replayed on any number of "replica" chains, after a
-   time delay
+1. An "outbox" chain commits messages in a merkle tree
+2. Bonded "validators" attest to the commitment via "checkpoints"
+3. Attested checkpoints are relayed to any number of "inbox" chains
 As a result, one of the following is always true:
-1. All replicas have a valid commitment to messages from the home chain
-2. Failure was published before processing, and the updater can be slashed on
-   the home chain
+1. All inboxes have a valid commitment to messages from the outbox chain
+2. Misbehaving validators can be slashed on the outbox chain
 This guarantee, although weaker than header-chain validation, is still likely
 acceptable for most applications.
 ## Summary
-Optics is a new strategy for simulating cross-chain communication without
+Abacus is a new strategy for simulating cross-chain communication without
 validating headers. The goal is to create a single short piece of state (a
 32-byte hash) that can be updated regularly. This hash represents a merkle tree
 containing a set of cross-chain messages being sent by a single chain (the
-"home" chain for the Optics system). Contracts on the home chain can submit
+"outbox" chain for the Abacus system). Contracts on the outbox chain can submit
 messages, which are put into a merkle tree (the "message tree"). The message
-tree's root may be transferred to any number of "replica" chains.
+tree's root may be transferred to any number of "inbox" chains.
-Rather than proving validity of the commitment, we put a delay on message
-receipt, and ensure that failures are publicly visible. This ensures that
-participants in the protocol have a chance to react to failures before the
-failure can harm them. Which is to say, rather than preventing the inclusion of
-bad messages, Optics guarantees that message recipients are aware of the
-inclusion, and have a chance to refuse to process them.
-To produce this effect, the home chain designates a single "updater." The
-updater places a bond ensuring her good behavior. She is responsible for
-producing signed attestations of the new message tree root. The home chain
-accepts and validates these attestations. It ensures that they extend a
-previous attestation, and contain a valid new root of the message set. These
-attestations are then sent to each replica.
-The replica accepts an update attestation signed by the updater, and puts it in
-a pending state. After a timeout, it accepts the update from that attestation
-and stores a new local root. Because this root contains a commitment of all
-messages sent by the home chain, these messages can be proven (using the
-replica's root) and then dispatched to contracts on the replica chain.
-The timeout on new updates to the replica serves two purposes:
-1. It ensures that any misbehavior by the updater is published **in advance**
-   of message processing. This guarantees that data necessary for home chain
-   slashing is available for all faults.
-2. It gives message recipients a chance to opt-out of message processing for
-   the update. If an incorrect update is published, recipients always have the
-   information necessary to take defensive measures before any messages can be
-   processed.
+The outbox chain designates validators. A validator places a bond ensuring
+her good behavior. She is responsible for producing signed attestations of the
+new message tree root. These attestations are relayed to inbox chains.
+The inbox accepts a checkpoint attestation signed by validators. Because this root
+contains a commitment of all messages sent by the outbox chain, these messages
+can be proven (using the inbox's root) and then dispatched to contracts on the
+inbox chain.
 ## Deploy Procedure
@@ -218,8 +156,7 @@ The contract addresses of each deploy can be found in `rust/config`. The latest
 deploy will be at `rust/config/[latest timestamp]` with bridge contracts within
 that same folder under `/bridge/[latest timestamp]`.
-The agents are setup to point at 2 environments at a time: `deployment` and
-`staging`.
+The agents are set up to point at one environment at a time.
 When agents are deployed to point at a new environment, they cease to point at
 the old ones. We **do not** continue to operate off-chain agents on old contract
@@ -228,6 +165,3 @@ messages will not be relayed between chains).
 Off-chain agents are **not** automatically re-deployed when new contract deploys
 are merged. Auto-redeploys will be implemented at some future date.
-The Optics team will maintain a document [here](./docs/agents/agent-operations)
-that will specify the contracts supported by the rust agents.

docs/README.md (deleted)
@@ -1,59 +0,0 @@
# Optics: OPTimistic Interchain Communication
## Table of Contents
- [Frequently Asked Questions](./faq.md)
- [Architecture](./architecture.md)
- [Upgrade Setup](./upgrade-setup.md)
- [Versioning](./versioning.md)
- **xApps**
- [Developing xApps](./xapps/developing.md)
- [Token Bridge xApp](./xapps/token-bridge.md)
- **Governance**
- [Cross-Chain Governance](./governance/cross-chain-governance.md)
- **Agents**
- [Agent Operations](./agents/agent-operations.md)
- [Failure Cases](./failure-cases.md)
- [Contributing To This Repo](./contributing.md)
## What is Optics?
Optics is a new design for radically cheaper cross-chain communication *without header verification.* We expect operating Optics to cut 90% of costs compared to a traditional header relay.
Optics will form the base layer of a cross-chain communication network that provides fast, cheap communication for all smart contract chains, rollups, etc. It relies only on widely-available cryptographic primitives (unlike header relays), has latency around 2-3 hours (unlike an ORU message passing layer), and imposes only about 120,000 gas overhead on message senders.
Optics has been designed for ease of implementation in any blockchain that supports user-defined computations. We will provide initial Solidity implementations of the on-chain contracts, and Rust implementations of the off-chain system agents.
## How does it work?
Optics is patterned after optimistic systems. It sees an attestation of some data, and accepts it as valid after a timer elapses. While the timer is running, honest participants have a chance to respond to the data and/or submit fraud proofs.
Unlike most optimistic systems, Optics must work on multiple chains. This means that certain types of fraud can't be objectively proven on the receiving chain. For example, the receiving chain can't know which messages the home chain intended to send, and therefore can't check message validity.
However, such fraud can be proven on the home chain, which means participants can be bonded and fraudulent messages can always result in slashing. In addition, all off-chain observers can be immediately convinced of fraud (as they can check the home chain). This means that the validity of a message sent by Optics is not 100% guaranteed. Instead, Optics guarantees the following:
1) Fraud is costly
2) All users can learn about fraud
3) All users can respond to the fraudulent message before it is accepted
In other words, rather than using a globally verifiable fraud proof, Optics relies on local verification by participants. This tradeoff allows Optics to save 90% on gas fees compared to pessimistic relays, while still maintaining a high degree of security.
## Building Intuition
Optics works something like a notary service. The home chain produces a document (the message tree) that needs notarization. A notary (the updater) is contracted to sign it. The notary can produce a fraudulent copy, but they will be punished by having their bond and license publicly revoked. When this happens, everyone relying on the notary learns that the notary is malicious. All the notary's customers can immediately block the notary and prevent any malicious access to their accounts.
## Technical description
Optics creates an authenticated data structure on a home chain, and replays updates to that data structure on any number of replicas. As a result, the home chain and all replicas will agree on the state of the data structure. By embedding data ("messages") in this data structure we can propagate it between chains with a high degree of confidence.
The home chain enforces rules on the creation of this data structure. In the current design, this data structure is a sparse merkle tree based on the design used in the eth2 deposit contract. This tree commits to the vector of all previous messages. The home chain enforces an addressing and message scheme for messages and calculates the tree root. This root will be propagated to the replicas. The home chain maintains a queue of roots (one for each message).
The home chain elects an "updater" that must attest to the state of the message tree. The updater places a bond on the home chain and is required to periodically sign attestations (updates or `U`). Each attestation contains the root from the previous attestation (`U_prev`), and a new root (`U_new`).
The home chain slashes when it sees two conflicting updates (`U_i` and `U_i'` where `U_i_prev == U_i'_prev && U_i_new != U_i'_new`) or a single update where `U_new` is not an element of the queue. The new root MUST be a member of the queue. E.g., a list of updates `U_1...U_i` should follow the form `[(A, B), (B, C), (C, D)...]`.
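To make the two slashing conditions concrete, here is a minimal Python sketch (an illustration only; the real checks live in the Home contract and agent code):

```python
from typing import NamedTuple

class Update(NamedTuple):
    prev_root: bytes  # U_prev: the root this update builds on
    new_root: bytes   # U_new: the newly attested root

def is_double_update(a: Update, b: Update) -> bool:
    # Conflicting updates extend the same previous root
    # but attest to different new roots.
    return a.prev_root == b.prev_root and a.new_root != b.new_root

def is_improper_update(u: Update, root_queue: list) -> bool:
    # An update is improper if its new root was never in the home's queue.
    return u.new_root not in root_queue
```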
Semantically, updates represent a batch commitment to the messages between the two roots. Updates contain one or more messages that ought to be propagated to the replica chain. Updates may occur at any frequency, as often as once per message. Because updates are chain-independent, any home chain update may be presented to any replica. And any replica update may be presented to the home chain. In other words, data availability of signed updates is guaranteed by each chain.
Before accepting an update, a replica places it into a queue of pending updates. Each update must wait for some time parameter before being accepted. While a replica can't know that an update is certainly valid, the waiting system guarantees that fraud is publicly visible **on the home chain** before being accepted by the replica. In other words, the security guarantee of the system is that all frauds may be published by any participant, all published frauds may be slashed, and all participants have a window to react to any fraud. Therefore updates that are **not** blacklisted by participants are sufficiently trustworthy for the replica to accept.

docs/agents/agent-operations.md (deleted)
@@ -1,227 +0,0 @@
# Agent Operations
## Deployment Environments
There will exist several logical deployments of Optics to enable us to test new code/logic before releasing it to Mainnet. Each environment encompasses the various Home/Replica contracts deployed to many blockchains, as well as the agent deployments and their associated account secrets.
The environments have various purposes and can be described as follows:
### Development
Purpose: Allows us to test changes to contracts and agents. *Bugs should be found here.*
- Deployed against testnets
- Agent Accounts: HexKeys stored in a secret manager for ease of rotation/access
- Agent Infrastructure: Optics core team will operate agent infrastructure for this.
- Node Infrastructure: Forno/Infura
- Agent Deployments: Automatic, continuous deployment
- Contract Deployments: Automatic, with human intervention required for updating the **upgradeBeacon**.
**Current Dev Contract Deployment:**
[development](../rust/config/development/)
### Staging
Purpose: Allows us to test the full-stack integration, specifically surrounding the KMS access control and federated secret management. *Issues with process should be found here.*
- Deployed against testnets, mirrors Mainnet deployment.
- Agent Accounts: KMS-provisioned keys
- Agent Infrastructure: Agent operations will be decentralized
- Node Infrastructure: Node infrastructure will be decentralized
- Agent Deployments: Determined by whoever is running the agents
- Contract Deployments: Automatic, with human intervention required for updating the **upgradeBeacon**.
**Current Staging Contract Deployment:**
[staging](../rust/config/staging/)
### Production
Purpose: Where the magic happens, **things should not break here.**
- Deployed against Mainnets
- Agent Accounts: KMS-provisioned keys
- Agent Infrastructure: Agent operations will be decentralized
- Node Infrastructure: Node infrastructure will be decentralized
- Agent Deployments: Determined by whoever is running the agents
- Contract Deployments: ***Manual*** - Existing tooling can be used, but deploys will be gated and require approval as contract deployments are expensive on Mainnet.
**Current Production Contract Deployment:**
[mainnet](../rust/config/mainnet/)
## Key Material
Keys for Staging and Production environments will be stored in AWS KMS, which is a highly flexible solution in terms of granting access. It guarantees nobody will ever have access to the key material itself, while still allowing granular permissions over access to remote signing.
At the outset, the Optics team will have full control over agent keys, and any contracted party will simply be granted access through existing IAM tooling/roles.
### Provision KMS Keys
There exists a script in this repository (`rust/provision_kms_keys.py`) that facilitates KMS key provisioning for agent roles.
The script will produce a single set of keys per "environment," where an __environment__ is a logical set of smart contract deployments. By default there are two environments configured, `staging` and `production`, where `staging` corresponds to testnet deployments of the contracts and `production` corresponds to mainnet deployments.
The current strategy, in order to reduce complexity, is to use the same keys for transaction signing on both Celo and Ethereum networks. Should you desire, the key names to be provisioned can be modified such that the script creates unique keys per-network. Ex:
```python
# Agent Keys
required_keys = [
"watcher-signer-alfajores",
"watcher-attestation-alfajores",
"watcher-signer-kovan",
"watcher-attestation-kovan",
"updater-signer-alfajores",
"updater-attestation-alfajores",
"updater-signer-kovan",
"updater-attestation-kovan",
"processor-signer-alfajores",
"processor-signer-kovan",
"relayer-signer-alfajores",
"relayer-signer-kovan"
]
```
#### Run the Key Provisioning Script
```bash
AWS_ACCESS_KEY_ID=accesskey AWS_SECRET_ACCESS_KEY=secretkey python3 provision_kms_keys.py
```
If the required keys are not present, the script will generate them. If the keys _are_ present, their information will be fetched and displayed non-destructively.
Upon successful operation, the script will output a table of the required keys, their ARNs, ETH addresses (for funding the accounts), and their regions.
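Once provisioned, an agent can request signatures from KMS remotely. Below is a minimal boto3 sketch; the key alias, region, and digest are illustrative placeholders, not output of the script:

```python
import boto3

kms = boto3.client("kms", region_name="us-west-2")  # assumed region

digest = bytes(32)  # placeholder for a 32-byte transaction hash

response = kms.sign(
    KeyId="alias/updater-signer",      # hypothetical key alias
    Message=digest,
    MessageType="DIGEST",
    SigningAlgorithm="ECDSA_SHA_256",  # the algorithm used with secp256k1 CMKs
)
signature = response["Signature"]      # DER-encoded ECDSA signature
```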
#### Provision IAM Policies and Users
This is an opinionated setup that works for most general agent-operations use-cases. The same permission boundaries can be achieved through different means, like using only [Key Policies](https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html).
Background Reading/Documentation:
- [KMS Policy Conditions](https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html)
- [KMS Policy Examples](https://docs.aws.amazon.com/kms/latest/developerguide/customer-managed-policies.html)
- [CMK Alias Authorization](https://docs.aws.amazon.com/kms/latest/developerguide/alias-authorization.html)
The following sequence describes how to set up IAM policies for staging and production deployments.
- Create three users
- optics-signer-staging
- optics-signer-production
- kms-admin
- Save IAM credential CSV
- Create staging signer policies
- staging-processor-signer
- staging-relayer-signer
- staging-updater-signer
- staging-watcher-signer
- With the following policy, modified appropriately:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "OpticsStagingPolicy",
"Effect": "Allow",
"Action": [
"kms:GetPublicKey",
"kms:Sign"
],
"Resource": "arn:aws:kms:*:11111111111:key/*",
"Condition": {
"ForAnyValue:StringLike": {
"kms:ResourceAliases": "alias/staging-processor*"
}
}
}
]
}
```
- production-processor-signer
- production-relayer-signer
- production-updater-signer
- production-watcher-signer
- With the following policy, modified appropriately:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "OpticsProductionPolicy",
"Effect": "Allow",
"Action": [
"kms:GetPublicKey",
"kms:Sign"
],
"Resource": "arn:aws:kms:*:11111111111:key/*",
"Condition": {
"ForAnyValue:StringLike": {
"kms:ResourceAliases": "alias/production-processor*"
}
}
}
]
}
```
- Create kms-admin policy
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "KMSAdminPolicy",
"Effect": "Allow",
"Action": [
"kms:DescribeCustomKeyStores",
"kms:ListKeys",
"kms:DeleteCustomKeyStore",
"kms:GenerateRandom",
"kms:UpdateCustomKeyStore",
"kms:ListAliases",
"kms:DisconnectCustomKeyStore",
"kms:CreateKey",
"kms:ConnectCustomKeyStore",
"kms:CreateCustomKeyStore"
],
"Resource": "*"
},
{
"Sid": "VisualEditor1",
"Effect": "Allow",
"Action": "kms:*",
"Resource": [
"arn:aws:kms:*:756467427867:alias/*",
"arn:aws:kms:*:756467427867:key/*"
]
}
]
}
```
- Create IAM groups
- staging-signer
- production-signer
- kms-admin
- Add previously created users to the corresponding groups
- optics-signer-staging -> staging-signer
- optics-signer-production -> production-signer
- kms-admin -> kms-admin
## Funding Addresses
Each agent should be configured with a unique wallet to be used for signing transactions and paying gas. This section describes the process of funding these signer wallets.
Note: It is currently inadvisable to run multiple agent setups with the same set of transaction signers.
### Steps
1. Generate KMS keys using instructions from the previous section.
2. Enumerate Signer Addresses via the table included as part of the output of `provision_kms_keys.py`, or via whatever method you used to generate keys.
3. Send individual funding transactions to each address and verify the balances (see the sketch below)
- Note: 500 ETH should be sufficient for testnet addresses.
4. Edit deployment config to match new signers
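A minimal web3.py sketch for verifying that the signer wallets are funded; the RPC endpoint and addresses are placeholders to be replaced with the provisioning script's output:

```python
from web3 import Web3

w3 = Web3(Web3.HTTPProvider("https://alfajores-forno.celo-testnet.org"))

# Placeholder signer addresses; substitute the table output of provision_kms_keys.py.
signers = {
    "updater-signer": "0x0000000000000000000000000000000000000001",
    "relayer-signer": "0x0000000000000000000000000000000000000002",
}

for role, address in signers.items():
    balance = w3.eth.get_balance(Web3.to_checksum_address(address))
    print(role, w3.from_wei(balance, "ether"))
```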

docs/agents/developing.md (deleted)
@@ -1,78 +0,0 @@
# Developing the Agents
## Configuration
Agents read settings from the config files and/or env.
Config files are loaded from `rust/config/default` unless specified otherwise. Currently, deployment config directories are labeled by the timestamp at which they were deployed.
Configuration key/value pairs are loaded in the following order, with later sources taking precedence (a minimal sketch of this layering follows the list):
1. The config file specified by the `RUN_ENV` and `BASE_CONFIG` env vars. `$RUN_ENV/$BASE_CONFIG`
2. The config file specified by the `RUN_ENV` env var and the agent's name. `$RUN_ENV/{agent}-partial.json`.
E.g. `$RUN_ENV/updater-partial.json`
3. Configuration env vars with the prefix `OPT_BASE` intended to be shared by multiple agents in the same environment
E.g. `export OPT_BASE_REPLICAS_KOVAN_DOMAIN=3000`
4. Configuration env vars with the prefix `OPT_{agent name}` intended to be used by a specific agent.
E.g. `export OPT_KATHY_CHAT_TYPE="static message"`
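For intuition, here is that layering sketched in Python (hypothetical; the agents actually use a Rust settings loader):

```python
import json
import os

def load_settings(run_env: str, base_config: str, agent: str) -> dict:
    settings: dict = {}

    # 1. Base config file: $RUN_ENV/$BASE_CONFIG
    with open(f"config/{run_env}/{base_config}") as f:
        settings.update(json.load(f))

    # 2. Agent-specific partial: $RUN_ENV/{agent}-partial.json
    with open(f"config/{run_env}/{agent}-partial.json") as f:
        settings.update(json.load(f))

    # 3. Shared env vars (OPT_BASE_*), then 4. agent-specific ones
    # (OPT_{AGENT}_*); later sources overwrite earlier ones.
    for prefix in ("OPT_BASE_", f"OPT_{agent.upper()}_"):
        for key, value in os.environ.items():
            if key.startswith(prefix):
                settings[key[len(prefix):].lower()] = value

    return settings
```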
## Building an Agent for Development
For contributing to the Rust codebase, it is advantageous and preferable to build agents using your host dev environment. As mentioned in the previous section, configuration precedence is your friend here. You can specify the base config json to use, and then override variables via the environment.
Below is a sample `tmp.env` file with appropriate variables to run an agent instance against the development environment.
Note: You will need to fetch dev keys (or generate your own via a contract deployment) for this to work properly.
`tmp.env`:
```
RUN_ENV=1625169020727
OPT_BASE_TRACING_LEVEL=info
OPT_UPDATER_UPDATER_KEY=<HexKey>
OPT_UPDATER_DB=updaterdb
OPT_RELAYER_DB=relayerdb
OPT_KATHY_DB=kathydb
OPT_KATHY_SIGNERS_ALFAJORES_KEY=<HexKey>
OPT_UPDATER_SIGNERS_ALFAJORES_KEY=<HexKey>
OPT_RELAYER_SIGNERS_ALFAJORES_KEY=<HexKey>
OPT_PROCESSOR_SIGNERS_ALFAJORES_KEY=<HexKey>
OPT_KATHY_SIGNERS_KOVAN_KEY=<HexKey>
OPT_UPDATER_SIGNERS_KOVAN_KEY=<HexKey>
OPT_RELAYER_SIGNERS_KOVAN_KEY=<HexKey>
OPT_PROCESSOR_SIGNERS_KOVAN_KEY=<HexKey>
```
Let's walk through the variables here:
`RUN_ENV` - Specifies the config folder to load configuration from, defaults to `default`.
`OPT_BASE_TRACING_LEVEL` - Specifies the log level the agents should boot up with.
`OPT_UPDATER_UPDATER_KEY` - The Updater attestation key.
`OPT_<ROLE>_DB` - The <ROLE>-specific path to save the agent database. Setting individual locations here allows one to run multiple instances of an agent at once without them stepping on one another.
`OPT_<ROLE>_SIGNERS_<NETWORK>_KEY` - The <ROLE>-specific transaction key to use when signing transactions on <NETWORK>.
For a full list of potentially useful common environment variables, check out the Agent Helm Chart's [ConfigMap](https://github.com/celo-org/optics-monorepo/blob/main/rust/helm/optics-agent/templates/configmap.yaml#L8-L34)
Agents also have role-specific environment variables in their StatefulSet definitions:
- [Updater](https://github.com/celo-org/optics-monorepo/blob/main/rust/helm/optics-agent/templates/updater-statefulset.yaml#L54-L89)
- [Relayer](https://github.com/celo-org/optics-monorepo/blob/main/rust/helm/optics-agent/templates/relayer-statefulset.yaml#L54-L74)
- [Processor](https://github.com/celo-org/optics-monorepo/blob/main/rust/helm/optics-agent/templates/processor-statefulset.yaml#L54-L74)
- [Kathy](https://github.com/celo-org/optics-monorepo/blob/main/rust/helm/optics-agent/templates/kathy-statefulset.yaml#L54-L74)
To run an agent, you can use the following command:
`BASE_CONFIG=kovan_config.json env $(cat ../tmp.env | xargs) cargo run --bin <AGENT>`
This will build the codebase and run the specified `<AGENT>` binary using the provided environment variables.
## Production Builds
When making changes to the Rust codebase, it is important to ensure that the Docker build used in production environments still works. This is checked automatically in CI, since the image is built on every PR ([see docker workflow here](https://github.com/celo-org/optics-monorepo/blob/main/.github/workflows/docker.yml)); however, you can usually check it much faster by attempting to build it locally.
You can build the docker image by running the following script in the `rust` directory:
`./build.sh latest`
If that goes smoothly, you can rest assured it will most likely also work in CI.

docs/architecture.md (deleted)
@@ -1,86 +0,0 @@
# Optics Architecture
## Components
![Optics Architecture Diagram](./images/Optics-Architecture.png)
Optics has several logical components:
- Home - The on-chain contract responsible for producing the message tree
- Replica - The on-chain contract responsible for replicating the message root on some other chain
- Updater - The off-chain participant responsible for submitting updates to the home chain
- Watcher - The off-chain participant responsible for observing a replica, and submitting fraud proofs to the home chain
- Relayer - The off-chain participant responsible for submitting updates to a replica
- Processor - The off-chain participant responsible for causing messages to be processed
### On-chain (Contracts)
#### Home
The home contract is responsible for managing production of the message tree and holding custody of the updater bond. It performs the following functions:
1. Expose a "send message" API to other contracts on the home chain
2. Enforce the message format
3. Commit messages to the message tree
4. Maintain a queue of tree roots
5. Hold the updater bond
6. Slash on double-update proofs (not implemented)
7. Slash on improper update proofs (not implemented)
8. Future: manage updater rotation/bond
9. Future: recover from `FAILED` state
#### Replica
The replica contract is responsible for managing optimistic replication and dispatching messages to end recipients. It performs the following functions:
1. Maintain a queue of pending updates
2. Finalize updates as their timeouts elapse
3. Accept double-update proofs
4. Validate message proofs
5. Enforce the message format
6. Ensure messages are processed in order
7. Dispatch messages to their destination
8. Expose a "disconnect" feature
9. Future: recover from `FAILED` state
### Off-chain (Agents)
#### Updater
The updater is responsible for signing attestations of new roots. It is an off-chain actor that does the following:
1. Observe the home chain contract
2. Sign attestations to new roots
3. Publish the signed attestation to the home chain
4. Future: manage Updater bond
#### Watcher
The watcher observes the Updater's interactions with the Home contract (by watching the Home contract) and reacts to malicious or faulty attestations. It also observes any number of replicas to ensure the Updater does not bypass the Home and go straight to a replica. It is an off-chain actor that does the following:
1. Observe the home
2. Observe 1 or more replicas
3. Maintain a DB of seen updates
4. Submit double-update proofs
5. Submit invalid update proofs
6. If configured, issue an emergency halt transaction
#### Relayer
The relayer forwards updates from the home to one or more replicas. It is an off-chain actor that does the following:
1. Observe the home
2. Observe 1 or more replicas
3. Poll the home for new signed updates (since the replica's current root) and submit them to the replica
4. Poll the replica for confirmable updates (that have passed their optimistic time window) and confirm if available (updating the replica's current root)
#### Processor
The processor proves the validity of pending messages and sends them to end recipients. It is an off-chain actor that does the following (a sketch follows this list):
1. Observe the home
2. Generate and submit merkle proofs for pending (unproven) messages
3. Maintain local merkle tree with all leaves
4. Observe 1 or more replicas
5. Maintain list of messages corresponding to each leaf
6. Dispatch proven messages to end recipients
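A rough sketch of that loop, with hypothetical Python stand-ins for the contract bindings and database (the real implementation is the Rust agent in `rust/agents/processor`):

```python
import time

def processor_loop(home, replicas, tree, db):
    while True:
        # Observe the home; maintain a local merkle tree with all leaves
        # and the message corresponding to each leaf.
        for leaf, message in home.new_dispatches():
            tree.ingest(leaf)
            db.store(leaf, message)

        # For each replica, prove and dispatch messages it can accept.
        for replica in replicas:
            for leaf in db.unprocessed(replica.domain):
                proof = tree.prove(leaf)
                if replica.acceptable_root(proof.root):
                    replica.prove_and_process(db.message(leaf), proof)

        time.sleep(5)
```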

docs/contributing.md (deleted)
@@ -1,70 +0,0 @@
# Contributing to Optics
## Signing Commits
💡 Please set up commit signing: [See docs here](https://docs.github.com/en/github/authenticating-to-github/managing-commit-signature-verification)
Sign them!
## Set pulls to fast forward only
💡 Please read [this article](https://blog.dnsimple.com/2019/01/two-years-of-squash-merge/) about squash merging
- `git config pull.ff only`
- Consider setting this globally because it's way better this way
## **Naming Branches**
We want to know who is working on a branch, and what it does. Please use this format:
- `name/short-description`
- Examples:
- `prestwich/refactor-home`
- `erinhales/merkle-tests`
- `pranay/relayer-config`
## **Commit messages**
We want to know what a commit will do and be able to skim the commit list for specific things. Please add a short 1-word tag to the front, and a short sentence that fills in the blank "If applied, this commit will __________"
- Examples:
- `docs: improve rustdoc on the Relay run function`
- `feature: add gas escalator configuration to optics-base`
- `test: add test vector JSON files for the merkle trees`
For large commits, please add a commit body with a longer description, and bullet points describing key changes.
## **PRs**
Please name the PR with a short sentence that fills in the blank "If applied, this PR will _________". To be merged into `main`, a PR must pass CI.
Please use the [Github Draft PR](https://github.blog/2019-02-14-introducing-draft-pull-requests/) feature for WIP PRs. When ready for review, assign at least one reviewer from the core team. PRs should be reviewed by at least 1 other person.
PRs should **ALWAYS** be [merged by squashing the branch](https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/#:~:text=It's%20simple%20%E2%80%93%20before%20you%20merge,Here's%20a%20breakdown.&text=Make%20changes%20as%20needed%20with%20as%20many%20commits%20that%20you%20need%20to.).
## Merging PRs
PRs can be merged once the author says it's ready and one core team-member has signed off on the changes.
Before approving and merging please do the following:
1. Ensure that you feel comfortable with the changes being made
2. If an existing `Request Changes` review exists, ask the reviewer to re-review
3. Pull the branch locally
4. Run the pre-commit script
5. Ensure that the build and tests pass
6. Give an approval
7. Ensure that any issues the PR addresses are properly linked
8. If any changes are needed to local environments (e.g. re-installing the build script, or installing new tooling) please record it in the documentation folder.
9. Resolve conflicts by rebasing onto the target branch
## Workflows
### Debugging
Solidity Install
1. Click on workflow to see where the install is failing
2. Delete `node_modules` and `package-lock.json` in affected folder
3. Reinstall locally
4. Push to branch

docs/failure-cases.md (deleted)
@@ -1,109 +0,0 @@
# Optics Failure Cases
Optics is a robust system, resistant to many sorts of problems. However, there is a set of failure cases that require human intervention; they are enumerated here.
## Agent State/Config
### Updater
- *Two `updater`s deployed with the same config*
- (See Double Update)
- *Extended updater downtime*
- **Effect:**
- Updates stop being sent for a period of time
- **Mitigation:**
- `Updater` Rotation (not implemented)
- *Fraudulent `updater`*
- **Effect:**
- Invalid or fraudulent update is sent
- **Mitigation:**
- `Watcher` detects fraud, submits fraud proof (see Improper Update)
### Relayer
- *`relayer` "relays" the same update more than once*
- **Effect:**
- Only the first one works
- Subsequent transactions are rejected by the replicas
- **Mitigation:**
- Mempool scanning
- "is there a tx in the mempool already that does what I want to do?"
If so, do nothing, pick another message to process.
- __If minimizing gas use:__ Increase polling interval (check less often)
### Processor
- *`processor` "processes" the same message more than once*
- **Effect:**
- Only the first one works
- Subsequent transactions are rejected by the smart contracts
### Watcher
- *Watcher and Fraudulent Updater Collude*
- **Effect:**
- Fraud is possible
- **Mitigation:**
- Distribute watcher operations to disparate entities. Anyone can run a watcher.
### General
- *Transaction Wallets Empty*
- **Effect:**
- Transactions cease to be sent
- **Mitigation:**
- Monitor and top-up wallets on a regular basis
## Contract State
- *Double Update*
- Happens if `Updater` (single key), submits two updates building off the "old root" with different "new root"
- If two `updater`s were polling often but message volume was low, would likely result in the "same update"
- If two `updater`s were polling often but message volume was high, would likely result in a "double update"
- Doesn't necessarily require __two updaters__: an edge case could occur where the updater is submitting a transaction, crashes, and then reboots and submits a double update
- **Effect:**
- Home and Replicas go into a **Failed** state (stops working)
- **Mitigation:**
- Agent code has the ability to check its Database for a signed update, check whether it is going to submit a double update, and prevent itself from doing so
- Need to improve things there
- Updater wait time
- `Updater` doesn't want to double-update, so it creates an update and sits on it for some interval. If still valid after the interval, submit. __(Reorg mitigation)__
- __"Just don't run multiple updaters with the same config"__
- *Improper Update*
- Should only occur if the chain has a "deep reorg" that is longer than the `Updater`'s __pause period__ OR if the `Updater` is actively committing fraud.
- **Effect:**
- `Home` goes into a **FAILED** state (stops working)
- No plan for dealing with this currently
- `Updater` gets slashed
- (not implemented currently)
- **Mitigation:**
- `Watcher`(s) unenroll `xapps`
- Humans look at the situation, determine if the `Updater` was committing fraud or just the victim of poor consensus environment.
## Network Environment
- *Network Partition*
- When multiple nodes split off on a fork and break consensus
- Especially bad if the `updater` is off on the least-power chain (results in __Improper Update__)
- **Effect:**
- Manifests as a double-update
- Manifests as an improper update
- Messages simply stop
- **Mitigation:**
- Pay attention and be on the right fork
- **Stop signing updates when this occurs!**
- Have a reliable mechanism for determining this is happening and pull the kill-switch.
- *PoW Chain Reorg (See Network Partition)*
- What happens when a __network partition__ ends
- **Mitigation:**
- *PoS Chain Reorg (See Network Partition)*
- Safety failure (BPs producing conflicting blocks)
- Liveness Failure (no new blocks, chain stops finalizing new blocks)
- **Effect:**
- Slows down finality
- Blocks stop being produced
- How would this manifest in Celo?
- Celo would stop producing blocks.
- Agents would __pause__ and sit there
- When agents see new blocks, they continue normal operations.

docs/faq.md (deleted)
@@ -1,154 +0,0 @@
# Frequently Asked Questions
-------
#### Q: Why does my recently-bridged token have a funny name like `0006648936.eb48`?
**A:**
In order to avoid sending redundant data like the token name and symbol with every message, metadata is pulled from the originating chain once, after the bridged ERC-20 token representation is first deployed. This involves a round-trip between the replica and originating chain wherein the name/symbol/decimals data is synced.
This is expected behavior, and the explorer will update after a day or two.
#### Q: Why is the ERC-20 token placeholder name like that?
**A:**
Example: `0006648936.eb48`
`0006648936.eb48` is the Optics domain ID for Ethereum followed by the last 4 hex characters of the token's address on Ethereum.
`6648936 == 0x657468` -- the utf8 encoding of 'eth'
USDC's address is `0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48`
Note the `eb48` at the end.
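The placeholder can be reproduced in a few lines of Python:

```python
# Reconstruct the placeholder name for USDC bridged from Ethereum.
domain = int.from_bytes(b"eth", "big")  # 6648936 == 0x657468
usdc = "0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"

print(f"{domain:010d}.{usdc[-4:]}")  # -> 0006648936.eb48
```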
#### Q: What is the point of the Updater’s attestations?? Why are they necessary / helpful?
I am confused, because it seems like the Updater has very little discretion as to what messages should or should not be enqueued... it has to sign a message that's been added to the Home contract's queue, and it can't sign the most recent messages without also signing every message before it, so if it detects some kind of "invalid" or malicious message, the only optionality it has is to stop attesting to messages altogether, thereby halting the system entirely, right?
The updates to the state root are already calculated on-chain each time a message is Dispatched... why can't the Home contract just update the current state each time a message is Dispatched, not needing to bother with enqueuing intermediate state roots and waiting for an update to attest to the current state?
**A:**
The updater should have very little discretion. Their job is to be a notary and to rubber-stamp something that has already objectively occurred. The Updater should sign roots regardless of the message content. There's no concept of malicious or fraudulent messages on the Home chain: if someone calls `enqueue` on the Home contract, they want that message to be dispatched, and the Updater should have little power to prevent that. The Updater's role isn't to filter messages or worry about them at all. It's to produce an `Update` that is cheap for all replicas and off-chain observers to verify, and to post a bond that can be slashed. The Replicas **cannot** read the Home's `current`. They **require** the signed update from a known party in order to know that fraudulent `Update`s are expensive.
The reason for the Root Queue is to prevent the Updater from being slashed due to timing errors outside her control. What if the root changes while the signed Update transaction is in the transaction pool? If the Home stored only the latest root, it might change after the signature is made but before the TX is processed, resulting in slashing an honest Updater.
- The updater does not need a copy of the tree
- The updater polls the `suggestUpdate` mechanism on the Home to get a valid update
- The updater does not update a local tree and attest the match
- The updater (and everyone else) can assume the Home calculated the tree root correctly
So, the purpose of the Updater is simply providing a signature that the Replica chains can verify. The Replica chains know that if they can verify the Updater’s signature on a new root, that it is the truth of what happened on the Home chain — or else the Updater will lose their bonded stake on the Home chain.
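Put together, the Updater's duty cycle reduces to something like the following sketch, where `home` and `signer` are hypothetical stand-ins for the Home contract binding and the attestation key:

```python
import time

def updater_loop(home, signer, pause_seconds=60):
    while True:
        suggestion = home.suggest_update()  # (old_root, new_root) or None
        if suggestion is not None:
            old_root, new_root = suggestion
            time.sleep(pause_seconds)       # sit on it briefly (reorg mitigation)
            if home.queue_contains(new_root):
                attestation = signer.sign(old_root + new_root)
                home.submit_update(old_root, new_root, attestation)
        time.sleep(10)
```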
-------
#### Q: Does the Updater need to
- maintain an off-chain copy of the sparse merkle tree
- No, it relies on the contract
- parse transactions submitted to the Home contract and Dispatch events emitted on-chain to learn about new messages
- No. It only needs to poll `suggestUpdate`
- update its local copy of the merkle tree with these new messages
- No, it relies on the contract
- calculate the tree root of its local copy of the merkle tree
- No, it relies on the contract
- compare its locally calculated tree root to the one generated on-chain
- No, it relies on the contract
- attest that the two match?
- No, it relies on the contract
I am definitely missing some insight into what function the Updater serves in the system
- It simply attests to the tree that the contract produced
-------
#### Q: How does a message get processed?
**A:**
1. Enqueue on [Home](https://github.com/celo-org/optics-monorepo/blob/main/solidity/optics-core/contracts/Home.sol)
2. Updater attests
3. Relayer relays the attestation
4. Processor submits the proof and the message
5. Replica [dispatches message](https://github.com/celo-org/optics-monorepo/blob/main/solidity/optics-core/contracts/Replica.sol#L202-L239) to the recipient contract
-------
#### Q: What happens if the Updater commits fraud?
**A:**
1. The watcher is able to detect fraud
2. The watcher notifies the Home contract, which halts (and slashes the Updater's bond)
3. The watcher warns the xApps of the fraudulent update by calling `unenrollReplica`
- [link](https://github.com/celo-org/optics-monorepo/blob/main/solidity/optics-core/contracts/XAppConnectionManager.sol#L38-L42)
4. When the fraudulent message is received, the xApp rejects it, because the Replica has been unenrolled
-------
#### Q: What happens if the Updater equivocates (signs 2 conflicting updates)?
**A:**
1. The watcher is able to detect this
2. The watcher notifies the Home contract, which halts (and slashes the Updater's bond)
3. The watcher notifies each Replica contract, which halts
-------
#### Q: Why would an Updater submit a double update?
**A:** The updater is trying to send different (fraudulent) inbound messages to 2 different chains. E.g., the updater wants to move the same $100 to 2 different chains.
-------
#### Q: If all double updates are fraud, why do we handle them separately?
**A:** Because unlike regular fraud, a double update can be detected by all chains. So we can *certainly* defend against it everywhere. Since we have a strong defense against this type of fraud, we just ban it outright.
-------
#### Q: What do each of the agents do?
**A:** See [Optics Architecture](./architecture.md)
-------
#### Q: How do xApps know what channels to listen to? And which Home to send messages to?
**A:** xApps "know" both of these things by querying the xAppConnectionManager.
The xAppConnectionManager is a contract that stores a registry of the address of the Home contract and the Replica contracts on the local chain. xApps query this information from the xAppConnectionManager.
When an external contract attempts to trigger a xApp, it queries the xAppConnectionManager to ensure that the contract is a verified Replica - this way, the xApp "knows" that the message is coming from Optics (rather than a malicious smart contract fabricating Optics messages).
When a xApp needs to send a message via Optics, it queries the xAppConnectionManager to get the address of the Home contract, where it will send the message.
The xAppConnectionManager's registry is maintained by permissioned agents in the Optics system. Enrolling new Replicas or changing the Home address must be done by Optics Governance; Un-enrolling Replicas is performed by either Governance or a permissioned role called a Watcher.
Watchers are permissioned for specific chains, and responsible for "watching" for fraud on the Home contract of that chain. If fraud occurs on the Home, the Watcher must sign a message attesting to the fraud, which can be submitted to the xAppConnectionManager to un-enroll the Replica. In this way, xApps no longer accept messages from Replicas whose Home contracts have been proven fraudulent.
In the future, Watchers will be bonded on the Home chain that they watch, to incentivize timely and consistent submission of their signature if fraud occurs, thereby reducing the trust that must be placed in the Watcher.
-------
#### Q: What is a domain? Why do we use domain numbers everywhere?
**A:** The domain is an identifier for a chain (or other execution environment). It is a number, and eventually we'll have a registry for these. We tag each message with an origin domain so that the recipient knows where it came from, and a destination domain so that the relayer and processor know where to deliver it (and so the Replica knows that it ought to deliver it). This also lets contracts permission specific people or contracts from other chains.
-------
#### Q: Why do we use 32-byte addresses instead of Ethereum-style 20-byte addresses?
**A:** This is future-proofing: we want to support non-EVM chains in the future, and 20 bytes won't be enough there.
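On EVM chains the convention is to left-pad the 20-byte address with zeros (this is what helpers like Optics' TypeCasts library do); a minimal sketch of the casts:

```solidity
pragma solidity ^0.6.11;

library AddressCast {
    // Left-pad a 20-byte EVM address into a universal 32-byte address
    function addressToBytes32(address _addr) internal pure returns (bytes32) {
        return bytes32(uint256(uint160(_addr)));
    }

    // Recover the EVM address from the low 20 bytes
    function bytes32ToAddress(bytes32 _buf) internal pure returns (address) {
        return address(uint160(uint256(_buf)));
    }
}
```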
-------
#### Q: Why put all messages in the same Home? Why not use a different Home per destination chain?
**A:** We do this so that no action is needed in the Home or on the Home chain to set up a new Replica or open a new channel. The Home can be totally naive of the channels it's sending over.

@ -1,170 +0,0 @@
# Cross-Chain Governance
## Pre-Requisite Reading
- [Optics: OPTimistic Interchain Communication](../optics.md)
## Summary
### Purpose
This document describes **a governable system for executing permissioned actions across chains**.
We aim to clearly describe
- **what** contracts comprise the system for calling permissioned functions across chains
- **which** functions will be delegated to this system at launch, and
- (directionally) **who** will have permission to call these functions at launch and in the future
### Out of Scope
This document does NOT describe a system for **how** governance actions will be proposed, voted on, and/or approved before being executed.
It also does not describe how contract upgrades will be written, reviewed, or verified.
### Overview
We define a role, `governor`, with the power to perform permissioned actions across chains. In order to empower the `governor`, we deploy a cross-chain application comprised of a `GovernanceRouter` contract on each chain.
Each `GovernanceRouter` can be delegated control over an arbitrary set of permissioned functions on its local chain. The only way to access the permissioned functionality is to call the function via the `GovernanceRouter` contract.
Each `GovernanceRouter` is programmed to accept messages ***only*** from the `governor`, which is deployed on only one chain. The `governor` may call the contract locally (if it is deployed on the same chain), or it may send it messages remotely via Optics. Because of its exclusive power over the `GovernanceRouter` contracts, the `governor` has exclusive rights to perform **all** of the permissioned roles that are delegated to the `GovernanceRouter` on each chain.
The system receives orders from the `governor` and carries out their effects across chains; it is agnostic to how the `governor` chooses to operate. This maintains flexibility to design the governance proposal process in the future.
At launch, the core functionality that will be delegated to the `GovernanceRouter` on each chain is the power to upgrade the implementation of the `Home` and `Replica` contracts. This way, the `governor` will have the power to conduct upgrades of the Optics system on every chain. More details on the upgradability system can be found [here](../upgrade-setup.md).
At launch, the `governor` will be a multisig of trusted team and community members. In the near future, the `governor` role will most likely be transferred to a more fully-featured set of contracts capable of accepting proposals, tallying votes, and executing successful proposals.
## Message Flow Diagram
<img src="../images/Governance-XApp.jpeg" alt="Governance xApp Diagram" style="max-width:400px;" />
1. `governor` sends message to its local `GovernanceRouter`
2. `GovernanceRouter` dispatches the message...
1. if the recipient is local, to the recipient directly (→ process finished)
2. if the recipient is remote, via Optics to the local Home contract (→ continue to 3)
3. Message is relayed from local `Home` to remote `Replica` via Optics
4. `Replica` dispatches message to the remote `GovernanceRouter`
5. `GovernanceRouter` dispatches the message directly to the local recipient
**Note on message recipient:**
- the recipient may be a `Replica` or `Home` contract
- it may be an `UpgradeBeacon` that controls the implementation of `Replica` or `Home`
- it may be any other app
For simplicity and clarity in showing the message flow, this diagram represents the recipient as a generic "App"
## Specification
### Glossary of Terms
- **xApp** - Cross-Chain Application
- **role**
- an address stored in a smart contract's state that specifies an entity with special permissions on the contract
- permission to call certain functions is usually implemented using a function modifier that requires that the caller of the function is one of the roles with permission to call it; all contract calls sent from callers that do not have valid permission will revert
- *example*: `owner` is the **role** set on all [Ownable](https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/access/Ownable.sol) contracts upon deployment; the `owner` **role** has exclusive permission to call functions with the `onlyOwner` modifier
- **permissioned function**
- any smart contract function that restricts callers of the function to a certain role or roles
- *example*: functions using the `onlyOwner` modifier on [Ownable](https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/access/Ownable.sol) contracts
- **permissioned call** — a call to a **permissioned function**
- **governor chain**
- the chain on which the `governor` is deployed
- the chain whose `GovernanceRouter` is also the special `GovernorRouter` which can *send* messages; all `GovernanceRouters` on other chains can only *receive* governance messages
### On-Chain Components
#### **GovernanceRouter**
- xApp designed to perform permissioned roles on core Optics contracts on all chains
- State Variables
- **governor** state variable
- if the `governor` is local, `governor` will be set to the EVM address of the `governor`
- if the `governor` is remote, `governor` will be `address(0)`
- **governorDomain** state variable
- the Optics domain of the **governor chain**
- stored as a state variable on all `GovernanceRouters`; should be the same on all `GovernanceRouters`; always non-zero
- if the `governor` is local, `governorDomain` is equal to the `originDomain` of the local `Home` contract
- if the `governor` is remote, `governorDomain` is equal to the `originDomain` of the remote `Home` contract
- in either case, it is equal to the `originDomain` of the `Home` contract on the chain of the `GovernorRouter`
- used by all `GovernanceRouters` to determine whether an incoming Optics message was sent from the `GovernorRouter`
- if the message is from the `GovernorRouter`, the `GovernanceRouter` will handle the incoming message
- if not, it will revert
- **routers** state variable
- a mapping of domain → address of the remote `GovernanceRouter` on every other chain
- **domains** state variable
- an array of all domains that are registered in `routers`
- used to loop through and message all other chains when taking governance actions
- there is the possibility that some domains in the array are null (if a chain has been de-registered)
- **GovernorRouter**
- the special `GovernanceRouter` that has *permission to send* governance messages to all other `GovernanceRouters`
- the `GovernanceRouter` on the **governor chain**
#### **Governor**
- via the `GovernanceRouter` system, it has the unique ability to call permissioned functions on **any contract** on **any chain** that transfers permission to the local `GovernanceRouter`
- the **role** with permission to send messages to the `GovernorRouter`
- the `GovernorRouter` has exclusive permission to send messages via Optics to all other `GovernanceRouters`
- the `GovernanceRouters` can have arbitrary permissions delegated to them by any contract on their local chain
- therefore, the `governor` is the entity with the power to call any **permissioned function** delegated to any `GovernanceRouter` on any chain
- there is only one `governor` throughout the Optics system; it can be deployed on any chain
- the `governor` role can always be transferred to another contract, on the same chain **or** a different remote chain
- stored as a state variable on `GovernanceRouters`; set to zero on all `GovernanceRouters` except on the **governor chain**
- **Any contract** on **any chain** that wishes for this governance system to have discretion to call a set of its functions can create a role and a function modifier giving that role exclusive permission to call those functions (a similar pattern to Ownable). The contract must then set the local `GovernanceRouter` as the permissioned role, which, by extension, gives the `governor` exclusive permission to call those functions (regardless of whether the `governor` is remote or local); see the sketch below
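A hedged sketch of that pattern (the contract and function names here are invented for illustration):

```solidity
pragma solidity ^0.6.11;

contract GovernedWidget {
    // Role with exclusive permission over governed functions;
    // set this to the local GovernanceRouter's address
    address public governanceRouter;

    modifier onlyGovernanceRouter() {
        require(msg.sender == governanceRouter, "!governanceRouter");
        _;
    }

    // Whether the governor is local or remote, its calls always
    // arrive through the local GovernanceRouter
    function setImportantParameter(uint256 _value) external onlyGovernanceRouter {
        // ...permissioned logic...
    }
}
```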
### Failure States
If there is fraud on the Optics `Home` contract on the **governor chain**, this is currently a "catastrophic failure state" — no further governance actions can be rolled out to remote chains; we must create a plan to recover the system in this case (See [#128](https://github.com/celo-org/optics-monorepo/issues/128) for more details.)
---
## Message Types
### Executing (Arbitrary) Calls
1. **for each chain**, the `governor` constructs the array of `(to, data)` calls to the permissioned functions on the contracts that will perform the upgrades on that chain
2. the `governor` sends a transaction to the `callRemote` function on its local `GovernanceRouter`, passing in the `domain` of the remote chain and the array of `(to, data)` calls to execute on that chain
3. the local `GovernanceRouter` constructs an Optics-compatible message from the array of calls, addresses the message to the remote `GovernanceRouter`, and sends the message to the local `Home` contract
4. the message is relayed from the local `Home` to the remote `Replica` contract on the specified `domain`
5. the `Replica` dispatches the message to the specified recipient, which is the local `GovernanceRouter`
6. the `GovernanceRouter` parses the message to decode the array of `(to, data)` calls
7. the `GovernanceRouter` uses a low-level call to execute each transaction in the array on the local chain
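A simplified sketch of steps 2, 6, and 7 (it uses `abi.encode` for readability where the real `GovernanceRouter` byte-packs its messages, and elides the Optics dispatch):

```solidity
pragma solidity ^0.6.11;
pragma experimental ABIEncoderV2;

contract GovernanceRouterSketch {
    struct Call {
        address to;
        bytes data;
    }

    // Steps 2-3: encode the per-chain calls into a single message
    // addressed to the remote GovernanceRouter
    function callRemote(uint32 _destination, Call[] memory _calls) external {
        bytes memory message = abi.encode(_calls);
        // home.enqueue(_destination, routers[_destination], message); (elided)
    }

    // Steps 6-7: decode the calls and execute each with a low-level call
    function _handleCalls(bytes memory _message) internal {
        Call[] memory calls = abi.decode(_message, (Call[]));
        for (uint256 i = 0; i < calls.length; i++) {
            (bool success, ) = calls[i].to.call(calls[i].data);
            require(success, "!call");
        }
    }
}
```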
### **Transferring Governor**
#### **Possible State Transitions**
1. called by the local `governor` to transfer the role to another local governor (`governorDomain` does not change; `governor` changes to a new non-zero address)
2. called by the local `governor` to transfer the role to a remote governor (`governorDomain` changes to the remote domain; `governor` changes from a non-zero address to zero)
3. called by a remote `governor` to transfer the role to a local governor (`governorDomain` changes to the local domain; `governor` changes from zero to a non-zero address)
4. called by a remote `governor` to transfer the role to another remote governor (`governorDomain` changes to the new remote domain; `governor` remains zero)
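A minimal sketch of these transitions (simplified; the real contract also authenticates the caller and notifies the other routers):

```solidity
pragma solidity ^0.6.11;

contract TransferGovernorSketch {
    uint32 public immutable localDomain;
    uint32 public governorDomain; // always the governor chain's domain
    address public governor;      // address(0) whenever the governor is remote

    constructor(uint32 _localDomain) public {
        localDomain = _localDomain;
    }

    function _transferGovernor(uint32 _newDomain, bytes32 _newGovernor) internal {
        governorDomain = _newDomain;
        // Transitions 1 & 3: the new governor is local, store its EVM address;
        // transitions 2 & 4: the new governor is remote, store address(0)
        governor = _newDomain == localDomain
            ? address(uint160(uint256(_newGovernor)))
            : address(0);
    }
}
```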
### Enrolling a Router
- used when a new chain is added to Optics after we've already set up the system and transferred governorship
- add a new domain → address mapping to the `routers` mapping on every other `GovernanceRouter`
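A minimal sketch of enrollment (names are illustrative):

```solidity
pragma solidity ^0.6.11;

contract EnrollRouterSketch {
    mapping(uint32 => bytes32) public routers;
    uint32[] public domains;

    // Runs on every GovernanceRouter: locally on the governor chain,
    // and via an Optics message everywhere else
    function _enrollRouter(uint32 _domain, bytes32 _router) internal {
        if (routers[_domain] == bytes32(0)) {
            domains.push(_domain);
        }
        routers[_domain] = _router; // bytes32(0) de-registers the domain
    }
}
```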
---
## Functionality at Launch
### Permissioned Roles
At launch, the `GovernanceRouter` system **will have the following permissions**:
1. upgrade the implementation of `Home` (via `UpgradeBeacon` pattern)
2. upgrade the implementation of all `Replicas` (via 1-to-N `UpgradeBeacon` pattern)
3. upgrade the implementation of itself (via `UpgradeBeacon` pattern)
The `GovernanceRouter` **will NOT have permission** to:
- un-enroll a `Replica` from the `UsingOptics` contract, which will require a specialized role that can act quickly
### Governor
The flexibility of this system will support a move to progressive decentralization.
Initially, the `governor` will most likely be a multisig controlled by trusted team and community members.
Later, the `governor` role will most likely be transferred to a decentralized governance contract.

@ -1,53 +0,0 @@
# Upgrade Setup
We will use the `UpgradeBeacon` pattern to implement three upgradable contract types: `Home`, `Replica`, and `GovernanceRouter`.
Each upgradable contract will have:
- **Proxy**
- the permanent address of the contract that external entities interact with
- holds the storage of the contract
- uses the logic specified by `Implementation`
- uses `delegatecall` to forward contract calls from `Proxy` → `UpgradeBeacon` → `Implementation`
- **UpgradeBeacon**
- stores the (mutable) address of the `Implementation`
- forwards `delegatecalls` to the `Implementation`
- accepts new `Implementation` addresses from its `Controller` (thereby performing upgrades for all `Proxies`)
- **Implementation**
- specifies the logic of the contract
- code is the same as a normal, non-upgradable contract implementation (though it should use upgrade-safe storage)
Each of the three `UpgradeBeacon` contracts will share the same `Controller`: the contract with the power to perform upgrades.
The **Controller** contract will have two roles: controller and saver.
- `controller`
- is a transferrable role that should perform upgrades in almost every case.
- will be set to the `GovernanceRouter Proxy`, so that the Governance xApp ultimately controls the upgrades of the entire system. Note that this creates a circular dependency which makes upgrades of the `GovernanceRouter Implementation` particularly sensitive.
- `saver`
- is a transferrable(?) role that is responsible for recovering the system in a catastrophic failure case. Actions performed by the `saver` will be subject to a timelock enforced by the Controller contract.
- will be set to a multisig contract whose signatories are a set of known / trusted community members; the signatories for the `saver` multisig will be the same on every chain
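A hedged sketch of the beacon indirection (greatly simplified; the production contracts perform the beacon query and `delegatecall` in assembly and propagate return data properly):

```solidity
pragma solidity ^0.6.11;

// Stores the implementation address; only the controller may change it
contract UpgradeBeacon {
    address public controller;
    address public implementation;

    constructor(address _implementation, address _controller) public {
        implementation = _implementation;
        controller = _controller;
    }

    // Upgrading the beacon upgrades every Proxy that points at it
    function upgrade(address _newImplementation) external {
        require(msg.sender == controller, "!controller");
        implementation = _newImplementation;
    }
}

// Permanent address that users interact with; holds all storage
contract UpgradeBeaconProxy {
    UpgradeBeacon private immutable beacon;

    constructor(UpgradeBeacon _beacon) public {
        beacon = _beacon;
    }

    fallback() external payable {
        // Ask the beacon for the current implementation, then delegatecall,
        // so the logic runs against this contract's storage
        (bool success, bytes memory ret) = beacon.implementation().delegatecall(msg.data);
        require(success, string(ret));
    }
}
```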
## Diagrams
![Upgrade Setup Diagram 1](./images/Upgrade-Setup-1.png)
![Upgrade Setup Diagram 2](./images/Upgrade-Setup-2.png)
## Executing Upgrades: Flow / Process
### Home or Replica
TODO
### Governance Router
TODO (similar to Home or Replica, but extra-sensitive because a mistake could break the upgradability mechanism itself)
## Appendix
TODO
### Deprecated Diagram
![Upgrade Setup Diagram - DEPRECATED](./images/Upgrade-Setup-DEPRECATED.png)

@ -1,11 +0,0 @@
# Code Versioning
Due to the dependency structure in the codebase, it is advantageous to version the contracts and then pin everything else in the monorepo to the same version.
**Versioning Scheme:**
- Monotonically increasing Integer Versions corresponding to implementation contract deployments
- ex. 1, 2, 3, etc.
- Monorepo is tagged with integer version upon major release
- The commit a release is associated with will contain agent/deployment code that is compatible with it
- Agents/build artifacts are versioned using global repo version

@ -1,87 +0,0 @@
# Developing Cross-Chain Applications
## Summary
Optics sends messages from one chain to another in the form of raw bytes. A cross-chain application that wishes to *use* Optics will need to define the rules for sending and receiving messages for its use case.
Each cross-chain application must implement its own messaging protocol. By convention, we call the contracts that implement this protocol the application's **Router contracts.** These Router contracts must:
- **maintain a permissioned set** of the contract(s) on remote chains from which it will accept messages via Optics — this could be a single owner of the application on one chain; it could be a registry of other applications implementing the same rules on various chains
- **encode messages in a standardized format**, so they can be decoded by the Router contract on the destination chain
- **handle messages** from remote Router contracts
- **dispatch messages** to remote Router contracts
By implementing these pieces of functionality within a Router contract and deploying it across multiple chains, we create a working cross-chain application with a common language and set of rules. Applications of this kind can use Optics as the cross-chain courier for sending messages to and receiving messages from each other.
## Example Code
This repository has several examples one can use to build an understanding of cross-chain applications.
### xApp Template
[This is a template](https://github.com/celo-org/optics-monorepo/tree/main/solidity/optics-xapps/contracts/xapp-template) provided by the Optics team that shows the high-level components of an xApp, ready for one to fill in their own application logic and utilize an Optics channel for cross-chain communication.
To implement an xApp, define the actions you would like to execute across chains.
For each type of action,
- in the [xApp Router](https://github.com/celo-org/optics-monorepo/blob/main/solidity/optics-xapps/contracts/xapp-template/RouterTemplate.sol)
  - implement a function like `doTypeA` to initiate the action from one domain to another (add your own parameters and logic)
  - implement a corresponding `_handleTypeA` function to receive, parse, and execute this type of message on the remote domain
  - add logic to the `handle` function to route incoming messages to the appropriate `_handle` function
- in the [Message library](https://github.com/celo-org/optics-monorepo/blob/main/solidity/optics-xapps/contracts/xapp-template/MessageTemplate.sol),
  - implement functions to *format* the message to send to the other chain (encoding all necessary information for the action)
  - implement functions to *parse* the message once it is received on the other chain (decoding all necessary information for the action)

A sketch of this shape follows.
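A simplified sketch of the Router/Message pair, assuming a hypothetical "Type A" action and using `abi.encode` where the template byte-packs:

```solidity
pragma solidity ^0.6.11;

library MessageSketch {
    // *format*: encode everything the remote chain needs for the action
    function formatTypeA(uint256 _amount) internal pure returns (bytes memory) {
        return abi.encode(uint8(1), _amount);
    }

    // *parse*: decode the action on the destination chain
    function parseTypeA(bytes memory _message) internal pure returns (uint256 amount) {
        (, amount) = abi.decode(_message, (uint8, uint256));
    }
}

contract RouterSketch {
    // Registry of remote Routers implementing the same application
    mapping(uint32 => bytes32) public remoteRouters;

    // Initiate a Type A action on a remote domain
    function doTypeA(uint32 _destination, uint256 _amount) external {
        bytes memory message = MessageSketch.formatTypeA(_amount);
        // home.enqueue(_destination, remoteRouters[_destination], message); (elided)
    }

    // Entry point called by the local Replica; routes to the right handler
    function handle(uint32 _origin, bytes32 _sender, bytes memory _message) external {
        require(remoteRouters[_origin] == _sender, "!router");
        _handleTypeA(MessageSketch.parseTypeA(_message));
    }

    function _handleTypeA(uint256 _amount) internal {
        // ...execute the action locally...
    }
}
```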
### Ping Pong xApp
**Important!** The Ping Pong xApp is for reference only. Please do not deploy!
[The PingPong xApp](https://github.com/celo-org/optics-monorepo/tree/main/solidity/optics-xapps/contracts/ping-pong) is capable of initiating PingPong "matches" between two chains. A match consists of "volleys" sent back-and-forth between the two chains via Optics.
The first volley in a match is always a Ping volley.
- When a Router receives a Ping volley, it returns a Pong.
- When a Router receives a Pong volley, it returns a Ping.
The Routers keep track of the number of volleys in a given match, and emit events for each Sent and Received volley so that spectators can watch.
### Token Bridge xApp
See the full-length [Token Bridge Documentation](#TODO) for in-depth details on Token Bridge operation and construction.
[Link to Contracts](https://github.com/celo-org/optics-monorepo/tree/main/solidity/optics-xapps/contracts/bridge)
### Cross-Chain Governance xApp
See the full-length [Optics Governance Documentation](#TODO) for in-depth details on Governance xApp operation and construction.
[Link to Contracts](https://github.com/celo-org/optics-monorepo/tree/main/solidity/optics-core/contracts/governance)
## Useful Links
- [xApp Developers Workshop @ EthCC 2021 by Anna Carroll](https://www.youtube.com/watch?v=E_zhTRsxWtw)
## Glossary of Terms
- **Local vs Remote**: in the context of discussing a particular contract, which is deployed on a particular chain: contracts and assets on that chain are "local"; contracts and assets on another chain are "remote"
- e.g. Uniswap is deployed on Ethereum. Ethereum is the local chain. Celo is a remote chain
- e.g. there is a token deployed on Celo. There is a local `Home` contract (on Celo), and a local `Replica` contract (on Celo) receiving messages from Ethereum. There is a remote `Home` (on Ethereum) sending messages. There is a remote `Replica` (on Ethereum) receiving messages from Celo. There is a remote `Router` (on Ethereum) communicating with the local `Router` on Celo.
- **"Locally originating" vs "Remotely Originating"**: in the context of a token or asset in a specific contract, these terms denote whether the original canonical contract is deployed on the local chain, or on a remote chain
- e.g. cUSD originates on Celo. in the context of the Celo blockchain, cUSD is "of local origin" or "locally originating"; in the context of the Ethereum blockchain, cUSD is "of remote origin"
- e.g. Ether and WETH originate on Ethereum. When used in a Celo contract, they are "remotely originating" or "of remote origin"
- e.g. a `Router` receives a Transfer message for a remotely-originating asset. It finds the local contract that represents that asset. When it receives a message for a locally originating asset, it knows that it can find the original asset contract locally
- **Router Contract**: a contract that implements a cross-chain application by specifying the:
- **message format** - the bytes-encoded format of messages for the application
- **registry of remote Router contracts** that implement the same application on remote chains
- **rules & behavior for handling messages** sent via Optics by a registered Router contract on a remote chain
- **rules & behavior for dispatching messages** via Optics to a registered Router contract on a remote chain
- **Message**: bytes transferred via Optics that encode some application-specific instructions via a standardized set of rules
- **Instructions**: set of application-specific actions (e.g. "send 5 token X to 0x123...456 on chain Z" in the case of a Token Bridge); calls to functions on the Router contract
- **Handling Messages from Optics Channels**:
- receive bytes-encoded message from Optics (sent from a remote chain)
- enact or dispatch the instructions on the local chain
- local handler decodes the message into application-specific instructions
- **Dispatching Message to Optics Channels**:
- receive instructions on the local chain (via local users and contracts calling functions on the contract)
- encode the instructions into bytes using standardized message format
- dispatch the bytes-encoded message to Optics (to be sent to a remote chain)

@ -1,121 +0,0 @@
# Token Bridge xApp
## Summary
The Token Bridge xApp implements a bridge that is capable of sending tokens across blockchains.
Features:
- Ensures that circulating token supply remains constant across all chains.
## Protocol
### Handling Messages
- the BridgeRouter contract only accepts messages from other remote BridgeRouter contracts, which are registered by each BridgeRouter
- therefore, every message received follows the same "rules" that the local BridgeRouter expects
- for example, any tokens sent in a message are ensured to be valid, because the remote BridgeRouter sending the message should locally enforce that a user has custody before sending the message to the remote chain
- the messages from remote BridgeRouter contracts must be sent via Optics and dispatched by a local Replica contract that is registered with the UsingOptics contract
- thus, the BridgeRouter depends on the UsingOptics contract for a valid registry of local Replicas
- if another chain has sent a token that's "native" to this chain, we send that token from the Router contract's escrow to the recipient on this chain
- if we're receiving a token that's not "native" to this chain,
- we check whether a representation token contract has already been deployed by the Router contract on this chain; if not, we deploy that representation token contract and add its address to the token registry
- we mint representation tokens on this chain and send them to the recipient
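A pseudocode-level sketch of this receive path (the helper names are invented; the real `BridgeRouter` works with byte-packed messages and a `TokenRegistry`):

```solidity
pragma solidity ^0.6.11;

contract BridgeReceiveSketch {
    // Called once an inbound Transfer message has been authenticated
    function _handleTransfer(bytes32 _tokenId, address _recipient, uint256 _amount) internal {
        if (_isLocallyOriginating(_tokenId)) {
            // The canonical token lives here: release it from escrow
            _sendFromEscrow(_tokenId, _recipient, _amount);
        } else {
            // Remote-origin token: find (or deploy) the local
            // representation contract, then mint to the recipient
            address representation = _ensureRepresentation(_tokenId);
            _mint(representation, _recipient, _amount);
        }
    }

    function _isLocallyOriginating(bytes32 _tokenId) internal view returns (bool) { /* elided */ }
    function _sendFromEscrow(bytes32 _tokenId, address _to, uint256 _amount) internal { /* elided */ }
    function _ensureRepresentation(bytes32 _tokenId) internal returns (address) { /* elided */ }
    function _mint(address _representation, address _to, uint256 _amount) internal { /* elided */ }
}
```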
### Dispatching Messages
- **TODO**: describe rules — person must approve token to Router on local chain (if it's a native token) proving they have ownership over that token and can send to the native chain
- sending tokens
- the user uses ERC-20 `approve` to grant allowance for the tokens being sent to the local BridgeRouter contract
- the user calls `send` on the local BridgeRouter to transfer the tokens to a remote chain
- if the token being sent is "native" to the BridgeRouter's chain, the BridgeRouter contract holds the token in escrow
- if the token being sent is not "native" to the chain, then the local token is a representation token contract deployed by the BridgeRouter in the first place; the BridgeRouter contract burns the tokens before sending them to another chain
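A sketch of the send path under the same assumptions (the message formatting and `Home` dispatch are elided):

```solidity
pragma solidity ^0.6.11;

interface IERC20Like {
    function transferFrom(address _from, address _to, uint256 _amount) external returns (bool);
}

contract BridgeSendSketch {
    function send(address _token, uint256 _amount, uint32 _destination, bytes32 _recipient) external {
        if (_isLocallyOriginating(_token)) {
            // Native token: pull it into escrow (requires a prior approve())
            require(IERC20Like(_token).transferFrom(msg.sender, address(this), _amount), "!transfer");
        } else {
            // Representation token: this router deployed it, so it may burn
            _burn(_token, msg.sender, _amount);
        }
        // ...format a Transfer message and enqueue it on the local Home...
    }

    function _isLocallyOriginating(address _token) internal view returns (bool) { /* elided */ }
    function _burn(address _token, address _from, uint256 _amount) internal { /* elided */ }
}
```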
### Message Format
- **TODO**: specify how messages are encoded for this application
## Architecture
**BridgeRouter ([code](https://github.com/celo-org/optics-monorepo/blob/main/solidity/optics-xapps/contracts/bridge/BridgeRouter.sol))**
- Receives incoming messages from local `Replica` contracts sending tokens from another chain
- Dispatches outgoing messages to local `Home` contract in order to send tokens to other chains
- Manages a registry of representation ERC-20 token contracts that it deploys on its local chain
- Maintains a registry of remote `BridgeRouter` contracts to
- authenticate that incoming messages come from a remote `BridgeRouter` contract
- properly address outgoing messages to remote `BridgeRouter` contracts
**TokenRegistry ([code](https://github.com/celo-org/optics-monorepo/blob/main/solidity/optics-xapps/contracts/bridge/TokenRegistry.sol))**
- Responsible for deploying and keeping track of representation ERC-20 token contracts on this chain
- When a new token is transferred, deploys a new representation token contract on this chain, and stores a two-way mapping between the information of the original token contract & the address of the representation on this chain
- Inherited by the `BridgeRouter`, which uses this to make sure a representation of the token exists on this chain before minting/burning
**BridgeMessage library ([code](https://github.com/celo-org/optics-monorepo/blob/main/solidity/optics-xapps/contracts/bridge/BridgeMessage.sol))**
- Library for handling all the nitty gritty of encoding / decoding messages in a standardized way so they can be sent via Optics
## Message Flow
The logical steps and flow of information involved in sending tokens from one chain to another.
- **Chain A**
- User wants to send their tokens to Chain B
- If it's a native token, the user must first `approve` tokens to the local `BridgeRouter-A`
- User calls `send` on the local `BridgeRouter-A`
- If it's a native token, tokens are pulled from the User's wallet to `BridgeRouter-A` and held in escrow
- If it's a non-native token, tokens are burned from User's wallet by `BridgeRouter-A`
- *Note:* `BridgeRouter-A` can burn non-native tokens because the representative contract for the token on its non-native chain was originally deployed by `BridgeRouter-A` when it received a message sending the token from another chain. The router has administrative rights on representations
- `BridgeRouter-A` constructs a message to `BridgeRouter-B`
- `BridgeRouter-A` keeps a mapping of `BridgeRouter` contracts on other chains so it knows where to send the message on Chain B
- `BridgeRouter-A` calls `enqueue` on `Home-A` contract to send the message to Chain B
- **Off-Chain**
- Standard Optics behavior. Updater → Relayer → Processor
- Relayers see message on `Home-A`
- Relayers pass message to `Replica-A` on Chain B
- **Chain B**
- After waiting for the acceptance timeout, `Replica-A` processes the message and dispatches it to `BridgeRouter-B`
- `BridgeRouter-B` keeps a mapping of `Replica` contracts that it trusts on the local chain; it uses this to authenticate that the incoming message came via Optics from Chain A
- `BridgeRouter-B` keeps a mapping of `BridgeRouter` contracts on other chains, so it can authenticate that this message came from `BridgeRouter-A`
- `BridgeRouter-B` looks for the corresponding ERC-20 token contract in its registry, and deploys a new representative one if it doesn't already exist
- `BridgeRouter-B` sends the token to the recipient
- If it's a native token, `BridgeRouter-B` sends the tokens from the pool it's holding in escrow
- If it's a non-native token, `BridgeRouter-B` mints the token to the recipient
- *Note:* `BridgeRouter-B` can mint non-native tokens because the representative contract for the token on its non-native chain is deployed by `BridgeRouter-B` when it received a message sending the token from another chain. The router has administrative rights on representations.
## Tracing a Message
Optics is currently still under active development. Because Optics batches messages and sends only tree roots, there is no way to track individual messages on-chain once a message is passed to the Home contract. An agent-querying tool could be built to query off-chain agents for individual transactions, but such a tool does not currently exist.
What this means for the token bridge is that there will be a window of uncertainty between send and receipt. You can think of this as snail mail without any tracking, but with delivery confirmation. The only things that can be confirmed on-chain are:
1) A transaction was sent on chain A to the BridgeRouter contract
2) The recipient address received a token mint on chain B
### Pseudo-tracking
1. Start by locating the `bridgeRouter` contract you are looking for; the addresses are in the config dir:
* [Dev Contracts](https://github.com/celo-org/optics-monorepo/tree/main/rust/config/development)
* [Staging Contracts](https://github.com/celo-org/optics-monorepo/tree/main/rust/config/staging)
* [Prod Contracts](https://github.com/celo-org/optics-monorepo/tree/main/rust/config/mainnet)
2. Verify that a transaction was sent to the BridgeRouter contract on the Home chain
* _Wait time_: dependent on block confirmation times for each chain
3. Verify a transaction was sent on the Home contract
* _Wait time_: dependent on block confirmation for each chain, but should be shortly after transaction is sent to BridgeRouter contract
* There is not a way to query for a particular transaction at this time. Cross-check timestamps with the BridgeRouter transaction.
4. After acceptance period, verify a transaction was sent on the destination Replica
* _Wait time_: acceptance period. Currently ~3 hours
* Cross-check timestamps
5. Verify a transaction was sent on the destination BridgeRouter
* _Wait time_: acceptance period + block confirmation time
6. Verify that the recipient address received a token mint
* _Wait time_: block confirmation time for chain A + acceptance period + block confirmation time for chain B

64
package-lock.json generated

@ -24855,12 +24855,12 @@
},
"solidity/apps": {
"name": "@abacus-network/apps",
-"version": "0.0.0",
+"version": "0.0.1",
"license": "MIT OR Apache-2.0",
"dependencies": {
-"@abacus-network/core": "file:../core",
-"@abacus-network/hardhat": "^0.0.5",
-"@abacus-network/utils": "^0.0.5",
+"@abacus-network/core": "^0.0.3",
+"@abacus-network/hardhat": "^0.0.8",
+"@abacus-network/utils": "^0.0.7",
"@openzeppelin/contracts": "~3.4.2",
"@openzeppelin/contracts-upgradeable": "~3.4.2",
"@summa-tx/memview-sol": "^2.0.0"
@ -24889,10 +24889,10 @@
},
"solidity/core": {
"name": "@abacus-network/core",
-"version": "0.0.1",
+"version": "0.0.3",
"license": "MIT OR Apache-2.0",
"dependencies": {
-"@abacus-network/utils": "^0.0.5",
+"@abacus-network/utils": "^0.0.7",
"@openzeppelin/contracts": "^3.4.2",
"@openzeppelin/contracts-upgradeable": "~3.4.2",
"@summa-tx/memview-sol": "^2.0.0",
@ -24984,7 +24984,7 @@
"version": "0.0.0",
"dependencies": {
"@abacus-network/apps": "file:../../solidity/apps",
-"@abacus-network/sdk": "^0.0.1",
+"@abacus-network/sdk": "file:../sdk",
"@types/bunyan": "^1.8.7",
"@types/express": "^4.17.13",
"@types/google-spreadsheet": "^3.1.5",
@ -25017,11 +25017,11 @@
},
"typescript/deploy": {
"name": "@abacus-network/deploy",
-"version": "0.0.5",
+"version": "0.0.6",
"license": "MIT OR Apache-2.0",
"dependencies": {
-"@abacus-network/core": "file:../../solidity/core",
-"@abacus-network/sdk": "^0.0.1",
+"@abacus-network/core": "^0.0.3",
+"@abacus-network/sdk": "^0.0.3",
"@types/node": "^16.9.1",
"axios": "^0.21.3"
},
@ -25040,11 +25040,11 @@
},
"typescript/hardhat": {
"name": "@abacus-network/hardhat",
-"version": "0.0.5",
+"version": "0.0.8",
"license": "MIT OR Apache-2.0",
"dependencies": {
-"@abacus-network/core": "file:../../solidity/core",
-"@abacus-network/utils": "^0.0.5",
+"@abacus-network/core": "^0.0.3",
+"@abacus-network/utils": "^0.0.7",
"@nomiclabs/hardhat-ethers": "^2.0.5",
"@nomiclabs/hardhat-waffle": "^2.0.2",
"@typechain/hardhat": "^2.0.1",
@ -25063,7 +25063,7 @@
"@abacus-network/apps": "file:../../solidity/apps",
"@abacus-network/core": "file:../../solidity/core",
"@abacus-network/deploy": "file:../deploy",
-"@abacus-network/sdk": "^0.0.1",
+"@abacus-network/sdk": "file:../sdk",
"@aws-sdk/client-kms": "3.48.0",
"@ethersproject/experimental": "^5.3.0",
"@nomiclabs/hardhat-etherscan": "^3.0.3",
@ -25097,12 +25097,12 @@
},
"typescript/sdk": {
"name": "@abacus-network/sdk",
-"version": "0.0.1",
+"version": "0.0.3",
"license": "MIT OR Apache-2.0",
"dependencies": {
-"@abacus-network/apps": "file:../../solidity/apps",
-"@abacus-network/core": "file:../../solidity/core",
-"@abacus-network/utils": "file:../utils",
+"@abacus-network/apps": "^0.0.1",
+"@abacus-network/core": "^0.0.3",
+"@abacus-network/utils": "^0.0.7",
"@ethersproject/bignumber": "^5.5.0",
"@ethersproject/bytes": "^5.5.0",
"celo-ethers-provider": "0.0.0",
@ -25128,7 +25128,7 @@
},
"typescript/utils": {
"name": "@abacus-network/utils",
-"version": "0.0.5",
+"version": "0.0.7",
"license": "MIT OR Apache-2.0",
"dependencies": {
"chai": "^4.3.0",
@ -25140,9 +25140,9 @@
"@abacus-network/apps": {
"version": "file:solidity/apps",
"requires": {
-"@abacus-network/core": "file:../core",
-"@abacus-network/hardhat": "^0.0.5",
-"@abacus-network/utils": "^0.0.5",
+"@abacus-network/core": "^0.0.3",
+"@abacus-network/hardhat": "^0.0.8",
+"@abacus-network/utils": "^0.0.7",
"@nomiclabs/hardhat-ethers": "^2.0.1",
"@nomiclabs/hardhat-waffle": "^2.0.1",
"@openzeppelin/contracts": "~3.4.2",
@ -25171,7 +25171,7 @@
"version": "file:typescript/contract-metrics",
"requires": {
"@abacus-network/apps": "file:../../solidity/apps",
-"@abacus-network/sdk": "^0.0.1",
+"@abacus-network/sdk": "file:../sdk",
"@types/bunyan": "^1.8.7",
"@types/express": "^4.17.13",
"@types/google-spreadsheet": "^3.1.5",
@ -25204,7 +25204,7 @@
"@abacus-network/core": {
"version": "file:solidity/core",
"requires": {
-"@abacus-network/utils": "^0.0.5",
+"@abacus-network/utils": "^0.0.7",
"@nomiclabs/hardhat-ethers": "^2.0.1",
"@nomiclabs/hardhat-waffle": "^2.0.1",
"@openzeppelin/contracts": "^3.4.2",
@ -25231,8 +25231,8 @@
"@abacus-network/deploy": {
"version": "file:typescript/deploy",
"requires": {
-"@abacus-network/core": "file:../../solidity/core",
-"@abacus-network/sdk": "^0.0.1",
+"@abacus-network/core": "^0.0.3",
+"@abacus-network/sdk": "^0.0.3",
"@typechain/ethers-v5": "~7.0.0",
"@types/node": "^16.9.1",
"axios": "^0.21.3",
@ -25252,8 +25252,8 @@
"@abacus-network/hardhat": {
"version": "file:typescript/hardhat",
"requires": {
-"@abacus-network/core": "file:../../solidity/core",
-"@abacus-network/utils": "^0.0.5",
+"@abacus-network/core": "^0.0.3",
+"@abacus-network/utils": "^0.0.7",
"@nomiclabs/hardhat-ethers": "^2.0.5",
"@nomiclabs/hardhat-waffle": "^2.0.2",
"@typechain/hardhat": "^2.0.1",
@ -25269,9 +25269,9 @@
"@abacus-network/sdk": {
"version": "file:typescript/sdk",
"requires": {
-"@abacus-network/apps": "file:../../solidity/apps",
-"@abacus-network/core": "file:../../solidity/core",
-"@abacus-network/utils": "file:../utils",
+"@abacus-network/apps": "^0.0.1",
+"@abacus-network/core": "^0.0.3",
+"@abacus-network/utils": "^0.0.7",
"@ethersproject/bignumber": "^5.5.0",
"@ethersproject/bytes": "^5.5.0",
"@types/node": "^16.9.1",
@ -38219,7 +38219,7 @@
"@abacus-network/apps": "file:../../solidity/apps",
"@abacus-network/core": "file:../../solidity/core",
"@abacus-network/deploy": "file:../deploy",
-"@abacus-network/sdk": "^0.0.1",
+"@abacus-network/sdk": "file:../sdk",
"@aws-sdk/client-kms": "3.48.0",
"@ethersproject/experimental": "^5.3.0",
"@nomiclabs/hardhat-ethers": "^2.0.5",

2
rust/.gitignore vendored

@ -1,5 +1,5 @@
target
processordb
-updaterdb
+validatordb
relayerdb
kathydb

144
rust/Cargo.lock generated

@ -21,11 +21,11 @@ dependencies = [
 "abacus-test",
 "async-trait",
 "color-eyre 0.6.1",
- "config 0.10.1",
+ "config",
 "ethers",
 "futures-util",
 "lazy_static",
- "mockall 0.10.2",
+ "mockall",
 "once_cell",
 "opentelemetry",
 "opentelemetry-jaeger",
@ -120,10 +120,10 @@ dependencies = [
 "abacus-ethereum",
 "async-trait",
 "color-eyre 0.5.11",
- "config 0.10.1",
+ "config",
 "ethers",
 "futures-util",
- "mockall 0.10.2",
+ "mockall",
 "prometheus",
 "rand 0.8.5",
 "rocksdb",
@ -645,7 +645,7 @@ dependencies = [
 "abacus-test",
 "async-trait",
 "color-eyre 0.5.11",
- "config 0.10.1",
+ "config",
 "ethers",
 "futures-util",
 "log",
@ -879,22 +879,6 @@ dependencies = [
 "yaml-rust",
]
[[package]]
name = "config"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b1b9d958c2b1368a663f05538fc1b5975adce1e19f435acceae987aceeeb369"
dependencies = [
"lazy_static",
"nom 5.1.2",
"rust-ini",
"serde 1.0.130",
"serde-hjson",
"serde_json",
"toml",
"yaml-rust",
]
[[package]]
name = "const-oid"
version = "0.7.1"
@ -2191,7 +2175,7 @@ dependencies = [
 "abacus-core",
 "async-trait",
 "color-eyre 0.5.11",
- "config 0.10.1",
+ "config",
 "ethers",
 "futures-util",
 "log",
@ -2532,21 +2516,6 @@ dependencies = [
 "winapi",
]
[[package]]
name = "mockall"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18d614ad23f9bb59119b8b5670a85c7ba92c5e9adf4385c81ea00c51c8be33d5"
dependencies = [
"cfg-if 1.0.0",
"downcast",
"fragile",
"lazy_static",
"mockall_derive 0.9.1",
"predicates",
"predicates-tree",
]
[[package]]
name = "mockall"
version = "0.10.2"
@ -2557,23 +2526,11 @@ dependencies = [
 "downcast",
 "fragile",
 "lazy_static",
- "mockall_derive 0.10.2",
+ "mockall_derive",
 "predicates",
 "predicates-tree",
]
[[package]]
name = "mockall_derive"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5dd4234635bca06fc96c7368d038061e0aae1b00a764dc817e900dc974e3deea"
dependencies = [
"cfg-if 1.0.0",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "mockall_derive"
version = "0.10.2"
@ -3294,33 +3251,6 @@ dependencies = [
 "unicode-xid",
]
[[package]]
name = "processor"
version = "0.1.0"
dependencies = [
"abacus-base",
"abacus-core",
"abacus-test",
"async-trait",
"color-eyre 0.5.11",
"config 0.10.1",
"ethers",
"futures-util",
"log",
"paste",
"prometheus",
"rocksdb",
"rusoto_core",
"rusoto_s3",
"serde 1.0.130",
"serde_json",
"thiserror",
"tokio",
"tracing",
"tracing-futures",
"tracing-subscriber 0.2.25",
]
[[package]]
name = "prometheus"
version = "0.12.0"
@ -3560,7 +3490,7 @@ dependencies = [
 "abacus-test",
 "async-trait",
 "color-eyre 0.5.11",
- "config 0.10.1",
+ "config",
 "ethers",
 "futures-util",
 "log",
@ -4841,35 +4771,6 @@ version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
[[package]]
name = "updater"
version = "0.1.0"
dependencies = [
"abacus-base",
"abacus-core",
"abacus-ethereum",
"abacus-test",
"async-trait",
"color-eyre 0.5.11",
"config 0.11.0",
"ethers",
"futures-util",
"hex",
"log",
"mockall 0.9.1",
"paste",
"prometheus",
"rocksdb",
"serde 1.0.130",
"serde_json",
"thiserror",
"tokio",
"tracing",
"tracing-futures",
"tracing-subscriber 0.2.25",
"warp",
]
[[package]]
name = "url"
version = "2.2.2"
@ -4908,7 +4809,7 @@ dependencies = [
 "abacus-test",
 "async-trait",
 "color-eyre 0.5.11",
- "config 0.10.1",
+ "config",
 "ethers",
 "futures-util",
 "log",
@ -5092,33 +4993,6 @@ dependencies = [
 "web-sys",
]
[[package]]
name = "watcher"
version = "0.1.0"
dependencies = [
"abacus-base",
"abacus-core",
"abacus-ethereum",
"abacus-test",
"async-trait",
"color-eyre 0.5.11",
"config 0.10.1",
"ethers",
"futures-util",
"log",
"paste",
"prometheus",
"rocksdb",
"serde 1.0.130",
"serde_json",
"thiserror",
"tokio",
"tokio-test",
"tracing",
"tracing-futures",
"tracing-subscriber 0.2.25",
]
[[package]]
name = "web-sys"
version = "0.3.55"

rust/Cargo.toml

@ -10,10 +10,7 @@ members = [
 "agents/kathy",
 "agents/checkpointer",
 "agents/validator",
- "agents/updater",
 "agents/relayer",
- "agents/watcher",
- "agents/processor",
 "tools/kms-cli",
 "tools/abacus-cli",
 "tools/balance-exporter"

rust/Dockerfile

@ -28,11 +28,8 @@ RUN --mount=id=cargo,type=cache,target=/usr/src/target \
# Copy artifacts out of volume
WORKDIR /release

-RUN --mount=id=cargo,type=cache,target=/usr/src/target cp /usr/src/target/release/updater .
+RUN --mount=id=cargo,type=cache,target=/usr/src/target cp /usr/src/target/release/validator .
RUN --mount=id=cargo,type=cache,target=/usr/src/target cp /usr/src/target/release/relayer .
-# Commented out pending https://github.com/celo-org/optics-monorepo/issues/782
-# RUN --mount=id=cargo,type=cache,target=/usr/src/target cp /usr/src/target/release/watcher .
-RUN --mount=id=cargo,type=cache,target=/usr/src/target cp /usr/src/target/release/processor .
RUN --mount=id=cargo,type=cache,target=/usr/src/target cp /usr/src/target/release/kathy .
RUN --mount=id=cargo,type=cache,target=/usr/src/target cp /usr/src/target/release/kms-cli .
RUN --mount=id=cargo,type=cache,target=/usr/src/target cp /usr/src/target/release/abacus-cli .
@ -44,10 +41,8 @@ RUN apt-get update && \
    ca-certificates

WORKDIR /app
-COPY --from=builder /release/updater .
+COPY --from=builder /release/validator .
COPY --from=builder /release/relayer .
-# COPY --from=builder /release/watcher .
-COPY --from=builder /release/processor .
COPY --from=builder /release/kathy .
COPY --from=builder /release/kms-cli .
COPY --from=builder /release/abacus-cli .
@ -55,4 +50,4 @@ COPY config ./config
RUN chmod 777 /app
RUN mkdir /usr/share/abacus/ && chmod 1000 /usr/share/abacus
USER 1000
-CMD ["./watcher"]
+CMD ["./validator"]

rust/abacus-base/bin/example.rs

@ -1,11 +1,11 @@
use color_eyre::Result;
-use abacus_base::{AbacusAgent, Settings};
+use abacus_base::{Agent, Settings};

/// An example main function for any agent that implements Default
async fn _example_main<OA>(settings: Settings) -> Result<()>
where
-    OA: AbacusAgent<Settings = Settings> + Sized + 'static,
+    OA: Agent<Settings = Settings> + Sized + 'static,
{
    // Instantiate an agent
    let oa = OA::from_settings(settings).await?;
@ -14,8 +14,8 @@ where
        .tracing
        .start_tracing(oa.metrics().span_duration())?;

-    // Use the agent to run a number of replicas
-    oa.run_all().await?
+    // Run the agent
+    oa.run_all(vec![]).await?
}

/// Read settings from the config file and set up reporting and logging based

rust/abacus-base/src/agent.rs

@ -2,33 +2,17 @@ use crate::{
    cancel_task,
    metrics::CoreMetrics,
    settings::{IndexSettings, Settings},
-    CachingHome, CachingInbox, CachingOutbox, CachingReplica, ContractSyncMetrics, IndexDataTypes,
+    CachingInbox, CachingOutbox,
};
use abacus_core::db::DB;
use async_trait::async_trait;
-use color_eyre::{eyre::WrapErr, Report, Result};
+use color_eyre::{Report, Result};
use futures_util::future::select_all;
use tracing::instrument::Instrumented;
use tracing::{info_span, Instrument};

use std::{collections::HashMap, sync::Arc};
use tokio::task::JoinHandle;
/// Properties shared across all agents
#[derive(Debug)]
pub struct AgentCore {
/// A boxed Home
pub home: Arc<CachingHome>,
/// A map of boxed Replicas
pub replicas: HashMap<String, Arc<CachingReplica>>,
/// A persistent KV Store (currently implemented as rocksdb)
pub db: DB,
/// Prometheus metrics
pub metrics: Arc<CoreMetrics>,
/// The height at which to start indexing the Home
pub indexer: IndexSettings,
/// Settings this agent was created with
pub settings: crate::settings::Settings,
}
/// Properties shared across all abacus agents
#[derive(Debug)]
@ -108,126 +92,3 @@ pub trait Agent: Send + Sync + std::fmt::Debug + AsRef<AbacusAgentCore> {
        .instrument(span)
    }
}
/// TODO: Should be removed after transition period
/// A trait for an application that runs on a replica
/// and holds a reference to a home.
#[async_trait]
pub trait AbacusAgent: Send + Sync + std::fmt::Debug + AsRef<AgentCore> {
/// The agent's name
const AGENT_NAME: &'static str;
/// The settings object for this agent
type Settings: AsRef<Settings>;
/// Instantiate the agent from the standard settings object
async fn from_settings(settings: Self::Settings) -> Result<Self>
where
Self: Sized;
/// Return a handle to the metrics registry
fn metrics(&self) -> Arc<CoreMetrics> {
self.as_ref().metrics.clone()
}
/// Return a handle to the DB
fn db(&self) -> DB {
self.as_ref().db.clone()
}
/// Return a reference to a home contract
fn home(&self) -> Arc<CachingHome> {
self.as_ref().home.clone()
}
/// Get a reference to the replicas map
fn replicas(&self) -> &HashMap<String, Arc<CachingReplica>> {
&self.as_ref().replicas
}
/// Get a reference to a replica by its name
fn replica_by_name(&self, name: &str) -> Option<Arc<CachingReplica>> {
self.replicas().get(name).map(Clone::clone)
}
/// Run the agent with the given home and replica
/// If an agent does not need to use ContractSyncMetrics, just call run directly instead of run_all
fn run(&self, replica: &str) -> Instrumented<JoinHandle<Result<()>>>;
/// Run the Agent, and tag errors with the domain ID of the replica
#[allow(clippy::unit_arg)]
#[tracing::instrument]
fn run_report_error(&self, replica: &str) -> Instrumented<JoinHandle<Result<()>>> {
let m = format!("Task for replica named {} failed", replica);
let handle = self.run(replica).in_current_span();
let fut = async move { handle.await?.wrap_err(m) };
tokio::spawn(fut).in_current_span()
}
/// Run several agents by replica name
#[allow(clippy::unit_arg)]
fn run_many(&self, replicas: &[&str]) -> Instrumented<JoinHandle<Result<()>>> {
let span = info_span!("run_many");
let handles: Vec<_> = replicas
.iter()
.map(|replica| self.run_report_error(replica))
.collect();
tokio::spawn(async move {
// This gets the first future to resolve.
let (res, _, remaining) = select_all(handles).await;
for task in remaining.into_iter() {
cancel_task!(task);
}
res?
})
.instrument(span)
}
/// Run several agents
#[allow(clippy::unit_arg, unused_must_use)]
fn run_all(self) -> Instrumented<JoinHandle<Result<()>>>
where
Self: Sized + 'static,
{
let span = info_span!("run_all");
tokio::spawn(async move {
// this is the unused must use
let names: Vec<&str> = self.replicas().keys().map(|k| k.as_str()).collect();
let run_task = self.run_many(&names);
let mut tasks = vec![run_task];
// kludge
if Self::AGENT_NAME != "kathy" {
let index_settings = self.as_ref().indexer.clone();
let sync_metrics = ContractSyncMetrics::new(self.metrics(), None);
// Only the processor needs to index messages so default is
// just indexing updates
let sync_task = self.home().sync(
Self::AGENT_NAME.to_owned(),
index_settings,
sync_metrics,
IndexDataTypes::Updates,
);
tasks.push(sync_task);
}
let (res, _, remaining) = select_all(tasks).await;
for task in remaining.into_iter() {
cancel_task!(task);
}
res?
})
.instrument(span)
}
}

rust/abacus-base/src/contract_sync/last_update.rs

@ -1,57 +0,0 @@
use abacus_core::{ListValidity, SignedUpdateWithMeta};
use ethers::core::types::H256;
/// Optional latest new root. This struct accounts for the possibility
/// that ContractSync has yet to see its first update. We want to check
/// for validity of new list of updates against a potential previous update
/// (Some case) but also still validate the new updates in the case that we
/// have not seen any previous updates (None case).
#[derive(Debug)]
pub(crate) struct OptLatestNewRoot(Option<H256>);
impl From<H256> for OptLatestNewRoot {
fn from(latest_root: H256) -> Self {
Self(Some(latest_root))
}
}
impl From<Option<H256>> for OptLatestNewRoot {
fn from(opt: Option<H256>) -> Self {
Self(opt)
}
}
impl AsRef<Option<H256>> for OptLatestNewRoot {
fn as_ref(&self) -> &Option<H256> {
&self.0
}
}
impl OptLatestNewRoot {
/// Check if the list of sorted updates is a valid continuation of the latest known root. If self is Some, check the validity of the list in continuation of self. If self is None, check the validity of just the list.
pub fn valid_continuation(&self, sorted_updates: &[SignedUpdateWithMeta]) -> ListValidity {
if sorted_updates.is_empty() {
return ListValidity::Empty;
}
// If we have seen another update in a previous block range, ensure
// the batch contains an update building off latest known root
if let Some(last_seen) = self.as_ref() {
let has_desired_update = sorted_updates
.iter()
.any(|update| *last_seen == update.signed_update.update.previous_root);
if !has_desired_update {
return ListValidity::Invalid;
}
}
// Ensure no gaps in new batch of leaves
for pair in sorted_updates.windows(2) {
if pair[0].signed_update.update.new_root != pair[1].signed_update.update.previous_root {
return ListValidity::Invalid;
}
}
ListValidity::Valid
}
}

rust/abacus-base/src/contract_sync/metrics.rs

@ -5,13 +5,13 @@ use std::sync::Arc;
/// Struct encapsulating prometheus metrics used by the ContractSync.
#[derive(Debug, Clone)]
pub struct ContractSyncMetrics {
-    /// Most recently indexed block height (label values differentiate updates
+    /// Most recently indexed block height (label values differentiate checkpoints
    /// vs. messages)
    pub indexed_height: IntGaugeVec,
-    /// Events stored into DB (label values differentiate updates vs. messages)
+    /// Events stored into DB (label values differentiate checkpoints vs. messages)
    pub stored_events: IntGaugeVec,
    /// Unique occasions when agent missed an event (label values
-    /// differentiate updates vs. messages)
+    /// differentiate checkpoints vs. messages)
    pub missed_events: IntGaugeVec,
    /// An optional gauge for tracking the latest message leafs that are being indexed
    pub message_leaf_index: Option<IntGauge>,

rust/abacus-base/src/contract_sync/mod.rs

@ -3,7 +3,7 @@
use crate::settings::IndexSettings;
use abacus_core::db::AbacusDB;
-use abacus_core::{AbacusCommonIndexer, CommonIndexer, HomeIndexer, ListValidity, OutboxIndexer};
+use abacus_core::{AbacusCommonIndexer, ListValidity, OutboxIndexer};

use tokio::time::sleep;
use tracing::{info, info_span, warn};
@ -14,22 +14,19 @@ use std::sync::Arc;
use std::time::Duration;

mod last_message;
-mod last_update;
mod metrics;
mod schema;

use last_message::OptLatestLeafIndex;
-use last_update::OptLatestNewRoot;
pub use metrics::ContractSyncMetrics;
-use schema::{CommonContractSyncDB, HomeContractSyncDB};
+use schema::OutboxContractSyncDB;

-const UPDATES_LABEL: &str = "updates";
const MESSAGES_LABEL: &str = "messages";

/// Entity that drives the syncing of an agent's db with on-chain data.
-/// Extracts chain-specific data (emitted updates, messages, etc) from an
-/// `indexer` and fills the agent's db with this data. A CachingHome or
-/// CachingReplica will use a contract sync to spawn syncing tasks to keep the
+/// Extracts chain-specific data (emitted checkpoints, messages, etc) from an
+/// `indexer` and fills the agent's db with this data. A CachingOutbox or
+/// CachingInbox will use a contract sync to spawn syncing tasks to keep the
/// db up-to-date.
#[derive(Debug)]
pub struct ContractSync<I> {
@@ -62,147 +59,6 @@ impl<I> ContractSync<I> {
    }
}
impl<I> ContractSync<I>
where
I: CommonIndexer + 'static,
{
/// Spawn task that continuously looks for new on-chain updates and stores
/// them in db
pub fn sync_updates(&self) -> Instrumented<tokio::task::JoinHandle<color_eyre::Result<()>>> {
let span = info_span!("UpdateContractSync");
let db = self.db.clone();
let indexer = self.indexer.clone();
let indexed_height = self.metrics.indexed_height.clone().with_label_values(&[
UPDATES_LABEL,
&self.contract_name,
&self.agent_name,
]);
let stored_updates = self.metrics.stored_events.clone().with_label_values(&[
UPDATES_LABEL,
&self.contract_name,
&self.agent_name,
]);
let missed_updates = self.metrics.missed_events.clone().with_label_values(&[
UPDATES_LABEL,
&self.contract_name,
&self.agent_name,
]);
let config_from = self.index_settings.from();
let chunk_size = self.index_settings.chunk_size();
tokio::spawn(async move {
let mut from = db
.retrieve_update_latest_block_end()
.map_or_else(|| config_from, |h| h + 1);
let mut finding_missing = false;
let mut realized_missing_start_block: u32 = Default::default();
let mut realized_missing_end_block: u32 = Default::default();
let mut exponential: u32 = Default::default();
info!(from = from, "[Updates]: resuming indexer from {}", from);
loop {
indexed_height.set(from as i64);
// If we were searching for missing update and have reached
// original missing start block, turn off finding_missing and
// TRY to resume normal indexing
if finding_missing && from >= realized_missing_start_block {
info!("Turning off finding_missing mode");
finding_missing = false;
}
// If we have passed the end block of the missing update, we
// have found the update and can reset variables
if from > realized_missing_end_block && realized_missing_end_block != 0 {
missed_updates.inc();
exponential = 0;
realized_missing_start_block = 0;
realized_missing_end_block = 0;
}
let tip = indexer.get_block_number().await?;
if tip <= from {
// Sleep if we caught up to tip
sleep(Duration::from_secs(100)).await;
continue;
}
let candidate = from + chunk_size;
let to = min(tip, candidate);
info!(
from = from,
to = to,
"[Updates]: indexing block heights {}...{}",
from,
to
);
let sorted_updates = indexer.fetch_sorted_updates(from, to).await?;
// If no updates found, update last seen block and next height
// and continue
if sorted_updates.is_empty() {
db.store_update_latest_block_end(to)?;
from = to + 1;
continue;
}
// If updates found, check that list is valid
let last_new_root: OptLatestNewRoot = db.retrieve_latest_root()?.into();
match last_new_root.valid_continuation(&sorted_updates) {
ListValidity::Valid => {
// Store updates
db.store_updates_and_meta(&sorted_updates)?;
// Report amount of updates stored into db
stored_updates.add(sorted_updates.len().try_into()?);
// Move forward next height
db.store_update_latest_block_end(to)?;
from = to + 1;
}
ListValidity::Invalid => {
if finding_missing {
from = to + 1;
} else {
warn!(
last_new_root = ?last_new_root,
start_block = from,
end_block = to,
"[Updates]: RPC failed to find update(s) between blocks {}...{}. Last seen new root: {:?}. Activating finding_missing mode.",
from,
to,
last_new_root,
);
// Turn on finding_missing mode
finding_missing = true;
realized_missing_start_block = from;
realized_missing_end_block = to;
from = realized_missing_start_block
- (chunk_size * 2u32.pow(exponential as u32));
exponential += 1;
}
}
ListValidity::Empty => {
unreachable!("Attempted to validate empty list of updates")
}
};
}
})
.instrument(span)
}
}
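
The finding_missing arithmetic above doubles the look-back window on each retry. A standalone sketch of that schedule follows; the saturating_sub guard is an assumption about intent, since the removed code subtracts unchecked and could underflow near block 0:

/// Start block for the nth rewind attempt: the look-back window doubles
/// each time (chunk, 2 * chunk, 4 * chunk, ...).
fn rewind_start(missing_start: u32, chunk_size: u32, exponential: u32) -> u32 {
    missing_start.saturating_sub(chunk_size * 2u32.pow(exponential))
}

fn main() {
    // E.g. a missing range starting at block 1_000 with 100-block chunks.
    assert_eq!(rewind_start(1_000, 100, 0), 900);
    assert_eq!(rewind_start(1_000, 100, 1), 800);
    assert_eq!(rewind_start(1_000, 100, 2), 600);
    assert_eq!(rewind_start(1_000, 100, 3), 200);
    assert_eq!(rewind_start(1_000, 100, 4), 0); // saturates instead of underflowing
}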
impl<I> ContractSync<I>
where
    I: AbacusCommonIndexer + 'static,
@@ -222,158 +78,6 @@ where
    }
}
impl<I> ContractSync<I>
where
I: HomeIndexer + 'static,
{
/// Spawn task that continuously looks for new on-chain messages and stores
/// them in db
pub fn sync_messages(&self) -> Instrumented<tokio::task::JoinHandle<color_eyre::Result<()>>> {
let span = info_span!("MessageContractSync");
let db = self.db.clone();
let indexer = self.indexer.clone();
let indexed_height = self.metrics.indexed_height.clone().with_label_values(&[
MESSAGES_LABEL,
&self.contract_name,
&self.agent_name,
]);
let stored_messages = self.metrics.stored_events.clone().with_label_values(&[
MESSAGES_LABEL,
&self.contract_name,
&self.agent_name,
]);
let missed_messages = self.metrics.missed_events.clone().with_label_values(&[
MESSAGES_LABEL,
&self.contract_name,
&self.agent_name,
]);
let message_leaf_index = self.metrics.message_leaf_index.clone();
let config_from = self.index_settings.from();
let chunk_size = self.index_settings.chunk_size();
tokio::spawn(async move {
let mut from = db
.retrieve_message_latest_block_end()
.map_or_else(|| config_from, |h| h + 1);
let mut finding_missing = false;
let mut realized_missing_start_block = 0;
let mut realized_missing_end_block = 0;
let mut exponential = 0;
info!(from = from, "[Messages]: resuming indexer from {}", from);
// Set the metrics with the latest known leaf index
if let Ok(Some(idx)) = db.retrieve_latest_leaf_index() {
if let Some(gauge) = message_leaf_index.as_ref() {
gauge.set(idx as i64);
}
}
loop {
indexed_height.set(from as i64);
// If we were searching for missing message and have reached
// original missing start block, turn off finding_missing and
// TRY to resume normal indexing
if finding_missing && from >= realized_missing_start_block {
info!("Turning off finding_missing mode");
finding_missing = false;
}
// If we have passed the end block of the missing message, we
// have found the message and can reset variables
if from > realized_missing_end_block && realized_missing_end_block != 0 {
missed_messages.inc();
exponential = 0;
realized_missing_start_block = 0;
realized_missing_end_block = 0;
}
let tip = indexer.get_block_number().await?;
if tip <= from {
// Sleep if caught up to tip
sleep(Duration::from_secs(100)).await;
continue;
}
let candidate = from + chunk_size;
let to = min(tip, candidate);
info!(
from = from,
to = to,
"[Messages]: indexing block heights {}...{}",
from,
to
);
let sorted_messages = indexer.fetch_sorted_messages(from, to).await?;
// If no messages found, update last seen block and next height
// and continue
if sorted_messages.is_empty() {
db.store_message_latest_block_end(to)?;
from = to + 1;
continue;
}
// If messages found, check that list is valid
let last_leaf_index: OptLatestLeafIndex = db.retrieve_latest_leaf_index()?.into();
match &last_leaf_index.valid_continuation(&sorted_messages) {
ListValidity::Valid => {
// Store messages
let max_leaf_index_of_batch = db.store_messages(&sorted_messages)?;
// Report amount of messages stored into db
stored_messages.add(sorted_messages.len().try_into()?);
// Report latest leaf index to gauge
if let Some(gauge) = message_leaf_index.as_ref() {
gauge.set(max_leaf_index_of_batch as i64);
}
// Move forward next height
db.store_message_latest_block_end(to)?;
from = to + 1;
}
ListValidity::Invalid => {
if finding_missing {
from = to + 1;
} else {
warn!(
last_leaf_index = ?last_leaf_index,
start_block = from,
end_block = to,
"[Messages]: RPC failed to find message(s) between blocks {}...{}. Last seen leaf index: {:?}. Activating finding_missing mode.",
from,
to,
last_leaf_index,
);
// Turn on finding_missing mode
finding_missing = true;
realized_missing_start_block = from;
realized_missing_end_block = to;
from = realized_missing_start_block - (chunk_size * 2u32.pow(exponential as u32));
exponential += 1;
}
}
ListValidity::Empty => unreachable!("Tried to validate empty list of messages"),
};
}
})
.instrument(span)
}
}
impl<I> ContractSync<I>
where
    I: OutboxIndexer + 'static,
@@ -530,16 +234,15 @@ where

#[cfg(test)]
mod test {
-    use abacus_test::mocks::MockIndexer;
+    use abacus_test::mocks::indexer::MockAbacusIndexer;
    use mockall::*;
    use std::sync::Arc;

    use ethers::core::types::H256;
-    use ethers::signers::LocalWallet;

    use abacus_core::{
-        AbacusMessage, Encode, RawCommittedMessage, SignedUpdateWithMeta, Update, UpdateMeta,
+        AbacusMessage, Checkpoint, CheckpointMeta, CheckpointWithMeta, Encode, RawCommittedMessage,
    };
    use abacus_test::test_utils;

@@ -547,159 +250,141 @@ mod test {
    use crate::CoreMetrics;

    #[tokio::test]
-    async fn handles_missing_rpc_updates() {
+    #[ignore]
+    // Checkpoints are not indexed at the moment, remove #[ignore] when checkpoint
+    // indexing is implemented to use this test.
+    async fn handles_missing_rpc_checkpoints() {
        test_utils::run_test_db(|db| async move {
-            let signer: LocalWallet =
-                "1111111111111111111111111111111111111111111111111111111111111111"
-                    .parse()
-                    .unwrap();

            let first_root = H256::from([0; 32]);
            let second_root = H256::from([1; 32]);
            let third_root = H256::from([2; 32]);
            let fourth_root = H256::from([3; 32]);
            let fifth_root = H256::from([4; 32]);
-            let sixth_root = H256::from([4; 32]);

-            let first_update = Update {
-                home_domain: 1,
-                previous_root: first_root,
-                new_root: second_root,
-            }
-            .sign_with(&signer)
-            .await
-            .expect("!sign");
+            let first_checkpoint = Checkpoint {
+                outbox_domain: 1,
+                root: first_root,
+                index: 1,
+            };

-            let second_update = Update {
-                home_domain: 1,
-                previous_root: second_root,
-                new_root: third_root,
-            }
-            .sign_with(&signer)
-            .await
-            .expect("!sign");
+            let second_checkpoint = Checkpoint {
+                outbox_domain: 1,
+                root: second_root,
+                index: 2,
+            };

-            let third_update = Update {
-                home_domain: 1,
-                previous_root: third_root,
-                new_root: fourth_root,
-            }
-            .sign_with(&signer)
-            .await
-            .expect("!sign");
+            let third_checkpoint = Checkpoint {
+                outbox_domain: 1,
+                root: third_root,
+                index: 3,
+            };

-            let fourth_update = Update {
-                home_domain: 1,
-                previous_root: fourth_root,
-                new_root: fifth_root,
-            }
-            .sign_with(&signer)
-            .await
-            .expect("!sign");
+            let fourth_checkpoint = Checkpoint {
+                outbox_domain: 1,
+                root: fourth_root,
+                index: 4,
+            };

-            let fifth_update = Update {
-                home_domain: 1,
-                previous_root: fifth_root,
-                new_root: sixth_root,
-            }
-            .sign_with(&signer)
-            .await
-            .expect("!sign");
+            let fifth_checkpoint = Checkpoint {
+                outbox_domain: 1,
+                root: fifth_root,
+                index: 5,
+            };

-            let mut mock_indexer = MockIndexer::new();
+            let mut mock_indexer = MockAbacusIndexer::new();
            {
                let mut seq = Sequence::new();

-                let first_update_with_meta = SignedUpdateWithMeta {
-                    signed_update: first_update.clone(),
-                    metadata: UpdateMeta { block_number: 5 },
-                };
+                let first_checkpoint_with_meta = CheckpointWithMeta {
+                    checkpoint: first_checkpoint.clone(),
+                    metadata: CheckpointMeta { block_number: 5 },
+                };

-                let second_update_with_meta = SignedUpdateWithMeta {
-                    signed_update: second_update.clone(),
-                    metadata: UpdateMeta { block_number: 15 },
-                };
-                let second_update_with_meta_clone = second_update_with_meta.clone();
+                let second_checkpoint_with_meta = CheckpointWithMeta {
+                    checkpoint: second_checkpoint.clone(),
+                    metadata: CheckpointMeta { block_number: 15 },
+                };
+                let second_checkpoint_with_meta_clone = second_checkpoint_with_meta.clone();

-                let third_update_with_meta = SignedUpdateWithMeta {
-                    signed_update: third_update.clone(),
-                    metadata: UpdateMeta { block_number: 15 },
-                };
+                let third_checkpoint_with_meta = CheckpointWithMeta {
+                    checkpoint: third_checkpoint.clone(),
+                    metadata: CheckpointMeta { block_number: 15 },
+                };

-                let fourth_update_with_meta = SignedUpdateWithMeta {
-                    signed_update: fourth_update.clone(),
-                    metadata: UpdateMeta { block_number: 25 },
-                };
-                let fourth_update_with_meta_clone_1 = fourth_update_with_meta.clone();
-                let fourth_update_with_meta_clone_2 = fourth_update_with_meta.clone();
+                let fourth_checkpoint_with_meta = CheckpointWithMeta {
+                    checkpoint: fourth_checkpoint.clone(),
+                    metadata: CheckpointMeta { block_number: 25 },
+                };
+                let fourth_checkpoint_with_meta_clone_1 = fourth_checkpoint_with_meta.clone();
+                let fourth_checkpoint_with_meta_clone_2 = fourth_checkpoint_with_meta.clone();

-                let fifth_update_with_meta = SignedUpdateWithMeta {
-                    signed_update: fifth_update.clone(),
-                    metadata: UpdateMeta { block_number: 55 },
-                };
-                let fifth_update_with_meta_clone_1 = fifth_update_with_meta.clone();
-                let fifth_update_with_meta_clone_2 = fifth_update_with_meta.clone();
-                let fifth_update_with_meta_clone_3 = fifth_update_with_meta.clone();
+                let fifth_checkpoint_with_meta = CheckpointWithMeta {
+                    checkpoint: fifth_checkpoint.clone(),
+                    metadata: CheckpointMeta { block_number: 55 },
+                };
+                let fifth_checkpoint_with_meta_clone_1 = fifth_checkpoint_with_meta.clone();
+                let fifth_checkpoint_with_meta_clone_2 = fifth_checkpoint_with_meta.clone();
+                let fifth_checkpoint_with_meta_clone_3 = fifth_checkpoint_with_meta.clone();

-                // Return first update
+                // Return first checkpoint
                mock_indexer
                    .expect__get_block_number()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
-                    .return_once(move |_, _| Ok(vec![first_update_with_meta]));
+                    .return_once(move |_, _| Ok(vec![first_checkpoint_with_meta]));

-                // Return second update, misses third update
+                // Return second checkpoint, misses third checkpoint
                mock_indexer
                    .expect__get_block_number()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
-                    .return_once(move |_, _| Ok(vec![second_update_with_meta]));
+                    .return_once(move |_, _| Ok(vec![second_checkpoint_with_meta]));

-                // --> miss fourth update
+                // --> miss fourth checkpoint
                mock_indexer
                    .expect__get_block_number()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(move |_, _| Ok(vec![]));

-                // Next block range is empty updates
+                // Next block range is empty checkpoints
                mock_indexer
                    .expect__get_block_number()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(move |_, _| Ok(vec![]));

-                // second --> return fifth update is invalid
+                // second --> return fifth checkpoint is invalid
                mock_indexer
                    .expect__get_block_number()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
-                    .return_once(move |_, _| Ok(vec![fifth_update_with_meta]));
+                    .return_once(move |_, _| Ok(vec![fifth_checkpoint_with_meta]));

                // Indexer goes back and tries empty block range
                mock_indexer
@@ -708,7 +393,7 @@ mod test {
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(move |_, _| Ok(vec![]));
@@ -721,22 +406,22 @@
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
-                    .return_once(move |_, _| Ok(vec![fifth_update_with_meta_clone_1]));
+                    .return_once(move |_, _| Ok(vec![fifth_checkpoint_with_meta_clone_1]));

-                // Indexer goes back further and gets to the fourth update
+                // Indexer goes back further and gets to the fourth checkpoint
                mock_indexer
                    .expect__get_block_number()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
-                    .return_once(move |_, _| Ok(vec![fourth_update_with_meta_clone_1]));
+                    .return_once(move |_, _| Ok(vec![fourth_checkpoint_with_meta_clone_1]));

                // Indexer goes further for empty range
                mock_indexer
@@ -745,48 +430,51 @@
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(move |_, _| Ok(vec![]));

-                // Indexer goes back further and gets to the fifth update
+                // Indexer goes back further and gets to the fifth checkpoint
                mock_indexer
                    .expect__get_block_number()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
-                    .return_once(move |_, _| Ok(vec![fifth_update_with_meta_clone_2]));
+                    .return_once(move |_, _| Ok(vec![fifth_checkpoint_with_meta_clone_2]));

-                // Indexer goes back even further to find 2nd and 3rd update
+                // Indexer goes back even further to find 2nd and 3rd checkpoint
                mock_indexer
                    .expect__get_block_number()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(move |_, _| {
-                        Ok(vec![second_update_with_meta_clone, third_update_with_meta])
+                        Ok(vec![
+                            second_checkpoint_with_meta_clone,
+                            third_checkpoint_with_meta,
+                        ])
                    });

-                // Indexer goes forward and gets to the fourth update again
+                // Indexer goes forward and gets to the fourth checkpoint again
                mock_indexer
                    .expect__get_block_number()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
-                    .return_once(move |_, _| Ok(vec![fourth_update_with_meta_clone_2]));
+                    .return_once(move |_, _| Ok(vec![fourth_checkpoint_with_meta_clone_2]));

                // Indexer goes further for empty range
                mock_indexer
@@ -795,22 +483,22 @@
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(move |_, _| Ok(vec![]));

-                // Indexer goes back further and gets to the fifth update
+                // Indexer goes back further and gets to the fifth checkpoint
                mock_indexer
                    .expect__get_block_number()
                    .times(1)
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .times(1)
                    .in_sequence(&mut seq)
-                    .return_once(move |_, _| Ok(vec![fifth_update_with_meta_clone_3]));
+                    .return_once(move |_, _| Ok(vec![fifth_checkpoint_with_meta_clone_3]));

                // Return empty vec for remaining calls
                mock_indexer
@@ -819,11 +507,11 @@
                    .in_sequence(&mut seq)
                    .return_once(|| Ok(100));
                mock_indexer
-                    .expect__fetch_sorted_updates()
+                    .expect__fetch_sorted_checkpoints()
                    .return_once(move |_, _| Ok(vec![]));
            }

-            let abacus_db = AbacusDB::new("home_1", db);
+            let abacus_db = AbacusDB::new("outbox_1", db);

            let indexer = Arc::new(mock_indexer);
            let metrics = Arc::new(
@@ -839,7 +527,7 @@
            let contract_sync = ContractSync::new(
                "agent".to_owned(),
-                "home_1".to_owned(),
+                "outbox_1".to_owned(),
                abacus_db.clone(),
                indexer.clone(),
                IndexSettings {
@@ -849,46 +537,49 @@
                sync_metrics,
            );

-            let sync_task = contract_sync.sync_updates();
+            let sync_task = contract_sync.sync_checkpoints();
            sleep(Duration::from_secs(3)).await;
            cancel_task!(sync_task);

+            // Checkpoints indexing is not implemented at the moment.
+            // This can be used when it's implemented in the future.

-            assert_eq!(
-                abacus_db
-                    .update_by_previous_root(first_root)
-                    .expect("!db")
-                    .expect("!update"),
-                first_update.clone()
-            );
+            // assert_eq!(
+            //     abacus_db
+            //         .checkpoint_by_previous_root(first_root)
+            //         .expect("!db")
+            //         .expect("!checkpoint"),
+            //     first_checkpoint.clone()
+            // );

-            assert_eq!(
-                abacus_db
-                    .update_by_previous_root(second_root)
-                    .expect("!db")
-                    .expect("!update"),
-                second_update.clone()
-            );
+            // assert_eq!(
+            //     abacus_db
+            //         .checkpoint_by_previous_root(second_root)
+            //         .expect("!db")
+            //         .expect("!checkpoint"),
+            //     second_checkpoint.clone()
+            // );

-            assert_eq!(
-                abacus_db
-                    .update_by_previous_root(third_root)
-                    .expect("!db")
-                    .expect("!update"),
-                third_update.clone()
-            );
+            // assert_eq!(
+            //     abacus_db
+            //         .checkpoint_by_previous_root(third_root)
+            //         .expect("!db")
+            //         .expect("!checkpoint"),
+            //     third_checkpoint.clone()
+            // );

-            assert_eq!(
-                abacus_db
-                    .update_by_previous_root(fourth_root)
-                    .expect("!db")
-                    .expect("!update"),
-                fourth_update.clone()
-            );
+            // assert_eq!(
+            //     abacus_db
+            //         .checkpoint_by_previous_root(fourth_root)
+            //         .expect("!db")
+            //         .expect("!checkpoint"),
+            //     fourth_checkpoint.clone()
+            // );

-            assert_eq!(
-                abacus_db
-                    .update_by_previous_root(fifth_root)
-                    .expect("!db")
-                    .expect("!update"),
-                fifth_update.clone()
-            );
+            // assert_eq!(
+            //     abacus_db
+            //         .checkpoint_by_previous_root(fifth_root)
+            //         .expect("!db")
+            //         .expect("!checkpoint"),
+            //     fifth_checkpoint.clone()
+            // );
        })
        .await
    }
@@ -949,7 +640,7 @@
            let fifth_message_clone_2 = fifth_message.clone();
            let fifth_message_clone_3 = fifth_message.clone();

-            let mut mock_indexer = MockIndexer::new();
+            let mut mock_indexer = MockAbacusIndexer::new();
            {
                let mut seq = Sequence::new();
@@ -1133,7 +824,7 @@
                    .return_once(move |_, _| Ok(vec![]));
            }

-            let abacus_db = AbacusDB::new("home_1", db);
+            let abacus_db = AbacusDB::new("outbox_1", db);

            let indexer = Arc::new(mock_indexer);
            let metrics = Arc::new(
@@ -1149,7 +840,7 @@
            let contract_sync = ContractSync::new(
                "agent".to_owned(),
-                "home_1".to_owned(),
+                "outbox_1".to_owned(),
                abacus_db.clone(),
                indexer.clone(),
                IndexSettings {
@@ -1159,7 +850,7 @@
                sync_metrics,
            );

-            let sync_task = contract_sync.sync_messages();
+            let sync_task = contract_sync.sync_outbox_messages();
            sleep(Duration::from_secs(3)).await;
            cancel_task!(sync_task);

@@ -2,31 +2,14 @@ use abacus_core::db::AbacusDB;
use abacus_core::db::DbError;
use color_eyre::Result;

-static UPDATES_LAST_BLOCK_END: &str = "updates_last_inspected";
static MESSAGES_LAST_BLOCK_END: &str = "messages_last_inspected";

-pub(crate) trait CommonContractSyncDB {
-    fn store_update_latest_block_end(&self, latest_block: u32) -> Result<(), DbError>;
-    fn retrieve_update_latest_block_end(&self) -> Option<u32>;
-}
-
-pub(crate) trait HomeContractSyncDB {
+pub(crate) trait OutboxContractSyncDB {
    fn store_message_latest_block_end(&self, latest_block: u32) -> Result<(), DbError>;
    fn retrieve_message_latest_block_end(&self) -> Option<u32>;
}

-impl CommonContractSyncDB for AbacusDB {
-    fn store_update_latest_block_end(&self, latest_block: u32) -> Result<(), DbError> {
-        self.store_encodable("", UPDATES_LAST_BLOCK_END, &latest_block)
-    }
-
-    fn retrieve_update_latest_block_end(&self) -> Option<u32> {
-        self.retrieve_decodable("", UPDATES_LAST_BLOCK_END)
-            .expect("db failure")
-    }
-}
-
-impl HomeContractSyncDB for AbacusDB {
+impl OutboxContractSyncDB for AbacusDB {
    fn store_message_latest_block_end(&self, latest_block: u32) -> Result<(), DbError> {
        self.store_encodable("", MESSAGES_LAST_BLOCK_END, &latest_block)
    }
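
A small sketch of the resume behavior this trait supports, with an in-memory stand-in for AbacusDB (the block numbers are illustrative):

use std::collections::HashMap;

// In-memory stand-in for the AbacusDB-backed trait above.
struct FakeDb(HashMap<&'static str, u32>);

impl FakeDb {
    fn store_message_latest_block_end(&mut self, latest_block: u32) {
        self.0.insert("messages_last_inspected", latest_block);
    }
    fn retrieve_message_latest_block_end(&self) -> Option<u32> {
        self.0.get("messages_last_inspected").copied()
    }
}

fn main() {
    let config_from = 0;
    let mut db = FakeDb(HashMap::new());
    db.store_message_latest_block_end(1_234);

    // Mirrors the resume logic in sync_messages: restart one block past the
    // last fully-inspected block, falling back to the configured origin.
    let from = db
        .retrieve_message_latest_block_end()
        .map_or(config_from, |h| h + 1);
    assert_eq!(from, 1_235);
}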

@@ -1,433 +0,0 @@
use abacus_core::db::AbacusDB;
use abacus_core::{
ChainCommunicationError, Common, CommonEvents, DoubleUpdate, Home, HomeEvents, Message,
RawCommittedMessage, SignedUpdate, State, TxOutcome, Update,
};
use abacus_ethereum::EthereumHome;
use abacus_test::mocks::MockHomeContract;
use async_trait::async_trait;
use color_eyre::eyre::Result;
use ethers::core::types::H256;
use futures_util::future::select_all;
use std::str::FromStr;
use std::sync::Arc;
use tokio::task::JoinHandle;
use tokio::time::{sleep, Duration};
use tracing::{info_span, Instrument};
use tracing::{instrument, instrument::Instrumented};
use crate::{ContractSync, ContractSyncMetrics, HomeIndexers, IndexSettings};
/// Which data types the home ContractSync should index
#[derive(Debug, Clone)]
pub enum IndexDataTypes {
/// Updates
Updates,
/// Messages
Messages,
/// Updates and messages
Both,
}
/// Caching home type
#[derive(Debug)]
pub struct CachingHome {
home: Homes,
db: AbacusDB,
indexer: Arc<HomeIndexers>,
}
impl std::fmt::Display for CachingHome {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
impl CachingHome {
/// Instantiate new CachingHome
pub fn new(home: Homes, db: AbacusDB, indexer: Arc<HomeIndexers>) -> Self {
Self { home, db, indexer }
}
/// Return handle on home object
pub fn home(&self) -> Homes {
self.home.clone()
}
/// Return handle on AbacusDB
pub fn db(&self) -> AbacusDB {
self.db.clone()
}
/// Spawn a task that syncs the CachingHome's db with the on-chain event
/// data
pub fn sync(
&self,
agent_name: String,
index_settings: IndexSettings,
metrics: ContractSyncMetrics,
data_types: IndexDataTypes,
) -> Instrumented<JoinHandle<Result<()>>> {
let span = info_span!("HomeContractSync", self = %self);
let sync = ContractSync::new(
agent_name,
String::from_str(self.home.name()).expect("!string"),
self.db.clone(),
self.indexer.clone(),
index_settings,
metrics,
);
tokio::spawn(async move {
let tasks = match data_types {
IndexDataTypes::Updates => vec![sync.sync_updates()],
IndexDataTypes::Messages => vec![sync.sync_messages()],
IndexDataTypes::Both => vec![sync.sync_updates(), sync.sync_messages()],
};
let (_, _, remaining) = select_all(tasks).await;
for task in remaining.into_iter() {
cancel_task!(task);
}
Ok(())
})
.instrument(span)
}
}
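
The spawn-then-select_all pattern in sync (run several sync tasks and, as soon as any one exits, tear down the rest) in isolation; a sketch assuming only tokio and futures-util, with abort() standing in for the cancel_task! macro:

use futures_util::future::select_all;
use std::time::Duration;
use tokio::time::sleep;

#[tokio::main]
async fn main() {
    let tasks = vec![
        tokio::spawn(async {
            sleep(Duration::from_millis(10)).await;
            "updates"
        }),
        tokio::spawn(async {
            sleep(Duration::from_secs(60)).await;
            "messages"
        }),
    ];

    // Resolves as soon as the first task completes (or panics)...
    let (first_result, _index, remaining) = select_all(tasks).await;
    println!("first finished task returned: {:?}", first_result);

    // ...then abort the survivors so no sync task outlives its siblings.
    for task in remaining {
        task.abort();
    }
}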
#[async_trait]
impl Home for CachingHome {
fn local_domain(&self) -> u32 {
self.home.local_domain()
}
fn home_domain_hash(&self) -> H256 {
self.home.home_domain_hash()
}
async fn nonces(&self, destination: u32) -> Result<u32, ChainCommunicationError> {
self.home.nonces(destination).await
}
async fn dispatch(&self, message: &Message) -> Result<TxOutcome, ChainCommunicationError> {
self.home.dispatch(message).await
}
async fn queue_contains(&self, root: H256) -> Result<bool, ChainCommunicationError> {
self.home.queue_contains(root).await
}
async fn improper_update(
&self,
update: &SignedUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {
self.home.improper_update(update).await
}
async fn produce_update(&self) -> Result<Option<Update>, ChainCommunicationError> {
self.home.produce_update().await
}
}
#[async_trait]
impl HomeEvents for CachingHome {
#[tracing::instrument(err, skip(self))]
async fn raw_message_by_nonce(
&self,
destination: u32,
nonce: u32,
) -> Result<Option<RawCommittedMessage>, ChainCommunicationError> {
loop {
if let Some(message) = self.db.message_by_nonce(destination, nonce)? {
return Ok(Some(message));
}
sleep(Duration::from_millis(500)).await;
}
}
#[tracing::instrument(err, skip(self))]
async fn raw_message_by_leaf(
&self,
leaf: H256,
) -> Result<Option<RawCommittedMessage>, ChainCommunicationError> {
loop {
if let Some(message) = self.db.message_by_leaf(leaf)? {
return Ok(Some(message));
}
sleep(Duration::from_millis(500)).await;
}
}
async fn leaf_by_tree_index(
&self,
tree_index: usize,
) -> Result<Option<H256>, ChainCommunicationError> {
loop {
if let Some(update) = self.db.leaf_by_leaf_index(tree_index as u32)? {
return Ok(Some(update));
}
sleep(Duration::from_millis(500)).await;
}
}
}
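
Each getter above is the same poll-until-present idiom: query the local db, and if the value has not been indexed yet, sleep 500ms and retry (note they only resolve once the value appears, despite the Option in the signature). Distilled into a hypothetical helper, not part of the crate:

use std::time::Duration;
use tokio::time::sleep;

// Retry a fallible lookup every 500ms until it yields a value, propagating
// storage errors immediately.
async fn poll_until_some<T, E, F>(mut lookup: F) -> Result<T, E>
where
    F: FnMut() -> Result<Option<T>, E>,
{
    loop {
        if let Some(value) = lookup()? {
            return Ok(value);
        }
        sleep(Duration::from_millis(500)).await;
    }
}

#[tokio::main]
async fn main() {
    let mut attempts = 0;
    // Succeeds on the third poll; a real caller would be reading AbacusDB.
    let value: Result<u32, ()> = poll_until_some(|| {
        attempts += 1;
        Ok(if attempts >= 3 { Some(42) } else { None })
    })
    .await;
    assert_eq!(value, Ok(42));
}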
#[async_trait]
impl Common for CachingHome {
fn name(&self) -> &str {
self.home.name()
}
async fn status(&self, txid: H256) -> Result<Option<TxOutcome>, ChainCommunicationError> {
self.home.status(txid).await
}
async fn updater(&self) -> Result<H256, ChainCommunicationError> {
self.home.updater().await
}
async fn state(&self) -> Result<State, ChainCommunicationError> {
self.home.state().await
}
async fn committed_root(&self) -> Result<H256, ChainCommunicationError> {
self.home.committed_root().await
}
async fn update(&self, update: &SignedUpdate) -> Result<TxOutcome, ChainCommunicationError> {
self.home.update(update).await
}
async fn double_update(
&self,
double: &DoubleUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {
self.home.double_update(double).await
}
}
#[async_trait]
impl CommonEvents for CachingHome {
#[tracing::instrument(err, skip(self))]
async fn signed_update_by_old_root(
&self,
old_root: H256,
) -> Result<Option<SignedUpdate>, ChainCommunicationError> {
loop {
if let Some(update) = self.db.update_by_previous_root(old_root)? {
return Ok(Some(update));
}
sleep(Duration::from_millis(500)).await;
}
}
#[tracing::instrument(err, skip(self))]
async fn signed_update_by_new_root(
&self,
new_root: H256,
) -> Result<Option<SignedUpdate>, ChainCommunicationError> {
loop {
if let Some(update) = self.db.update_by_new_root(new_root)? {
return Ok(Some(update));
}
sleep(Duration::from_millis(500)).await;
}
}
}
#[derive(Debug, Clone)]
/// Arc wrapper for HomeVariants enum
pub struct Homes(Arc<HomeVariants>);
impl From<HomeVariants> for Homes {
fn from(homes: HomeVariants) -> Self {
Self(Arc::new(homes))
}
}
impl std::ops::Deref for Homes {
type Target = Arc<HomeVariants>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for Homes {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// Home type
#[derive(Debug)]
pub enum HomeVariants {
/// Ethereum home contract
Ethereum(Box<dyn Home>),
/// Mock home contract
Mock(Box<MockHomeContract>),
/// Other home variant
Other(Box<dyn Home>),
}
impl HomeVariants {
/// Calls checkpoint on mock variant. Should
/// only be used during tests.
#[doc(hidden)]
pub fn checkpoint(&mut self) {
if let HomeVariants::Mock(home) = self {
home.checkpoint();
} else {
panic!("Home should be mock variant!");
}
}
}
impl<M> From<EthereumHome<M>> for Homes
where
M: ethers::providers::Middleware + 'static,
{
fn from(home: EthereumHome<M>) -> Self {
HomeVariants::Ethereum(Box::new(home)).into()
}
}
impl From<MockHomeContract> for Homes {
fn from(mock_home: MockHomeContract) -> Self {
HomeVariants::Mock(Box::new(mock_home)).into()
}
}
impl From<Box<dyn Home>> for Homes {
fn from(home: Box<dyn Home>) -> Self {
HomeVariants::Other(home).into()
}
}
#[async_trait]
impl Home for HomeVariants {
fn local_domain(&self) -> u32 {
match self {
HomeVariants::Ethereum(home) => home.local_domain(),
HomeVariants::Mock(mock_home) => mock_home.local_domain(),
HomeVariants::Other(home) => home.local_domain(),
}
}
fn home_domain_hash(&self) -> H256 {
match self {
HomeVariants::Ethereum(home) => home.home_domain_hash(),
HomeVariants::Mock(mock_home) => mock_home.home_domain_hash(),
HomeVariants::Other(home) => home.home_domain_hash(),
}
}
#[instrument(level = "trace", err)]
async fn nonces(&self, destination: u32) -> Result<u32, ChainCommunicationError> {
match self {
HomeVariants::Ethereum(home) => home.nonces(destination).await,
HomeVariants::Mock(mock_home) => mock_home.nonces(destination).await,
HomeVariants::Other(home) => home.nonces(destination).await,
}
}
#[instrument(level = "trace", err)]
async fn dispatch(&self, message: &Message) -> Result<TxOutcome, ChainCommunicationError> {
match self {
HomeVariants::Ethereum(home) => home.dispatch(message).await,
HomeVariants::Mock(mock_home) => mock_home.dispatch(message).await,
HomeVariants::Other(home) => home.dispatch(message).await,
}
}
async fn queue_contains(&self, root: H256) -> Result<bool, ChainCommunicationError> {
match self {
HomeVariants::Ethereum(home) => home.queue_contains(root).await,
HomeVariants::Mock(mock_home) => mock_home.queue_contains(root).await,
HomeVariants::Other(home) => home.queue_contains(root).await,
}
}
async fn improper_update(
&self,
update: &SignedUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {
match self {
HomeVariants::Ethereum(home) => home.improper_update(update).await,
HomeVariants::Mock(mock_home) => mock_home.improper_update(update).await,
HomeVariants::Other(home) => home.improper_update(update).await,
}
}
#[instrument(err)]
async fn produce_update(&self) -> Result<Option<Update>, ChainCommunicationError> {
match self {
HomeVariants::Ethereum(home) => home.produce_update().await,
HomeVariants::Mock(mock_home) => mock_home.produce_update().await,
HomeVariants::Other(home) => home.produce_update().await,
}
}
}
#[async_trait]
impl Common for HomeVariants {
fn name(&self) -> &str {
match self {
HomeVariants::Ethereum(home) => home.name(),
HomeVariants::Mock(mock_home) => mock_home.name(),
HomeVariants::Other(home) => home.name(),
}
}
async fn status(&self, txid: H256) -> Result<Option<TxOutcome>, ChainCommunicationError> {
match self {
HomeVariants::Ethereum(home) => home.status(txid).await,
HomeVariants::Mock(mock_home) => mock_home.status(txid).await,
HomeVariants::Other(home) => home.status(txid).await,
}
}
async fn updater(&self) -> Result<H256, ChainCommunicationError> {
match self {
HomeVariants::Ethereum(home) => home.updater().await,
HomeVariants::Mock(mock_home) => mock_home.updater().await,
HomeVariants::Other(home) => home.updater().await,
}
}
async fn state(&self) -> Result<State, ChainCommunicationError> {
match self {
HomeVariants::Ethereum(home) => home.state().await,
HomeVariants::Mock(mock_home) => mock_home.state().await,
HomeVariants::Other(home) => home.state().await,
}
}
async fn committed_root(&self) -> Result<H256, ChainCommunicationError> {
match self {
HomeVariants::Ethereum(home) => home.committed_root().await,
HomeVariants::Mock(mock_home) => mock_home.committed_root().await,
HomeVariants::Other(home) => home.committed_root().await,
}
}
async fn update(&self, update: &SignedUpdate) -> Result<TxOutcome, ChainCommunicationError> {
match self {
HomeVariants::Ethereum(home) => home.update(update).await,
HomeVariants::Mock(mock_home) => mock_home.update(update).await,
HomeVariants::Other(home) => home.update(update).await,
}
}
async fn double_update(
&self,
double: &DoubleUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {
match self {
HomeVariants::Ethereum(home) => home.double_update(double).await,
HomeVariants::Mock(mock_home) => mock_home.double_update(double).await,
HomeVariants::Other(home) => home.double_update(double).await,
}
}
}

@@ -143,8 +143,8 @@ impl AbacusCommon for CachingInbox {
pub struct Inboxes(Arc<InboxVariants>);

impl From<InboxVariants> for Inboxes {
-    fn from(homes: InboxVariants) -> Self {
-        Self(Arc::new(homes))
+    fn from(inboxes: InboxVariants) -> Self {
+        Self(Arc::new(inboxes))
    }
}

@@ -1,94 +1,8 @@
-use abacus_core::{
-    AbacusCommonIndexer, CheckpointWithMeta, CommonIndexer, HomeIndexer, OutboxIndexer,
-    RawCommittedMessage, SignedUpdateWithMeta,
-};
-use abacus_test::mocks::{indexer::MockAbacusIndexer, MockIndexer};
+use abacus_core::{AbacusCommonIndexer, CheckpointWithMeta, OutboxIndexer, RawCommittedMessage};
+use abacus_test::mocks::indexer::MockAbacusIndexer;
use async_trait::async_trait;
use color_eyre::Result;
/// Home/Replica CommonIndexer type
#[derive(Debug)]
pub enum CommonIndexers {
/// Ethereum contract indexer
Ethereum(Box<dyn CommonIndexer>),
/// Mock indexer
Mock(Box<dyn CommonIndexer>),
/// Other indexer variant
Other(Box<dyn CommonIndexer>),
}
impl From<MockIndexer> for CommonIndexers {
fn from(mock_indexer: MockIndexer) -> Self {
CommonIndexers::Mock(Box::new(mock_indexer))
}
}
#[async_trait]
impl CommonIndexer for CommonIndexers {
async fn get_block_number(&self) -> Result<u32> {
match self {
CommonIndexers::Ethereum(indexer) => indexer.get_block_number().await,
CommonIndexers::Mock(indexer) => indexer.get_block_number().await,
CommonIndexers::Other(indexer) => indexer.get_block_number().await,
}
}
async fn fetch_sorted_updates(&self, from: u32, to: u32) -> Result<Vec<SignedUpdateWithMeta>> {
match self {
CommonIndexers::Ethereum(indexer) => indexer.fetch_sorted_updates(from, to).await,
CommonIndexers::Mock(indexer) => indexer.fetch_sorted_updates(from, to).await,
CommonIndexers::Other(indexer) => indexer.fetch_sorted_updates(from, to).await,
}
}
}
/// HomeIndexer type
#[derive(Debug)]
pub enum HomeIndexers {
/// Ethereum contract indexer
Ethereum(Box<dyn HomeIndexer>),
/// Mock indexer
Mock(Box<dyn HomeIndexer>),
/// Other indexer variant
Other(Box<dyn HomeIndexer>),
}
impl From<MockIndexer> for HomeIndexers {
fn from(mock_indexer: MockIndexer) -> Self {
HomeIndexers::Mock(Box::new(mock_indexer))
}
}
#[async_trait]
impl CommonIndexer for HomeIndexers {
async fn get_block_number(&self) -> Result<u32> {
match self {
HomeIndexers::Ethereum(indexer) => indexer.get_block_number().await,
HomeIndexers::Mock(indexer) => indexer.get_block_number().await,
HomeIndexers::Other(indexer) => indexer.get_block_number().await,
}
}
async fn fetch_sorted_updates(&self, from: u32, to: u32) -> Result<Vec<SignedUpdateWithMeta>> {
match self {
HomeIndexers::Ethereum(indexer) => indexer.fetch_sorted_updates(from, to).await,
HomeIndexers::Mock(indexer) => indexer.fetch_sorted_updates(from, to).await,
HomeIndexers::Other(indexer) => indexer.fetch_sorted_updates(from, to).await,
}
}
}
#[async_trait]
impl HomeIndexer for HomeIndexers {
async fn fetch_sorted_messages(&self, from: u32, to: u32) -> Result<Vec<RawCommittedMessage>> {
match self {
HomeIndexers::Ethereum(indexer) => indexer.fetch_sorted_messages(from, to).await,
HomeIndexers::Mock(indexer) => indexer.fetch_sorted_messages(from, to).await,
HomeIndexers::Other(indexer) => indexer.fetch_sorted_messages(from, to).await,
}
}
}
/// Outbox/Inbox CommonIndexer type
#[derive(Debug)]
pub enum AbacusCommonIndexers {

@@ -2,7 +2,7 @@
//! It has common utils and tools for configuring the app, interacting with the
//! smart contracts, etc.
//!
-//! Implementations of the `Home` and `Replica` traits on different chains
+//! Implementations of the `Outbox` and `Inbox` traits on different chains
//! ought to live here.

#![forbid(unsafe_code)]

@@ -22,10 +22,6 @@ pub use agent::*;
mod macros;
pub use macros::*;

-/// Home type
-mod home;
-pub use home::*;
-
/// outbox type
mod outbox;
pub use outbox::*;

@@ -34,13 +30,6 @@ pub use outbox::*;
mod inbox;
pub use inbox::*;

-/// Replica type
-mod replica;
-pub use replica::*;
-
-/// XAppConnectionManager type
-mod xapp;
-pub use xapp::*;
-
mod metrics;
pub use metrics::*;

@@ -15,8 +15,8 @@ macro_rules! cancel_task {
/// Shortcut for implementing agent traits
macro_rules! impl_as_ref_core {
    ($agent:ident) => {
-        impl AsRef<abacus_base::AgentCore> for $agent {
-            fn as_ref(&self) -> &abacus_base::AgentCore {
+        impl AsRef<abacus_base::AbacusAgentCore> for $agent {
+            fn as_ref(&self) -> &abacus_base::AbacusAgentCore {
                &self.core
            }
        }

@@ -36,7 +36,7 @@ macro_rules! decl_agent {
        #[derive(Debug)]
        pub struct $name {
            $($prop: $type,)*
-            core: abacus_base::AgentCore,
+            core: abacus_base::AbacusAgentCore,
        }

        $crate::impl_as_ref_core!($name);

@@ -57,10 +57,11 @@ macro_rules! decl_agent {
/// ### Usage
///
/// ```ignore
-/// decl_settings!(Updater {
-///     updater: SignerConf,
-///     polling_interval: String,
-///     update_pause: String,
+/// decl_settings!(Validator {
+///     validator: SignerConf,
+///     checkpointsyncer: CheckpointSyncerConf,
+///     reorgperiod: String,
+///     interval: String,
/// });
/// ```
macro_rules! decl_settings {

@@ -95,7 +95,7 @@ impl CoreMetrics {
    /// Register an int gauge.
    ///
-    /// If this metric is per-replica, use `new_replica_int_gauge`
+    /// If this metric is per-inbox, use `new_inbox_int_gauge`
    pub fn new_int_gauge(
        &self,
        metric_name: &str,

@@ -115,7 +115,7 @@ impl CoreMetrics {
    /// Register an int counter.
    ///
-    /// If this metric is per-replica, use `new_replica_int_counter`
+    /// If this metric is per-inbox, use `new_inbox_int_counter`
    pub fn new_int_counter(
        &self,
        metric_name: &str,

@@ -118,8 +118,8 @@ impl OutboxEvents for CachingOutbox {
        nonce: u32,
    ) -> Result<Option<RawCommittedMessage>, ChainCommunicationError> {
        loop {
-            if let Some(update) = self.db.message_by_nonce(destination, nonce)? {
-                return Ok(Some(update));
+            if let Some(message) = self.db.message_by_nonce(destination, nonce)? {
+                return Ok(Some(message));
            }
            sleep(Duration::from_millis(500)).await;
        }

@@ -131,8 +131,8 @@ impl OutboxEvents for CachingOutbox {
        leaf: H256,
    ) -> Result<Option<RawCommittedMessage>, ChainCommunicationError> {
        loop {
-            if let Some(update) = self.db.message_by_leaf(leaf)? {
-                return Ok(Some(update));
+            if let Some(message) = self.db.message_by_leaf(leaf)? {
+                return Ok(Some(message));
            }
            sleep(Duration::from_millis(500)).await;
        }

@@ -143,8 +143,8 @@ impl OutboxEvents for CachingOutbox {
        tree_index: usize,
    ) -> Result<Option<H256>, ChainCommunicationError> {
        loop {
-            if let Some(update) = self.db.leaf_by_leaf_index(tree_index as u32)? {
-                return Ok(Some(update));
+            if let Some(leaf) = self.db.leaf_by_leaf_index(tree_index as u32)? {
+                return Ok(Some(leaf));
            }
            sleep(Duration::from_millis(500)).await;
        }

@@ -1,366 +0,0 @@
use abacus_core::{
accumulator::merkle::Proof, db::AbacusDB, AbacusMessage, ChainCommunicationError, Common,
CommonEvents, DoubleUpdate, MessageStatus, Replica, SignedUpdate, State, TxOutcome,
};
use async_trait::async_trait;
use color_eyre::eyre::Result;
use ethers::core::types::H256;
use abacus_ethereum::EthereumReplica;
use abacus_test::mocks::MockReplicaContract;
use std::str::FromStr;
use std::sync::Arc;
use tokio::task::JoinHandle;
use tokio::time::{sleep, Duration};
use tracing::{info_span, Instrument};
use tracing::{instrument, instrument::Instrumented};
use crate::{CommonIndexers, ContractSync, ContractSyncMetrics, IndexSettings};
/// Caching replica type
#[derive(Debug)]
pub struct CachingReplica {
replica: Replicas,
db: AbacusDB,
indexer: Arc<CommonIndexers>,
}
impl std::fmt::Display for CachingReplica {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
impl CachingReplica {
/// Instantiate new CachingReplica
pub fn new(replica: Replicas, db: AbacusDB, indexer: Arc<CommonIndexers>) -> Self {
Self {
replica,
db,
indexer,
}
}
    /// Return handle on replica object
pub fn replica(&self) -> Replicas {
self.replica.clone()
}
/// Return handle on AbacusDB
pub fn db(&self) -> AbacusDB {
self.db.clone()
}
/// Spawn a task that syncs the CachingReplica's db with the on-chain event
/// data
pub fn sync(
&self,
agent_name: String,
index_settings: IndexSettings,
metrics: ContractSyncMetrics,
) -> Instrumented<JoinHandle<Result<()>>> {
let span = info_span!("ReplicaContractSync", self = %self);
let sync = ContractSync::new(
agent_name,
String::from_str(self.replica.name()).expect("!string"),
self.db.clone(),
self.indexer.clone(),
index_settings,
metrics,
);
tokio::spawn(async move {
let _ = sync.sync_updates().await?;
Ok(())
})
.instrument(span)
}
}
#[async_trait]
impl Replica for CachingReplica {
fn local_domain(&self) -> u32 {
self.replica.local_domain()
}
async fn remote_domain(&self) -> Result<u32, ChainCommunicationError> {
self.replica.remote_domain().await
}
async fn prove(&self, proof: &Proof) -> Result<TxOutcome, ChainCommunicationError> {
self.replica.prove(proof).await
}
async fn process(&self, message: &AbacusMessage) -> Result<TxOutcome, ChainCommunicationError> {
self.replica.process(message).await
}
async fn message_status(&self, leaf: H256) -> Result<MessageStatus, ChainCommunicationError> {
self.replica.message_status(leaf).await
}
async fn acceptable_root(&self, root: H256) -> Result<bool, ChainCommunicationError> {
self.replica.acceptable_root(root).await
}
}
#[async_trait]
impl Common for CachingReplica {
fn name(&self) -> &str {
self.replica.name()
}
async fn status(&self, txid: H256) -> Result<Option<TxOutcome>, ChainCommunicationError> {
self.replica.status(txid).await
}
async fn updater(&self) -> Result<H256, ChainCommunicationError> {
self.replica.updater().await
}
async fn state(&self) -> Result<State, ChainCommunicationError> {
self.replica.state().await
}
async fn committed_root(&self) -> Result<H256, ChainCommunicationError> {
self.replica.committed_root().await
}
async fn update(&self, update: &SignedUpdate) -> Result<TxOutcome, ChainCommunicationError> {
self.replica.update(update).await
}
async fn double_update(
&self,
double: &DoubleUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {
self.replica.double_update(double).await
}
}
#[async_trait]
impl CommonEvents for CachingReplica {
#[tracing::instrument(err)]
async fn signed_update_by_old_root(
&self,
old_root: H256,
) -> Result<Option<SignedUpdate>, ChainCommunicationError> {
loop {
if let Some(update) = self.db.update_by_previous_root(old_root)? {
return Ok(Some(update));
}
sleep(Duration::from_millis(500)).await;
}
}
#[tracing::instrument(err)]
async fn signed_update_by_new_root(
&self,
new_root: H256,
) -> Result<Option<SignedUpdate>, ChainCommunicationError> {
loop {
if let Some(update) = self.db.update_by_new_root(new_root)? {
return Ok(Some(update));
}
sleep(Duration::from_millis(500)).await;
}
}
}
#[derive(Debug, Clone)]
/// Arc wrapper for ReplicaVariants enum
pub struct Replicas(Arc<ReplicaVariants>);
impl From<ReplicaVariants> for Replicas {
fn from(homes: ReplicaVariants) -> Self {
Self(Arc::new(homes))
}
}
impl std::ops::Deref for Replicas {
type Target = Arc<ReplicaVariants>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for Replicas {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// Replica type
#[derive(Debug)]
pub enum ReplicaVariants {
/// Ethereum replica contract
Ethereum(Box<dyn Replica>),
/// Mock replica contract
Mock(Box<MockReplicaContract>),
/// Other replica variant
Other(Box<dyn Replica>),
}
impl ReplicaVariants {
/// Calls checkpoint on mock variant. Should
/// only be used during tests.
#[doc(hidden)]
pub fn checkpoint(&mut self) {
if let ReplicaVariants::Mock(replica) = self {
replica.checkpoint();
} else {
panic!("Replica should be mock variant!");
}
}
}
impl<M> From<EthereumReplica<M>> for Replicas
where
M: ethers::providers::Middleware + 'static,
{
fn from(replica: EthereumReplica<M>) -> Self {
ReplicaVariants::Ethereum(Box::new(replica)).into()
}
}
impl From<MockReplicaContract> for Replicas {
fn from(mock_replica: MockReplicaContract) -> Self {
ReplicaVariants::Mock(Box::new(mock_replica)).into()
}
}
impl From<Box<dyn Replica>> for Replicas {
fn from(replica: Box<dyn Replica>) -> Self {
ReplicaVariants::Other(replica).into()
}
}
#[async_trait]
impl Replica for ReplicaVariants {
fn local_domain(&self) -> u32 {
match self {
ReplicaVariants::Ethereum(replica) => replica.local_domain(),
ReplicaVariants::Mock(mock_replica) => mock_replica.local_domain(),
ReplicaVariants::Other(replica) => replica.local_domain(),
}
}
async fn remote_domain(&self) -> Result<u32, ChainCommunicationError> {
match self {
ReplicaVariants::Ethereum(replica) => replica.remote_domain().await,
ReplicaVariants::Mock(mock_replica) => mock_replica.remote_domain().await,
ReplicaVariants::Other(replica) => replica.remote_domain().await,
}
}
async fn prove(&self, proof: &Proof) -> Result<TxOutcome, ChainCommunicationError> {
match self {
ReplicaVariants::Ethereum(replica) => replica.prove(proof).await,
ReplicaVariants::Mock(mock_replica) => mock_replica.prove(proof).await,
ReplicaVariants::Other(replica) => replica.prove(proof).await,
}
}
async fn process(&self, message: &AbacusMessage) -> Result<TxOutcome, ChainCommunicationError> {
match self {
ReplicaVariants::Ethereum(replica) => replica.process(message).await,
ReplicaVariants::Mock(mock_replica) => mock_replica.process(message).await,
ReplicaVariants::Other(replica) => replica.process(message).await,
}
}
async fn message_status(&self, leaf: H256) -> Result<MessageStatus, ChainCommunicationError> {
match self {
ReplicaVariants::Ethereum(replica) => replica.message_status(leaf).await,
ReplicaVariants::Mock(mock_replica) => mock_replica.message_status(leaf).await,
ReplicaVariants::Other(replica) => replica.message_status(leaf).await,
}
}
async fn prove_and_process(
&self,
message: &AbacusMessage,
proof: &Proof,
) -> Result<TxOutcome, ChainCommunicationError> {
match self {
ReplicaVariants::Ethereum(replica) => replica.prove_and_process(message, proof).await,
ReplicaVariants::Mock(mock_replica) => {
mock_replica.prove_and_process(message, proof).await
}
ReplicaVariants::Other(replica) => replica.prove_and_process(message, proof).await,
}
}
async fn acceptable_root(&self, root: H256) -> Result<bool, ChainCommunicationError> {
match self {
ReplicaVariants::Ethereum(replica) => replica.acceptable_root(root).await,
ReplicaVariants::Mock(mock_replica) => mock_replica.acceptable_root(root).await,
ReplicaVariants::Other(replica) => replica.acceptable_root(root).await,
}
}
}
#[async_trait]
impl Common for ReplicaVariants {
fn name(&self) -> &str {
match self {
ReplicaVariants::Ethereum(replica) => replica.name(),
ReplicaVariants::Mock(mock_replica) => mock_replica.name(),
ReplicaVariants::Other(replica) => replica.name(),
}
}
async fn status(&self, txid: H256) -> Result<Option<TxOutcome>, ChainCommunicationError> {
match self {
ReplicaVariants::Ethereum(replica) => replica.status(txid).await,
ReplicaVariants::Mock(mock_replica) => mock_replica.status(txid).await,
ReplicaVariants::Other(replica) => replica.status(txid).await,
}
}
async fn updater(&self) -> Result<H256, ChainCommunicationError> {
match self {
ReplicaVariants::Ethereum(replica) => replica.updater().await,
ReplicaVariants::Mock(mock_replica) => mock_replica.updater().await,
ReplicaVariants::Other(replica) => replica.updater().await,
}
}
async fn state(&self) -> Result<State, ChainCommunicationError> {
match self {
ReplicaVariants::Ethereum(replica) => replica.state().await,
ReplicaVariants::Mock(mock_replica) => mock_replica.state().await,
ReplicaVariants::Other(replica) => replica.state().await,
}
}
async fn committed_root(&self) -> Result<H256, ChainCommunicationError> {
match self {
ReplicaVariants::Ethereum(replica) => replica.committed_root().await,
ReplicaVariants::Mock(mock_replica) => mock_replica.committed_root().await,
ReplicaVariants::Other(replica) => replica.committed_root().await,
}
}
#[instrument(fields(update = %update.update))]
async fn update(&self, update: &SignedUpdate) -> Result<TxOutcome, ChainCommunicationError> {
match self {
ReplicaVariants::Ethereum(replica) => replica.update(update).await,
ReplicaVariants::Mock(mock_replica) => mock_replica.update(update).await,
ReplicaVariants::Other(replica) => replica.update(update).await,
}
}
async fn double_update(
&self,
double: &DoubleUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {
match self {
ReplicaVariants::Ethereum(replica) => replica.double_update(double).await,
ReplicaVariants::Mock(mock_replica) => mock_replica.double_update(double).await,
ReplicaVariants::Other(replica) => replica.double_update(double).await,
}
}
}

@@ -2,14 +2,9 @@ use color_eyre::Report;
use serde::Deserialize;

use abacus_core::{ContractLocator, Signers};
-use abacus_ethereum::{
-    make_conn_manager, make_home, make_inbox, make_outbox, make_replica, Connection,
-};
+use abacus_ethereum::{make_inbox, make_outbox, Connection};

-use crate::{
-    home::Homes, replica::Replicas, xapp::ConnectionManagers, HomeVariants, InboxVariants, Inboxes,
-    OutboxVariants, Outboxes, ReplicaVariants,
-};
+use crate::{InboxVariants, Inboxes, OutboxVariants, Outboxes};
/// A connection to _some_ blockchain.
///
@@ -28,8 +23,8 @@ impl Default for ChainConf {
    }
}

-/// A chain setup is a domain ID, an address on that chain (where the home or
-/// replica is deployed) and details for connecting to the chain API.
+/// A chain setup is a domain ID, an address on that chain (where the outbox or
+/// inbox is deployed) and details for connecting to the chain API.
#[derive(Clone, Debug, Deserialize, Default)]
pub struct ChainSetup {
    /// Chain name

@@ -41,31 +36,12 @@ pub struct ChainSetup {
    /// The chain connection details
    #[serde(flatten)]
    pub chain: ChainConf,
-    /// Set this key to disable the replica. Does nothing for homes.
+    /// Set this key to disable the inbox. Does nothing for outboxes.
    #[serde(default)]
    pub disabled: Option<String>,
}

impl ChainSetup {
/// Try to convert the chain setting into a Home contract
pub async fn try_into_home(&self, signer: Option<Signers>) -> Result<Homes, Report> {
match &self.chain {
ChainConf::Ethereum(conf) => Ok(HomeVariants::Ethereum(
make_home(
conf.clone(),
&ContractLocator {
name: self.name.clone(),
domain: self.domain.parse().expect("invalid uint"),
address: self.address.parse::<ethers::types::Address>()?.into(),
},
signer,
)
.await?,
)
.into()),
}
}
/// Try to convert the chain setting into a Outbox contract /// Try to convert the chain setting into a Outbox contract
pub async fn try_into_outbox(&self, signer: Option<Signers>) -> Result<Outboxes, Report> { pub async fn try_into_outbox(&self, signer: Option<Signers>) -> Result<Outboxes, Report> {
match &self.chain { match &self.chain {
@@ -85,25 +61,6 @@ impl ChainSetup {
        }
    }
-    /// Try to convert the chain setting into a replica contract
-    pub async fn try_into_replica(&self, signer: Option<Signers>) -> Result<Replicas, Report> {
-        match &self.chain {
-            ChainConf::Ethereum(conf) => Ok(ReplicaVariants::Ethereum(
-                make_replica(
-                    conf.clone(),
-                    &ContractLocator {
-                        name: self.name.clone(),
-                        domain: self.domain.parse().expect("invalid uint"),
-                        address: self.address.parse::<ethers::types::Address>()?.into(),
-                    },
-                    signer,
-                )
-                .await?,
-            )
-            .into()),
-        }
-    }
-
    /// Try to convert the chain setting into an inbox contract
    pub async fn try_into_inbox(&self, signer: Option<Signers>) -> Result<Inboxes, Report> {
        match &self.chain {
@@ -122,25 +79,4 @@ impl ChainSetup {
            .into()),
        }
    }
-    /// Try to convert chain setting into XAppConnectionManager contract
-    pub async fn try_into_connection_manager(
-        &self,
-        signer: Option<Signers>,
-    ) -> Result<ConnectionManagers, Report> {
-        match &self.chain {
-            ChainConf::Ethereum(conf) => Ok(ConnectionManagers::Ethereum(
-                make_conn_manager(
-                    conf.clone(),
-                    &ContractLocator {
-                        name: self.name.clone(),
-                        domain: self.domain.parse().expect("invalid uint"),
-                        address: self.address.parse::<ethers::types::Address>()?.into(),
-                    },
-                    signer,
-                )
-                .await?,
-            )),
-        }
-    }
}

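// Illustrative sketch (not from this commit): consuming the slimmed-down
// ChainSetup above for a read-only inbox connection; signers are optional
// and normally looked up by chain name in Settings.
async fn connect_example(setup: &ChainSetup) -> color_eyre::Result<Inboxes> {
    setup.try_into_inbox(None).await
}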
@@ -5,7 +5,7 @@
//! Abacus Agents have a shared core, which contains connection info for rpc,
//! relevant contract addresses on each chain, etc. In addition, each agent has
//! agent-specific settings. By convention, we represent these as a base config
-//! per-Home contract, and a "partial" config per agent. On bootup, the agent
+//! per-Outbox contract, and a "partial" config per agent. On bootup, the agent
//! loads the configuration, establishes RPC connections, and monitors each
//! configured chain.
//!
@@ -28,26 +28,21 @@
//!    env vars. `$RUN_ENV/$BASE_CONFIG`
//! 2. The config file specified by the `RUN_ENV` env var and the
//!    agent's name. `$RUN_ENV/{agent}-partial.json`.
-//!    E.g. `$RUN_ENV/updater-partial.json`
+//!    E.g. `$RUN_ENV/validator-partial.json`
//! 3. Configuration env vars with the prefix `OPT_BASE` intended
//!    to be shared by multiple agents in the same environment
-//!    E.g. `export OPT_BASE_REPLICAS_KOVAN_DOMAIN=3000`
+//!    E.g. `export OPT_BASE_INBOXES_KOVAN_DOMAIN=3000`
//! 4. Configuration env vars with the prefix `OPT_{agent name}`
//!    intended to be used by a specific agent.
//!    E.g. `export OPT_KATHY_CHAT_TYPE="static message"`
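// Illustrative sketch (assumed, mirroring the load order documented above)
// of how the four layers compose with the `config` crate used by Settings::new();
// the "./config/" base path is an assumption:
use config::{Config, ConfigError, Environment, File};

fn load_layers(agent: &str) -> Result<Config, ConfigError> {
    let run_env = std::env::var("RUN_ENV").unwrap_or_else(|_| "default".into());
    let base = std::env::var("BASE_CONFIG").unwrap_or_else(|_| "base".into());
    let mut s = Config::new();
    // 1. $RUN_ENV/$BASE_CONFIG
    s.merge(File::with_name(&format!("./config/{}/{}", run_env, base)))?;
    // 2. $RUN_ENV/{agent}-partial.json (optional)
    s.merge(File::with_name(&format!("./config/{}/{}-partial", run_env, agent)).required(false))?;
    // 3. Shared env vars: OPT_BASE_*
    s.merge(Environment::with_prefix("OPT_BASE").separator("_"))?;
    // 4. Agent-specific env vars: OPT_{AGENT}_*
    s.merge(Environment::with_prefix(&format!("OPT_{}", agent.to_uppercase())).separator("_"))?;
    Ok(s)
}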
-use crate::{
-    agent::AgentCore, AbacusAgentCore, AbacusCommonIndexers, CachingHome, CachingInbox,
-    CachingOutbox, CachingReplica, CommonIndexers, HomeIndexers, OutboxIndexers,
-};
+use crate::{AbacusAgentCore, AbacusCommonIndexers, CachingInbox, CachingOutbox, OutboxIndexers};
use abacus_core::{
    db::{AbacusDB, DB},
    utils::HexString,
-    AbacusCommon, Common, ContractLocator, Signers,
+    AbacusCommon, ContractLocator, Signers,
};
-use abacus_ethereum::{
-    make_home_indexer, make_inbox_indexer, make_outbox_indexer, make_replica_indexer,
-};
+use abacus_ethereum::{make_inbox_indexer, make_outbox_indexer};
use color_eyre::{eyre::bail, Report};
use config::{Config, ConfigError, Environment, File};
use ethers::prelude::AwsSigner;
@@ -124,13 +119,13 @@ impl SignerConf {
        }
    }
}

-/// Home indexing settings
+/// Outbox indexing settings
#[derive(Debug, Deserialize, Default, Clone)]
#[serde(rename_all = "camelCase")]
pub struct IndexSettings {
-    /// The height at which to start indexing the Home contract
+    /// The height at which to start indexing the Outbox contract
    pub from: Option<String>,
-    /// The number of blocks to query at once at which to start indexing the Home contract
+    /// The number of blocks to query at once when indexing the Outbox contract
    pub chunk: Option<String>,
}
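// The from()/chunk_size() accessors used by try_outbox_indexer below are
// outside this diff; a plausible sketch given the Option<String> fields
// above (the default chunk size is an assumption):
impl IndexSettings {
    pub fn from(&self) -> u32 {
        self.from
            .as_ref()
            .and_then(|s| s.parse::<u32>().ok())
            .unwrap_or_default()
    }

    pub fn chunk_size(&self) -> u32 {
        self.chunk
            .as_ref()
            .and_then(|s| s.parse::<u32>().ok())
            .unwrap_or(1999)
    }
}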
@@ -183,16 +178,13 @@ pub struct Settings {
    pub db: String,
    /// Port to listen for prometheus scrape requests
    pub metrics: Option<String>,
-    /// Settings for the home indexer
+    /// Settings for the outbox indexer
    #[serde(default)]
    pub index: IndexSettings,
-    /// TODO: In this transitionary period, home and replicas
-    /// fields are reused for both home/replicas for optics agents
-    /// and outbox/inboxes for abacus agents
-    /// The home configuration
-    pub home: ChainSetup,
-    /// The replica configurations
-    pub replicas: HashMap<String, ChainSetup>,
+    /// The outbox configuration
+    pub outbox: ChainSetup,
+    /// The inbox configurations
+    pub inboxes: HashMap<String, ChainSetup>,
    /// The tracing configuration
    pub tracing: TracingConfig,
    /// Transaction signers
@@ -206,8 +198,8 @@ impl Settings {
            db: self.db.clone(),
            metrics: self.metrics.clone(),
            index: self.index.clone(),
-            home: self.home.clone(),
-            replicas: self.replicas.clone(),
+            outbox: self.outbox.clone(),
+            inboxes: self.inboxes.clone(),
            tracing: self.tracing.clone(),
            signers: self.signers.clone(),
        }
@@ -220,39 +212,13 @@ impl Settings {
        self.signers.get(name)?.try_into_signer().await.ok()
    }
-    /// Try to get all replicas from this settings object
-    pub async fn try_caching_replicas(
-        &self,
-        db: DB,
-    ) -> Result<HashMap<String, Arc<CachingReplica>>, Report> {
-        let mut result = HashMap::default();
-        for (k, v) in self.replicas.iter().filter(|(_, v)| v.disabled.is_none()) {
-            if k != &v.name {
-                bail!(
-                    "Replica key does not match replica name:\n key: {} name: {}",
-                    k,
-                    v.name
-                );
-            }
-            let signer = self.get_signer(&v.name).await;
-            let replica = v.try_into_replica(signer).await?;
-            let indexer = Arc::new(self.try_replica_indexer(v).await?);
-            let abacus_db = AbacusDB::new(replica.name(), db.clone());
-            result.insert(
-                v.name.clone(),
-                Arc::new(CachingReplica::new(replica, abacus_db, indexer)),
-            );
-        }
-        Ok(result)
-    }
-
    /// Try to get all inboxes from this settings object
    pub async fn try_caching_inboxes(
        &self,
        db: DB,
    ) -> Result<HashMap<String, Arc<CachingInbox>>, Report> {
        let mut result = HashMap::default();
-        for (k, v) in self.replicas.iter().filter(|(_, v)| v.disabled.is_none()) {
+        for (k, v) in self.inboxes.iter().filter(|(_, v)| v.disabled.is_none()) {
            if k != &v.name {
                bail!(
                    "Inbox key does not match inbox name:\n key: {} name: {}",
@@ -272,19 +238,10 @@ impl Settings {
        Ok(result)
    }
-    /// Try to get a home object
-    pub async fn try_caching_home(&self, db: DB) -> Result<CachingHome, Report> {
-        let signer = self.get_signer(&self.home.name).await;
-        let home = self.home.try_into_home(signer).await?;
-        let indexer = Arc::new(self.try_home_indexer().await?);
-        let abacus_db = AbacusDB::new(home.name(), db);
-        Ok(CachingHome::new(home, abacus_db, indexer))
-    }
-
    /// Try to get an outbox object
    pub async fn try_caching_outbox(&self, db: DB) -> Result<CachingOutbox, Report> {
-        let signer = self.get_signer(&self.home.name).await;
-        let outbox = self.home.try_into_outbox(signer).await?;
+        let signer = self.get_signer(&self.outbox.name).await;
+        let outbox = self.outbox.try_into_outbox(signer).await?;
        let indexer = Arc::new(self.try_outbox_indexer().await?);
        let abacus_db = AbacusDB::new(outbox.name(), db);
        Ok(CachingOutbox::new(outbox, abacus_db, indexer))
@@ -292,60 +249,20 @@ impl Settings {
    /// Try to get an indexer object for an outbox
    pub async fn try_outbox_indexer(&self) -> Result<OutboxIndexers, Report> {
-        let signer = self.get_signer(&self.home.name).await;
-        match &self.home.chain {
+        let signer = self.get_signer(&self.outbox.name).await;
+        match &self.outbox.chain {
            ChainConf::Ethereum(conn) => Ok(OutboxIndexers::Ethereum(
                make_outbox_indexer(
                    conn.clone(),
                    &ContractLocator {
-                        name: self.home.name.clone(),
-                        domain: self.home.domain.parse().expect("invalid uint"),
-                        address: self.home.address.parse::<ethers::types::Address>()?.into(),
+                        name: self.outbox.name.clone(),
+                        domain: self.outbox.domain.parse().expect("invalid uint"),
+                        address: self
+                            .outbox
+                            .address
+                            .parse::<ethers::types::Address>()?
+                            .into(),
                    },
                    signer,
                    self.index.from(),
                    self.index.chunk_size(),
                )
                .await?,
            )),
        }
    }

-    /// Try to get an indexer object for a home
-    pub async fn try_home_indexer(&self) -> Result<HomeIndexers, Report> {
-        let signer = self.get_signer(&self.home.name).await;
-        match &self.home.chain {
-            ChainConf::Ethereum(conn) => Ok(HomeIndexers::Ethereum(
-                make_home_indexer(
-                    conn.clone(),
-                    &ContractLocator {
-                        name: self.home.name.clone(),
-                        domain: self.home.domain.parse().expect("invalid uint"),
-                        address: self.home.address.parse::<ethers::types::Address>()?.into(),
-                    },
-                    signer,
-                    self.index.from(),
-                    self.index.chunk_size(),
-                )
-                .await?,
-            )),
-        }
-    }
-
-    /// Try to get an indexer object for a replica
-    pub async fn try_replica_indexer(&self, setup: &ChainSetup) -> Result<CommonIndexers, Report> {
-        let signer = self.get_signer(&setup.name).await;
-        match &setup.chain {
-            ChainConf::Ethereum(conn) => Ok(CommonIndexers::Ethereum(
-                make_replica_indexer(
-                    conn.clone(),
-                    &ContractLocator {
-                        name: setup.name.clone(),
-                        domain: setup.domain.parse().expect("invalid uint"),
-                        address: setup.address.parse::<ethers::types::Address>()?.into(),
-                    },
-                    signer,
-                    self.index.from(),
@@ -405,30 +322,6 @@ impl Settings {
        })
    }

-    /// Try to generate an agent core for a named agent
-    pub async fn try_into_core(&self, name: &str) -> Result<AgentCore, Report> {
-        let metrics = Arc::new(crate::metrics::CoreMetrics::new(
-            name,
-            self.metrics
-                .as_ref()
-                .map(|v| v.parse::<u16>().expect("metrics port must be u16")),
-            Arc::new(prometheus::Registry::new()),
-        )?);
-        let db = DB::from_path(&self.db)?;
-        let home = Arc::new(self.try_caching_home(db.clone()).await?);
-        let replicas = self.try_caching_replicas(db.clone()).await?;
-
-        Ok(AgentCore {
-            home,
-            replicas,
-            db,
-            settings: self.clone(),
-            metrics,
-            indexer: self.index.clone(),
-        })
-    }
-
    /// Read settings from the config file
    pub fn new() -> Result<Self, ConfigError> {
        let mut s = Config::new();

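// Illustrative sketch (assumed, not in this hunk): the Abacus-side
// counterpart of the removed try_into_core, assembling the outbox and
// inboxes that back an AbacusAgentCore.
async fn build_core_example(settings: &Settings) -> color_eyre::Result<()> {
    let db = DB::from_path(&settings.db)?;
    let outbox = std::sync::Arc::new(settings.try_caching_outbox(db.clone()).await?);
    let inboxes = settings.try_caching_inboxes(db.clone()).await?;
    // outbox and inboxes are then handed to each agent via AbacusAgentCore
    let _ = (outbox, inboxes);
    Ok(())
}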
@@ -1,191 +0,0 @@
use abacus_core::{
AbacusIdentifier, ChainCommunicationError, ConnectionManager, SignedFailureNotification,
TxOutcome,
};
use async_trait::async_trait;
use abacus_ethereum::EthereumConnectionManager;
use abacus_test::mocks::MockConnectionManagerContract;
/// Replica type
#[derive(Debug)]
pub enum ConnectionManagers {
/// Ethereum connection manager contract
Ethereum(Box<dyn ConnectionManager>),
/// Mock connection manager contract
Mock(Box<MockConnectionManagerContract>),
/// Other connection manager variant
Other(Box<dyn ConnectionManager>),
}
impl ConnectionManagers {
/// Calls checkpoint on mock variant. Should
/// only be used during tests.
#[doc(hidden)]
pub fn checkpoint(&mut self) {
if let ConnectionManagers::Mock(connection_manager) = self {
connection_manager.checkpoint();
} else {
panic!("ConnectionManager should be mock variant!");
}
}
}
impl<M> From<EthereumConnectionManager<M>> for ConnectionManagers
where
M: ethers::providers::Middleware + 'static,
{
fn from(connection_manager: EthereumConnectionManager<M>) -> Self {
ConnectionManagers::Ethereum(Box::new(connection_manager))
}
}
impl From<MockConnectionManagerContract> for ConnectionManagers {
fn from(mock_connection_manager: MockConnectionManagerContract) -> Self {
ConnectionManagers::Mock(Box::new(mock_connection_manager))
}
}
impl From<Box<dyn ConnectionManager>> for ConnectionManagers {
fn from(connection_manager: Box<dyn ConnectionManager>) -> Self {
ConnectionManagers::Other(connection_manager)
}
}
#[async_trait]
impl ConnectionManager for ConnectionManagers {
fn local_domain(&self) -> u32 {
match self {
ConnectionManagers::Ethereum(connection_manager) => connection_manager.local_domain(),
ConnectionManagers::Mock(connection_manager) => connection_manager.local_domain(),
ConnectionManagers::Other(connection_manager) => connection_manager.local_domain(),
}
}
async fn is_replica(&self, address: AbacusIdentifier) -> Result<bool, ChainCommunicationError> {
match self {
ConnectionManagers::Ethereum(connection_manager) => {
connection_manager.is_replica(address).await
}
ConnectionManagers::Mock(connection_manager) => {
connection_manager.is_replica(address).await
}
ConnectionManagers::Other(connection_manager) => {
connection_manager.is_replica(address).await
}
}
}
async fn watcher_permission(
&self,
address: AbacusIdentifier,
domain: u32,
) -> Result<bool, ChainCommunicationError> {
match self {
ConnectionManagers::Ethereum(connection_manager) => {
connection_manager.watcher_permission(address, domain).await
}
ConnectionManagers::Mock(connection_manager) => {
connection_manager.watcher_permission(address, domain).await
}
ConnectionManagers::Other(connection_manager) => {
connection_manager.watcher_permission(address, domain).await
}
}
}
async fn owner_enroll_replica(
&self,
replica: AbacusIdentifier,
domain: u32,
) -> Result<TxOutcome, ChainCommunicationError> {
match self {
ConnectionManagers::Ethereum(connection_manager) => {
connection_manager
.owner_enroll_replica(replica, domain)
.await
}
ConnectionManagers::Mock(connection_manager) => {
connection_manager
.owner_enroll_replica(replica, domain)
.await
}
ConnectionManagers::Other(connection_manager) => {
connection_manager
.owner_enroll_replica(replica, domain)
.await
}
}
}
async fn owner_unenroll_replica(
&self,
replica: AbacusIdentifier,
) -> Result<TxOutcome, ChainCommunicationError> {
match self {
ConnectionManagers::Ethereum(connection_manager) => {
connection_manager.owner_unenroll_replica(replica).await
}
ConnectionManagers::Mock(connection_manager) => {
connection_manager.owner_unenroll_replica(replica).await
}
ConnectionManagers::Other(connection_manager) => {
connection_manager.owner_unenroll_replica(replica).await
}
}
}
async fn set_home(&self, home: AbacusIdentifier) -> Result<TxOutcome, ChainCommunicationError> {
match self {
ConnectionManagers::Ethereum(connection_manager) => {
connection_manager.set_home(home).await
}
ConnectionManagers::Mock(connection_manager) => connection_manager.set_home(home).await,
ConnectionManagers::Other(connection_manager) => {
connection_manager.set_home(home).await
}
}
}
async fn set_watcher_permission(
&self,
watcher: AbacusIdentifier,
domain: u32,
access: bool,
) -> Result<TxOutcome, ChainCommunicationError> {
match self {
ConnectionManagers::Ethereum(connection_manager) => {
connection_manager
.set_watcher_permission(watcher, domain, access)
.await
}
ConnectionManagers::Mock(connection_manager) => {
connection_manager
.set_watcher_permission(watcher, domain, access)
.await
}
ConnectionManagers::Other(connection_manager) => {
connection_manager
.set_watcher_permission(watcher, domain, access)
.await
}
}
}
async fn unenroll_replica(
&self,
signed_failure: &SignedFailureNotification,
) -> Result<TxOutcome, ChainCommunicationError> {
match self {
ConnectionManagers::Ethereum(connection_manager) => {
connection_manager.unenroll_replica(signed_failure).await
}
ConnectionManagers::Mock(connection_manager) => {
connection_manager.unenroll_replica(signed_failure).await
}
ConnectionManagers::Other(connection_manager) => {
connection_manager.unenroll_replica(signed_failure).await
}
}
}
}

@@ -4,8 +4,7 @@ use abacus_core::test_output::output_functions::*;
fn main() {
    #[cfg(feature = "output")]
    {
-        output_signed_updates();
-        output_signed_failure_notifications();
+        output_signed_checkpoints();
        output_message_and_leaf();
    }
}

@@ -12,10 +12,6 @@ use sha3::{Digest, Keccak256};
pub const TREE_DEPTH: usize = 32;
const EMPTY_SLICE: &[H256] = &[];

-pub(super) fn hash(preimage: impl AsRef<[u8]>) -> H256 {
-    H256::from_slice(Keccak256::digest(preimage.as_ref()).as_slice())
-}
-
pub(super) fn hash_concat(left: impl AsRef<[u8]>, right: impl AsRef<[u8]>) -> H256 {
    H256::from_slice(
        Keccak256::new()

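// The hunk above truncates hash_concat; for reference, a keccak-concat
// helper consistent with the visible lines would read (hedged sketch):
use ethers::core::types::H256;
use sha3::{Digest, Keccak256};

pub(super) fn hash_concat_sketch(left: impl AsRef<[u8]>, right: impl AsRef<[u8]>) -> H256 {
    H256::from_slice(
        Keccak256::new()
            .chain(left.as_ref())
            .chain(right.as_ref())
            .finalize()
            .as_slice(),
    )
}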
@@ -1,13 +1,12 @@
use crate::db::{DbError, TypedDB, DB};
-use crate::UpdateMeta;
use crate::{
    accumulator::merkle::Proof, traits::RawCommittedMessage, utils, AbacusMessage,
-    CommittedMessage, Decode, SignedUpdate, SignedUpdateWithMeta,
+    CommittedMessage, Decode,
};
use color_eyre::Result;
use ethers::core::types::H256;
use tokio::time::sleep;
-use tracing::{debug, error, info, warn};
+use tracing::{debug, info};

use std::future::Future;
use std::time::Duration;
@@ -16,18 +15,13 @@ use crate::db::iterator::PrefixIterator;
static LEAF_IDX: &str = "leaf_index_";
static LEAF: &str = "leaf_";
-static PREV_ROOT: &str = "update_prev_root_";
static PROOF: &str = "proof_";
static MESSAGE: &str = "message_";
-static UPDATE: &str = "update_";
-static UPDATE_META: &str = "update_metadata_";
-static LATEST_ROOT: &str = "update_latest_root_";
static LATEST_LEAF_INDEX: &str = "latest_known_leaf_index_";
static LATEST_LEAF_INDEX_FOR_DESTINATION: &str = "latest_known_leaf_index_for_destination_";
-static UPDATER_PRODUCED_UPDATE: &str = "updater_produced_update_";
static LEAF_PROCESS_STATUS: &str = "leaf_process_status_";

-/// DB handle for storing data tied to a specific home.
+/// DB handle for storing data tied to a specific Outbox.
///
/// Key structure: ```<entity>_<additional_prefix(es)>_<key>```
#[derive(Debug, Clone)]
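// Illustrative usage of the handle described above (method names taken
// from this diff; color_eyre error plumbing assumed):
fn db_example(db: DB) -> color_eyre::Result<()> {
    let abacus_db = AbacusDB::new("outbox_kovan", db);
    let msg = RawCommittedMessage {
        leaf_index: 0,
        committed_root: H256::zero(),
        message: vec![],
    };
    abacus_db.store_latest_message(&msg)?;
    assert_eq!(abacus_db.retrieve_latest_leaf_index()?, Some(0));
    Ok(())
}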
@@ -81,8 +75,7 @@ impl AbacusDB {
    /// Store a raw committed message building off of the latest leaf index
    pub fn store_latest_message(&self, message: &RawCommittedMessage) -> Result<()> {
-        // If there is no latest root, or if this update is on the latest root
-        // update latest root
+        // If this message is not building off the latest leaf index, log it.
        if let Some(idx) = self.retrieve_latest_leaf_index()? {
            if idx != message.leaf_index - 1 {
                debug!(
@@ -222,119 +215,6 @@ impl AbacusDB {
        }
    }
/// Store the latest committed
fn store_latest_root(&self, root: H256) -> Result<(), DbError> {
debug!(root = ?root, "storing new latest root in DB");
self.store_encodable("", LATEST_ROOT, &root)
}
/// Retrieve the latest committed
pub fn retrieve_latest_root(&self) -> Result<Option<H256>, DbError> {
self.retrieve_decodable("", LATEST_ROOT)
}
/// Store list of sorted updates and their metadata
pub fn store_updates_and_meta(&self, updates: &[SignedUpdateWithMeta]) -> Result<()> {
for update_with_meta in updates {
self.store_latest_update(&update_with_meta.signed_update)?;
self.store_update_metadata(update_with_meta)?;
info!(
block_number = update_with_meta.metadata.block_number,
previous_root = ?&update_with_meta.signed_update.update.previous_root,
new_root = ?&update_with_meta.signed_update.update.new_root,
"Stored new update in db.",
);
}
Ok(())
}
/// Store update metadata (by update's new root)
///
/// Keys --> Values:
/// - `update_new_root` --> `update_metadata`
pub fn store_update_metadata(
&self,
update_with_meta: &SignedUpdateWithMeta,
) -> Result<(), DbError> {
let new_root = update_with_meta.signed_update.update.new_root;
let metadata = update_with_meta.metadata;
debug!(new_root = ?new_root, metadata = ?metadata, "storing update metadata in DB");
self.store_keyed_encodable(UPDATE_META, &new_root, &metadata)
}
/// Retrieve update metadata (by update's new root)
pub fn retrieve_update_metadata(&self, new_root: H256) -> Result<Option<UpdateMeta>, DbError> {
self.retrieve_keyed_decodable(UPDATE_META, &new_root)
}
/// Store a signed update building off latest root
///
/// Keys --> Values:
/// - `LATEST_ROOT` --> `root`
/// - `new_root` --> `prev_root`
/// - `prev_root` --> `update`
pub fn store_latest_update(&self, update: &SignedUpdate) -> Result<(), DbError> {
debug!(
previous_root = ?update.update.previous_root,
new_root = ?update.update.new_root,
"storing update in DB"
);
// If there is no latest root, or if this update is on the latest root
// update latest root
match self.retrieve_latest_root()? {
Some(root) => {
if root == update.update.previous_root {
self.store_latest_root(update.update.new_root)?;
} else {
warn!(
"Attempted to store update not building off latest root: {:?}",
update
)
}
}
None => self.store_latest_root(update.update.new_root)?,
}
self.store_update(update)
}
/// Store an update.
///
/// Keys --> Values:
/// - `new_root` --> `prev_root`
/// - `prev_root` --> `update`
pub fn store_update(&self, update: &SignedUpdate) -> Result<(), DbError> {
self.store_keyed_encodable(UPDATE, &update.update.previous_root, update)?;
self.store_keyed_encodable(
PREV_ROOT,
&update.update.new_root,
&update.update.previous_root,
)
}
/// Retrieve an update by its previous root
pub fn update_by_previous_root(
&self,
previous_root: H256,
) -> Result<Option<SignedUpdate>, DbError> {
self.retrieve_keyed_decodable(UPDATE, &previous_root)
}
/// Retrieve an update by its new root
pub fn update_by_new_root(&self, new_root: H256) -> Result<Option<SignedUpdate>, DbError> {
let prev_root: Option<H256> = self.retrieve_keyed_decodable(PREV_ROOT, &new_root)?;
match prev_root {
Some(prev_root) => self.update_by_previous_root(prev_root),
None => Ok(None),
}
}
    /// Iterate over all leaves
    pub fn leaf_iterator(&self) -> PrefixIterator<H256> {
        PrefixIterator::new(self.0.as_ref().prefix_iterator(LEAF_IDX), LEAF_IDX.as_ref())
@@ -368,38 +248,6 @@ impl AbacusDB {
        }
    }
/// Store a pending update in the DB for potential submission.
///
/// This does not produce update meta or update the latest update db value.
/// It is used by update production and submission.
pub fn store_produced_update(&self, update: &SignedUpdate) -> Result<(), DbError> {
let existing_opt = self.retrieve_produced_update(update.update.previous_root)?;
if let Some(existing) = existing_opt {
if existing.update.new_root != update.update.new_root {
error!("Updater attempted to store conflicting update. Existing update: {:?}. New conflicting update: {:?}.", &existing, &update);
return Err(DbError::UpdaterConflictError {
existing: existing.update,
conflicting: update.update,
});
}
}
self.store_keyed_encodable(
UPDATER_PRODUCED_UPDATE,
&update.update.previous_root,
update,
)
}
/// Retrieve a pending update from the DB (if one exists).
pub fn retrieve_produced_update(
&self,
previous_root: H256,
) -> Result<Option<SignedUpdate>, DbError> {
self.retrieve_keyed_decodable(UPDATER_PRODUCED_UPDATE, &previous_root)
}
    /// Mark leaf as processed
    pub fn mark_leaf_as_processed(&self, leaf_index: u32) -> Result<(), DbError> {
        debug!(leaf_index = ?leaf_index, "mark leaf as processed");

@@ -10,11 +10,11 @@ pub mod iterator;
mod typed_db;
pub use typed_db::*;

-/// DB operations tied to specific home
+/// DB operations tied to a specific Outbox
mod abacus_db;
pub use abacus_db::*;

-use crate::{AbacusError, Decode, Encode, Update};
+use crate::{AbacusError, Decode, Encode};

#[derive(Debug, Clone)]
/// A KV Store
@@ -35,16 +35,6 @@ pub enum DbError {
    /// Abacus Error
    #[error("{0}")]
    AbacusError(#[from] AbacusError),
-    /// UpdaterConflictError
-    ///
-    /// TODO(luke): move this agent-related stuff into abacus-base
-    #[error("Updater attempted to store conflicting signed update. Existing: {existing:?}. New conflicting: {conflicting:?}.")]
-    UpdaterConflictError {
-        /// Existing signed update
-        existing: Update,
-        /// Conflicting signed update
-        conflicting: Update,
-    },
}

type Result<T> = std::result::Result<T, DbError>;

@@ -11,18 +11,7 @@
/// Accumulator management
pub mod accumulator;

-/// Model instantiations of the on-chain structures
-pub mod models {
-    /// A simple Home chain Abacus implementation
-    mod home;
-
-    /// A simple Replica chain Abacus implementation
-    mod replica;
-
-    pub use self::{home::*, replica::*};
-}
-
-/// Async Traits for Homes & Replicas for use in applications
+/// Async Traits for Outboxes & Inboxes for use in applications
mod traits;
use ethers_signers::WalletError;
pub use traits::*;
@@ -55,13 +44,13 @@ use async_trait::async_trait;
use ethers::{
    core::types::{
        transaction::{eip2718::TypedTransaction, eip712::Eip712},
-        Address as EthAddress, Signature, SignatureError, H256,
+        Address as EthAddress, Signature, SignatureError,
    },
    prelude::AwsSigner,
    signers::{AwsSignerError, LocalWallet, Signer},
};

-/// Enum for validity of a list (of updates or messages)
+/// Enum for validity of a list (of checkpoints or messages)
#[derive(Debug)]
pub enum ListValidity {
    /// Empty list
@@ -78,18 +67,6 @@ pub enum AbacusError {
    /// Signature Error passthrough
    #[error(transparent)]
    SignatureError(#[from] SignatureError),
-    /// Update does not build off the current root
-    #[error("Update has wrong current root. Expected: {expected}. Got: {actual}.")]
-    WrongCurrentRoot {
-        /// The provided root
-        actual: H256,
-        /// The current root
-        expected: H256,
-    },
-    /// Update specifies a new root that is not in the queue. This is an
-    /// improper update and is slashable
-    #[error("Update has unknown new root: {0}")]
-    UnknownNewRoot(H256),
    /// IO error from Read/Write usage
    #[error(transparent)]
    IoError(#[from] std::io::Error),
@@ -205,6 +182,8 @@ impl<T> SignerExt for T where T: Signer {}
mod test {
    use super::*;

+    use ethers::core::types::H256;
+
    #[test]
    fn it_sign() {
        let t = async {
@@ -212,10 +191,10 @@ mod test {
                "1111111111111111111111111111111111111111111111111111111111111111"
                    .parse()
                    .unwrap();
-            let message = Update {
-                home_domain: 5,
-                new_root: H256::repeat_byte(1),
-                previous_root: H256::repeat_byte(2),
+            let message = Checkpoint {
+                outbox_domain: 5,
+                root: H256::repeat_byte(1),
+                index: 123,
            };

            let signed = message.sign_with(&signer).await.expect("!sign_with");

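// The hunks in this file replace Update (previous/new root pairs) with
// Checkpoint (a root at an index). A hedged sketch of the shape implied by
// the field names above (the real definition lives in abacus-core, outside
// this excerpt):
use ethers::core::types::H256;

#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Checkpoint {
    /// Domain of the Outbox the checkpoint commits to
    pub outbox_domain: u32,
    /// The checkpointed root
    pub root: H256,
    /// The index of the checkpoint
    pub index: u32,
}
// sign_with presumably hashes these fields together with the domain hash and
// signs the EIP-191 prepended digest, mirroring FailureNotification::signing_hash
// further down.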
@@ -1,187 +0,0 @@
use ethers::core::types::{Address, H256};
use std::{collections::VecDeque, io::Write};
use crate::{
accumulator::{hash, incremental::IncrementalMerkle},
AbacusError, SignedUpdate, Update,
};
/// Waiting state
#[derive(Default, Debug, Clone)]
pub struct Waiting {
queue: VecDeque<H256>,
accumulator: IncrementalMerkle,
}
/// Failed state
#[derive(Debug, Clone)]
pub struct Failed {
queue: VecDeque<H256>,
accumulator: IncrementalMerkle,
}
impl Waiting {
/// Return a reference to the root queue
pub fn queue(&self) -> &VecDeque<H256> {
&self.queue
}
/// Return a reference to the incremental merkle tree
pub fn accumulator(&self) -> &IncrementalMerkle {
&self.accumulator
}
}
impl Failed {
/// Return a reference to the root queue
pub fn queue(&self) -> &VecDeque<H256> {
&self.queue
}
/// Return a reference to the incremental merkle tree
pub fn accumulator(&self) -> &IncrementalMerkle {
&self.accumulator
}
}
fn format_message(
origin: u32,
sender: H256,
destination: u32,
recipient: H256,
body: &[u8],
) -> Vec<u8> {
let mut buf = vec![];
buf.write_all(&origin.to_be_bytes()).unwrap();
buf.write_all(sender.as_ref()).unwrap();
buf.write_all(&destination.to_be_bytes()).unwrap();
buf.write_all(recipient.as_ref()).unwrap();
buf.write_all(body).unwrap();
buf
}
/// The Home-chain Abacus object
#[derive(Debug, Clone)]
pub struct Home<S> {
local: u32,
updater: Address,
committed_root: H256,
state: S,
}
impl<S> Home<S> {
/// SLIP-44 id of the Home chain
pub fn local(&self) -> u32 {
self.local
}
/// Ethereum address of the updater
pub fn updater(&self) -> Address {
self.updater
}
/// Current state
pub fn state(&self) -> &S {
&self.state
}
fn check_sig(&self, update: &SignedUpdate) -> Result<(), AbacusError> {
update.verify(self.updater)
}
}
impl From<Home<Waiting>> for Home<Failed> {
fn from(h: Home<Waiting>) -> Self {
Self {
local: h.local,
updater: h.updater,
committed_root: h.committed_root,
state: Failed {
accumulator: h.state.accumulator,
queue: h.state.queue,
},
}
}
}
impl Home<Waiting> {
/// Get the current accumulator root
pub fn root(&self) -> H256 {
self.state().accumulator().root()
}
/// Instantiate a new Home.
pub fn init(local: u32, updater: Address) -> Home<Waiting> {
Self {
local,
updater,
committed_root: Default::default(),
state: Waiting::default(),
}
}
/// Dispatch a message
pub fn dispatch(&mut self, sender: H256, destination: u32, recipient: H256, body: &[u8]) {
let message = format_message(self.local, sender, destination, recipient, body);
let message_hash = hash(&message);
self.state.accumulator.ingest(message_hash);
self.state.queue.push_back(self.state.accumulator.root());
}
fn _update(&mut self, update: &Update) -> Result<(), AbacusError> {
if update.previous_root != self.committed_root {
return Err(AbacusError::WrongCurrentRoot {
actual: update.previous_root,
expected: self.committed_root,
});
}
if self.state.queue.contains(&update.new_root) {
loop {
let item = self.state.queue.pop_front().unwrap();
if item == update.new_root {
return Ok(());
}
}
}
Err(AbacusError::UnknownNewRoot(update.new_root))
}
/// Produce an update from the current root to the new root.
pub fn produce_update(&self) -> Update {
Update {
home_domain: self.local,
previous_root: self.committed_root,
new_root: self.state.accumulator.root(),
}
}
/// Update the root
pub fn update(&mut self, update: &SignedUpdate) -> Result<(), AbacusError> {
self.check_sig(update)?;
self._update(&update.update)
}
/// Notify the Home of a double update, and set failed.
pub fn double_update(
self,
first: &SignedUpdate,
second: &SignedUpdate,
) -> Result<Home<Failed>, Home<Waiting>> {
if first == second || self.check_sig(first).is_err() || self.check_sig(second).is_err() {
Err(self)
} else {
Ok(self.into())
}
}
/// Notify the Home of an improper update, and set failed.
pub fn improper_update(self, update: &SignedUpdate) -> Result<Home<Failed>, Home<Waiting>> {
if self.check_sig(update).is_err() || self.state.queue.contains(&update.update.new_root) {
Err(self)
} else {
Ok(self.into())
}
}
}

@@ -1,131 +0,0 @@
use crate::{AbacusError, SignedUpdate};
use ethers::core::types::{Address, H256, U256};
/// Waiting state
#[derive(Debug, Clone, Copy, Default)]
pub struct Waiting {
root: H256,
}
/// Pending update state
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
pub struct Pending {
root: H256,
new_root: H256,
timeout: U256,
}
/// Failed state
#[derive(Debug, Clone, Copy)]
pub struct Failed {}
/// The Replica-chain Abacus object
#[derive(Debug, Clone, Copy, Default)]
pub struct Replica<S> {
remote: u32,
local: u32,
updater: Address,
optimistic_wait: U256,
state: S,
}
impl<S> Replica<S> {
/// SLIP-44 id of the Home chain
pub fn remote(&self) -> u32 {
self.remote
}
/// SLIP-44 id of this Replica chain
pub fn local(&self) -> u32 {
self.local
}
/// Ethereum address of the updater
pub fn updater(&self) -> Address {
self.updater
}
/// The number of seconds to wait before optimistically accepting an update
pub fn wait(&self) -> U256 {
self.optimistic_wait
}
/// Current state
pub fn state(&self) -> &S {
&self.state
}
fn check_sig(&self, update: &SignedUpdate) -> Result<(), AbacusError> {
update.verify(self.updater)
}
/// Notify Replica of double update, and set to failed
pub fn double_update(
self,
first: &SignedUpdate,
second: &SignedUpdate,
) -> Result<Replica<Failed>, Self> {
if first == second || self.check_sig(first).is_err() || self.check_sig(second).is_err() {
Err(self)
} else {
Ok(Replica {
remote: self.remote,
local: self.local,
updater: self.updater,
optimistic_wait: self.optimistic_wait,
state: Failed {},
})
}
}
}
impl Replica<Waiting> {
/// Get the current root
pub fn root(&self) -> H256 {
self.state().root
}
/// Instantiate a new Replica.
pub fn init(remote: u32, local: u32, updater: Address, optimistic_wait: U256) -> Self {
Self {
remote,
local,
updater,
optimistic_wait,
state: Waiting::default(),
}
}
/// Submit an update
pub fn update(
self,
update: &SignedUpdate,
now: impl FnOnce() -> U256,
) -> Result<Replica<Pending>, Self> {
#[allow(clippy::question_mark)]
if self.check_sig(update).is_err() {
return Err(self);
}
Ok(Replica {
remote: self.remote,
local: self.local,
updater: self.updater,
optimistic_wait: self.optimistic_wait,
state: Pending {
root: self.state.root,
new_root: update.update.new_root,
timeout: now() + self.optimistic_wait,
},
})
}
}
impl Replica<Pending> {
/// Get the current root
pub fn root(&self) -> H256 {
self.state().root
}
}

@@ -5,7 +5,7 @@ use crate::{
    },
    test_utils::find_vector,
    utils::{destination_and_nonce, domain_hash},
-    AbacusMessage, FailureNotification, Update,
+    AbacusMessage, Checkpoint,
};
use ethers::{
    core::types::{H160, H256},
@@ -147,8 +147,8 @@ pub mod output_functions {
            .expect("Failed to write to file");
    }

-    /// Outputs signed update test cases in /vector/signedUpdate.json
-    pub fn output_signed_updates() {
+    /// Outputs signed checkpoint test cases in /vector/signedCheckpoint.json
+    pub fn output_signed_checkpoints() {
        let t = async {
            let signer: ethers::signers::LocalWallet =
                "1111111111111111111111111111111111111111111111111111111111111111"
@@ -159,20 +159,20 @@ pub mod output_functions {
            // test suite
            for i in 1..=3 {
-                let signed_update = Update {
-                    home_domain: 1000,
-                    new_root: H256::repeat_byte(i + 1),
-                    previous_root: H256::repeat_byte(i),
+                let signed_checkpoint = Checkpoint {
+                    outbox_domain: 1000,
+                    root: H256::repeat_byte(i + 1),
+                    index: i as u32,
                }
                .sign_with(&signer)
                .await
                .expect("!sign_with");

                test_cases.push(json!({
-                    "homeDomain": signed_update.update.home_domain,
-                    "oldRoot": signed_update.update.previous_root,
-                    "newRoot": signed_update.update.new_root,
-                    "signature": signed_update.signature,
+                    "outboxDomain": signed_checkpoint.checkpoint.outbox_domain,
+                    "root": signed_checkpoint.checkpoint.root,
+                    "index": signed_checkpoint.checkpoint.index,
+                    "signature": signed_checkpoint.signature,
                    "signer": signer.address(),
                }))
            }
@@ -183,57 +183,7 @@ pub mod output_functions {
            .write(true)
            .create(true)
            .truncate(true)
-            .open(find_vector("signedUpdate.json"))
+            .open(find_vector("signedCheckpoint.json"))
.expect("Failed to open/create file");
file.write_all(json.as_bytes())
.expect("Failed to write to file");
};
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
.block_on(t)
}
/// Outputs signed update test cases in /vector/signedFailure.json
pub fn output_signed_failure_notifications() {
let t = async {
let signer: ethers::signers::LocalWallet =
"1111111111111111111111111111111111111111111111111111111111111111"
.parse()
.unwrap();
let updater: ethers::signers::LocalWallet =
"2222222222222222222222222222222222222222222222222222222222222222"
.parse()
.unwrap();
            // `home_domain` MUST BE 2000 to match the domain used by the
            // XAppConnectionManager test suite
let signed_failure = FailureNotification {
home_domain: 2000,
updater: updater.address().into(),
}
.sign_with(&signer)
.await
.expect("!sign_with");
let signed_json = json!({
"domain": signed_failure.notification.home_domain,
"updater": signed_failure.notification.updater.as_ethereum_address(),
"signature": signed_failure.signature,
"signer": signer.address()
});
let json = json!(vec!(signed_json)).to_string();
let mut file = OpenOptions::new()
.write(true)
.create(true)
.truncate(true)
.open(find_vector("signedFailure.json"))
.expect("Failed to open/create file"); .expect("Failed to open/create file");
file.write_all(json.as_bytes()) file.write_all(json.as_bytes())

@@ -7,7 +7,7 @@ pub enum State {
    Failed,
}

-/// The status of a message in the replica
+/// The status of a message in the inbox
#[repr(u8)]
pub enum MessageStatus {
    /// Message is unknown

@@ -1,196 +0,0 @@
use std::convert::TryFrom;
use crate::{
traits::{ChainCommunicationError, Common, TxOutcome},
utils::domain_hash,
AbacusError, AbacusMessage, Decode, Encode, Message, SignedUpdate, Update,
};
use async_trait::async_trait;
use color_eyre::Result;
use ethers::{core::types::H256, utils::keccak256};
/// A Stamped message that has been committed at some leaf index
#[derive(Debug, Default, Clone, PartialEq)]
pub struct RawCommittedMessage {
/// The index at which the message is committed
pub leaf_index: u32,
/// The home's current root when the message was committed.
pub committed_root: H256,
/// The fully detailed message that was committed
pub message: Vec<u8>,
}
impl RawCommittedMessage {
/// Return the `leaf` for this raw message
///
/// The leaf is the keccak256 digest of the message, which is committed
/// in the message tree
pub fn leaf(&self) -> H256 {
keccak256(&self.message).into()
}
}
impl Encode for RawCommittedMessage {
fn write_to<W>(&self, writer: &mut W) -> std::io::Result<usize>
where
W: std::io::Write,
{
writer.write_all(&self.leaf_index.to_be_bytes())?;
writer.write_all(self.committed_root.as_ref())?;
writer.write_all(&self.message)?;
Ok(4 + 32 + self.message.len())
}
}
impl Decode for RawCommittedMessage {
fn read_from<R>(reader: &mut R) -> Result<Self, AbacusError>
where
R: std::io::Read,
Self: Sized,
{
let mut idx = [0u8; 4];
reader.read_exact(&mut idx)?;
let mut hash = [0u8; 32];
reader.read_exact(&mut hash)?;
let mut message = vec![];
reader.read_to_end(&mut message)?;
Ok(Self {
leaf_index: u32::from_be_bytes(idx),
committed_root: hash.into(),
message,
})
}
}
// ember: tracingify these across usage points
/// A Stamped message that has been committed at some leaf index
#[derive(Debug, Default, Clone)]
pub struct CommittedMessage {
/// The index at which the message is committed
pub leaf_index: u32,
/// The home's current root when the message was committed.
pub committed_root: H256,
/// The fully detailed message that was committed
pub message: AbacusMessage,
}
impl CommittedMessage {
/// Return the leaf associated with the message
pub fn to_leaf(&self) -> H256 {
self.message.to_leaf()
}
}
impl AsRef<AbacusMessage> for CommittedMessage {
fn as_ref(&self) -> &AbacusMessage {
&self.message
}
}
impl TryFrom<RawCommittedMessage> for CommittedMessage {
type Error = AbacusError;
fn try_from(raw: RawCommittedMessage) -> Result<Self, Self::Error> {
Ok(Self {
leaf_index: raw.leaf_index,
committed_root: raw.committed_root,
message: AbacusMessage::read_from(&mut &raw.message[..])?,
})
}
}
/// Interface for the Home chain contract. Allows abstraction over different
/// chains
#[async_trait]
pub trait Home: Common + Send + Sync + std::fmt::Debug {
/// Return the domain ID
fn local_domain(&self) -> u32;
/// Return the domain hash
fn home_domain_hash(&self) -> H256 {
domain_hash(self.local_domain())
}
/// Fetch the nonce
async fn nonces(&self, destination: u32) -> Result<u32, ChainCommunicationError>;
/// Dispatch a message.
async fn dispatch(&self, message: &Message) -> Result<TxOutcome, ChainCommunicationError>;
/// Check if queue contains root.
async fn queue_contains(&self, root: H256) -> Result<bool, ChainCommunicationError>;
/// Submit an improper update for slashing
async fn improper_update(
&self,
update: &SignedUpdate,
) -> Result<TxOutcome, ChainCommunicationError>;
/// Create a valid update based on the chain's current state.
/// This merely suggests an update. It does NOT ensure that no other valid
/// update has been produced. The updater MUST take measures to prevent
/// double-updating. If no messages are queued, this must produce Ok(None).
async fn produce_update(&self) -> Result<Option<Update>, ChainCommunicationError>;
}
/// Interface for retrieving event data emitted specifically by the home
#[async_trait]
pub trait HomeEvents: Home + Send + Sync + std::fmt::Debug {
/// Fetch the message to destination at the nonce (or error).
/// This should fetch events from the chain API.
///
/// Used by processors to get messages in order
async fn raw_message_by_nonce(
&self,
destination: u32,
nonce: u32,
) -> Result<Option<RawCommittedMessage>, ChainCommunicationError>;
/// Fetch the message to destination at the nonce (or error).
/// This should fetch events from the chain API
async fn message_by_nonce(
&self,
destination: u32,
nonce: u32,
) -> Result<Option<CommittedMessage>, ChainCommunicationError> {
self.raw_message_by_nonce(destination, nonce)
.await?
.map(CommittedMessage::try_from)
.transpose()
.map_err(Into::into)
}
/// Look up a message by its hash.
/// This should fetch events from the chain API
async fn raw_message_by_leaf(
&self,
leaf: H256,
) -> Result<Option<RawCommittedMessage>, ChainCommunicationError>;
/// Look up a message by its hash.
/// This should fetch events from the chain API
async fn message_by_leaf(
&self,
leaf: H256,
) -> Result<Option<CommittedMessage>, ChainCommunicationError> {
self.raw_message_by_leaf(leaf)
.await?
.map(CommittedMessage::try_from)
.transpose()
.map_err(Into::into)
}
/// Fetch the tree_index-th leaf inserted into the merkle tree.
/// Returns `Ok(None)` if no leaf exists for given `tree_size` (`Ok(None)`
/// serves as the return value for an index error). If tree_index == 0,
/// this will return the first inserted leaf. This is because the Home
/// emits the index at which the leaf was inserted in (`tree.count() - 1`),
/// thus the first inserted leaf has an index of 0.
async fn leaf_by_tree_index(
&self,
tree_index: usize,
) -> Result<Option<H256>, ChainCommunicationError>;
}

@@ -1,36 +1,16 @@
//! An Indexer provides a common interface for bubbling up chain-specific
//! event-data to another entity (e.g. a `ContractSync`). For example, the only
//! way to retrieve data such as the chain's latest block number or a list of
-//! updates/messages emitted within a certain block range is by calling out to a
+//! checkpoints/messages emitted within a certain block range is by calling out to a
//! chain-specific library and provider (e.g. ethers::provider). A
-//! chain-specific home or replica should implement one or both of the Indexer
-//! traits (CommonIndexer or HomeIndexer) to provide a common interface through which
+//! chain-specific outbox or inbox should implement one or both of the Indexer
+//! traits (CommonIndexer or OutboxIndexer) to provide a common interface through which
//! other entities can retrieve this chain-specific info.
use async_trait::async_trait;
use color_eyre::Result;

-use crate::{CheckpointWithMeta, RawCommittedMessage, SignedUpdateWithMeta};
+use crate::{CheckpointWithMeta, RawCommittedMessage};
/// Interface for Common contract indexer. Interface that allows for other
/// entities to retrieve chain-specific data from a home or replica.
#[async_trait]
pub trait CommonIndexer: Send + Sync + std::fmt::Debug {
/// Get chain's latest block number
async fn get_block_number(&self) -> Result<u32>;
/// Fetch sequentially sorted list of updates between blocks `from` and `to`
async fn fetch_sorted_updates(&self, from: u32, to: u32) -> Result<Vec<SignedUpdateWithMeta>>;
}
/// Interface for Home contract indexer. Interface for allowing other
/// entities to retrieve chain-specific data from a home.
#[async_trait]
pub trait HomeIndexer: CommonIndexer + Send + Sync + std::fmt::Debug {
/// Fetch list of messages between blocks `from` and `to`.
async fn fetch_sorted_messages(&self, _from: u32, _to: u32)
-> Result<Vec<RawCommittedMessage>>;
}
/// Interface for Abacus Common contract indexer. Interface that allows for other
/// entities to retrieve chain-specific data from an outbox or inbox.
@@ -44,8 +24,8 @@ pub trait AbacusCommonIndexer: Send + Sync + std::fmt::Debug {
        -> Result<Vec<CheckpointWithMeta>>;
}

-/// Interface for Home contract indexer. Interface for allowing other
-/// entities to retrieve chain-specific data from a home.
+/// Interface for an Outbox contract indexer, allowing other
+/// entities to retrieve chain-specific data from an outbox.
#[async_trait]
pub trait OutboxIndexer: AbacusCommonIndexer + Send + Sync + std::fmt::Debug {
    /// Fetch list of messages between blocks `from` and `to`.

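// Illustrative sketch of the consumption pattern the module doc describes:
// a ContractSync-style loop paging block ranges through an OutboxIndexer.
// get_block_number and the fetch_sorted_messages signature are assumed from
// the (truncated) traits above.
async fn sync_messages_example<I: OutboxIndexer>(
    indexer: &I,
    mut from: u32,
    chunk: u32,
) -> Result<()> {
    loop {
        let tip = indexer.get_block_number().await?;
        if from >= tip {
            tokio::time::sleep(std::time::Duration::from_secs(5)).await;
            continue;
        }
        let to = (from + chunk).min(tip);
        for _message in indexer.fetch_sorted_messages(from, to).await? {
            // hand each RawCommittedMessage to storage,
            // e.g. AbacusDB::store_latest_message
        }
        from = to;
    }
}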
@@ -0,0 +1,98 @@
use std::convert::TryFrom;
use crate::{AbacusError, AbacusMessage, Decode, Encode};
use color_eyre::Result;
use ethers::{core::types::H256, utils::keccak256};
/// A Stamped message that has been committed at some leaf index
#[derive(Debug, Default, Clone, PartialEq)]
pub struct RawCommittedMessage {
/// The index at which the message is committed
pub leaf_index: u32,
/// The Outbox's current root when the message was committed.
pub committed_root: H256,
/// The fully detailed message that was committed
pub message: Vec<u8>,
}
impl RawCommittedMessage {
/// Return the `leaf` for this raw message
///
/// The leaf is the keccak256 digest of the message, which is committed
/// in the message tree
pub fn leaf(&self) -> H256 {
keccak256(&self.message).into()
}
}
impl Encode for RawCommittedMessage {
fn write_to<W>(&self, writer: &mut W) -> std::io::Result<usize>
where
W: std::io::Write,
{
writer.write_all(&self.leaf_index.to_be_bytes())?;
writer.write_all(self.committed_root.as_ref())?;
writer.write_all(&self.message)?;
Ok(4 + 32 + self.message.len())
}
}
impl Decode for RawCommittedMessage {
fn read_from<R>(reader: &mut R) -> Result<Self, AbacusError>
where
R: std::io::Read,
Self: Sized,
{
let mut idx = [0u8; 4];
reader.read_exact(&mut idx)?;
let mut hash = [0u8; 32];
reader.read_exact(&mut hash)?;
let mut message = vec![];
reader.read_to_end(&mut message)?;
Ok(Self {
leaf_index: u32::from_be_bytes(idx),
committed_root: hash.into(),
message,
})
}
}
// ember: tracingify these across usage points
/// A Stamped message that has been committed at some leaf index
#[derive(Debug, Default, Clone)]
pub struct CommittedMessage {
/// The index at which the message is committed
pub leaf_index: u32,
/// The Outbox's current root when the message was committed.
pub committed_root: H256,
/// The fully detailed message that was committed
pub message: AbacusMessage,
}
impl CommittedMessage {
/// Return the leaf associated with the message
pub fn to_leaf(&self) -> H256 {
self.message.to_leaf()
}
}
impl AsRef<AbacusMessage> for CommittedMessage {
fn as_ref(&self) -> &AbacusMessage {
&self.message
}
}
impl TryFrom<RawCommittedMessage> for CommittedMessage {
type Error = AbacusError;
fn try_from(raw: RawCommittedMessage) -> Result<Self, Self::Error> {
Ok(Self {
leaf_index: raw.leaf_index,
committed_root: raw.committed_root,
message: AbacusMessage::read_from(&mut &raw.message[..])?,
})
}
}

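// Round-trip sketch for the Encode/Decode impls above (illustrative):
#[cfg(test)]
mod codec_roundtrip {
    use super::*;
    use ethers::core::types::H256;

    #[test]
    fn roundtrip() {
        let msg = RawCommittedMessage {
            leaf_index: 7,
            committed_root: H256::repeat_byte(3),
            message: b"hello".to_vec(),
        };
        let mut buf = vec![];
        msg.write_to(&mut buf).expect("encode");
        let decoded = RawCommittedMessage::read_from(&mut buf.as_slice()).expect("decode");
        assert_eq!(msg, decoded);
    }
}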
@@ -1,11 +1,9 @@
mod common;
mod encode;
-mod home;
mod inbox;
mod indexer;
-mod message;
mod outbox;
-mod replica;
-mod xapp;

use async_trait::async_trait;
use color_eyre::Result;
@@ -16,20 +14,14 @@ use ethers::{
};
use std::error::Error as StdError;

-use crate::{db::DbError, utils::domain_hash, AbacusError, Checkpoint, SignedUpdate};
+use crate::{db::DbError, utils::domain_hash, AbacusError, Checkpoint};

pub use common::*;
pub use encode::*;
-pub use home::*;
pub use inbox::*;
pub use indexer::*;
-pub use message::*;
pub use outbox::*;
-pub use replica::*;
-pub use xapp::*;

-/// Returned by `check_double_update` if double update exists
-#[derive(Debug, Clone, PartialEq)]
-pub struct DoubleUpdate(pub SignedUpdate, pub SignedUpdate);

/// The result of a transaction
#[derive(Debug, Clone, Copy)]
@@ -86,61 +78,6 @@
    }
}
/// Interface for attributes shared by Home and Replica
#[async_trait]
pub trait Common: Sync + Send + std::fmt::Debug {
/// Return an identifier (not necessarily unique) for the chain this
/// contract is running on.
fn name(&self) -> &str;
/// Get the status of a transaction.
async fn status(&self, txid: H256) -> Result<Option<TxOutcome>, ChainCommunicationError>;
/// Fetch the current updater value
async fn updater(&self) -> Result<H256, ChainCommunicationError>;
/// Fetch the current state.
async fn state(&self) -> Result<State, ChainCommunicationError>;
/// Fetch the current root.
async fn committed_root(&self) -> Result<H256, ChainCommunicationError>;
/// Submit a signed update for inclusion
async fn update(&self, update: &SignedUpdate) -> Result<TxOutcome, ChainCommunicationError>;
/// Submit a double update for slashing
async fn double_update(
&self,
double: &DoubleUpdate,
) -> Result<TxOutcome, ChainCommunicationError>;
}
/// Interface for retrieving event data emitted by both the home and replica
#[async_trait]
pub trait CommonEvents: Common + Send + Sync + std::fmt::Debug {
/// Fetch the first signed update building off of `old_root`. If `old_root`
    /// was never accepted or has never been updated, this will return `Ok(None)`.
/// This should fetch events from the chain API
async fn signed_update_by_old_root(
&self,
old_root: H256,
) -> Result<Option<SignedUpdate>, ChainCommunicationError>;
/// Fetch the first signed update with a new root of `new_root`. If update
/// has not been produced, this will return `Ok(None)`. This should fetch
/// events from the chain API
async fn signed_update_by_new_root(
&self,
new_root: H256,
) -> Result<Option<SignedUpdate>, ChainCommunicationError>;
/// Fetch most recent signed_update.
async fn poll_signed_update(&self) -> Result<Option<SignedUpdate>, ChainCommunicationError> {
let committed_root = self.committed_root().await?;
self.signed_update_by_new_root(committed_root).await
}
}
/// Interface for attributes shared by Outbox and Inbox
#[async_trait]
pub trait AbacusCommon: Sync + Send + std::fmt::Debug {
@@ -159,7 +96,7 @@ pub trait AbacusCommon: Sync + Send + std::fmt::Debug {
    /// Get the status of a transaction.
    async fn status(&self, txid: H256) -> Result<Option<TxOutcome>, ChainCommunicationError>;

-    /// Fetch the current updater value
+    /// Fetch the current validator manager value
    async fn validator_manager(&self) -> Result<H256, ChainCommunicationError>;

    /// Fetch the current root.

@@ -81,7 +81,7 @@ pub trait OutboxEvents: Outbox + Send + Sync + std::fmt::Debug {
    /// Fetch the tree_index-th leaf inserted into the merkle tree.
    /// Returns `Ok(None)` if no leaf exists for given `tree_size` (`Ok(None)`
    /// serves as the return value for an index error). If tree_index == 0,
-    /// this will return the first inserted leaf. This is because the Home
+    /// this will return the first inserted leaf. This is because the Outbox
    /// emits the index at which the leaf was inserted in (`tree.count() - 1`),
    /// thus the first inserted leaf has an index of 0.
    async fn leaf_by_tree_index(

@@ -1,43 +0,0 @@
use crate::traits::common::MessageStatus;
use async_trait::async_trait;
use color_eyre::Result;
use ethers::core::types::H256;
use crate::{
accumulator::merkle::Proof,
traits::{ChainCommunicationError, Common, TxOutcome},
AbacusMessage,
};
/// Interface for on-chain replicas
#[async_trait]
pub trait Replica: Common + Send + Sync + std::fmt::Debug {
/// Return the replica domain ID
fn local_domain(&self) -> u32;
/// Return the domain of the replica's linked home
async fn remote_domain(&self) -> Result<u32, ChainCommunicationError>;
/// Dispatch a transaction to prove inclusion of some leaf in the replica.
async fn prove(&self, proof: &Proof) -> Result<TxOutcome, ChainCommunicationError>;
/// Trigger processing of a message
async fn process(&self, message: &AbacusMessage) -> Result<TxOutcome, ChainCommunicationError>;
/// Prove a leaf in the replica and then process its message
async fn prove_and_process(
&self,
message: &AbacusMessage,
proof: &Proof,
) -> Result<TxOutcome, ChainCommunicationError> {
self.prove(proof).await?;
Ok(self.process(message).await?)
}
/// Fetch the status of a message
async fn message_status(&self, leaf: H256) -> Result<MessageStatus, ChainCommunicationError>;
/// Fetch the confirmation time for a specific root
async fn acceptable_root(&self, root: H256) -> Result<bool, ChainCommunicationError>;
}
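
// --- Editor's sketch (illustrative, not part of this diff): driving the trait
// above. Checking `message_status` first lets a caller skip the `prove`
// transaction when the leaf is already proven; names here are hypothetical.
async fn relay_message<R: Replica>(
    replica: &R,
    message: &AbacusMessage,
    proof: &Proof,
) -> Result<TxOutcome, ChainCommunicationError> {
    match replica.message_status(proof.leaf).await? {
        // Already proven: a bare `process` suffices.
        MessageStatus::Proven => replica.process(message).await,
        // Otherwise use the default method, which submits prove then process.
        // (A production caller would also skip `Processed` messages.)
        _ => replica.prove_and_process(message, proof).await,
    }
}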

@ -1,53 +0,0 @@
use crate::{
traits::{ChainCommunicationError, TxOutcome},
AbacusIdentifier, SignedFailureNotification,
};
use async_trait::async_trait;
/// Interface for on-chain XAppConnectionManager
#[async_trait]
pub trait ConnectionManager: Send + Sync + std::fmt::Debug {
/// Return the contract's local domain ID
fn local_domain(&self) -> u32;
/// Returns true if the provided address is an enrolled replica
async fn is_replica(&self, address: AbacusIdentifier) -> Result<bool, ChainCommunicationError>;
/// Returns permission for address at given domain
async fn watcher_permission(
&self,
address: AbacusIdentifier,
domain: u32,
) -> Result<bool, ChainCommunicationError>;
/// onlyOwner function. Enrolls the replica at the given domain.
async fn owner_enroll_replica(
&self,
replica: AbacusIdentifier,
domain: u32,
) -> Result<TxOutcome, ChainCommunicationError>;
/// onlyOwner function. Unenrolls replica.
async fn owner_unenroll_replica(
&self,
replica: AbacusIdentifier,
) -> Result<TxOutcome, ChainCommunicationError>;
/// onlyOwner function. Sets contract's home to provided home.
async fn set_home(&self, home: AbacusIdentifier) -> Result<TxOutcome, ChainCommunicationError>;
/// onlyOwner function. Sets permission for watcher at given domain.
async fn set_watcher_permission(
&self,
watcher: AbacusIdentifier,
domain: u32,
access: bool,
) -> Result<TxOutcome, ChainCommunicationError>;
/// Unenroll the replica at the given domain, given an updater address
/// and a `SignedFailureNotification` from a watcher
async fn unenroll_replica(
&self,
signed_failure: &SignedFailureNotification,
) -> Result<TxOutcome, ChainCommunicationError>;
}
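
// --- Editor's sketch (not part of this diff): the permissionless fraud-response
// path through the trait above. Any caller holding a watcher-signed failure
// notification can unenroll the offending replica; `respond_to_failure` is an
// illustrative name.
async fn respond_to_failure<C: ConnectionManager>(
    xcm: &C,
    signed_failure: &SignedFailureNotification,
) -> Result<(), ChainCommunicationError> {
    let outcome = xcm.unenroll_replica(signed_failure).await?;
    // `TxOutcome::executed` reports whether the transaction succeeded on chain.
    assert!(outcome.executed, "unenroll_replica tx reverted");
    Ok(())
}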

@ -1,71 +0,0 @@
use crate::{utils::domain_hash, AbacusError, AbacusIdentifier, SignerExt};
use ethers::{
prelude::{Address, Signature},
types::H256,
utils::hash_message,
};
use ethers_signers::Signer;
use sha3::{Digest, Keccak256};
/// Failure notification produced by watcher
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct FailureNotification {
/// Domain of failed home
pub home_domain: u32,
/// Failed home's updater
pub updater: AbacusIdentifier,
}
impl FailureNotification {
fn signing_hash(&self) -> H256 {
H256::from_slice(
Keccak256::new()
.chain(domain_hash(self.home_domain))
.chain(self.home_domain.to_be_bytes())
.chain(self.updater.as_ref())
.finalize()
.as_slice(),
)
}
fn prepended_hash(&self) -> H256 {
hash_message(self.signing_hash())
}
/// Sign a `FailureNotification` using the specified signer
pub async fn sign_with<S>(self, signer: &S) -> Result<SignedFailureNotification, S::Error>
where
S: Signer,
{
let signature = signer
.sign_message_without_eip_155(self.signing_hash())
.await?;
Ok(SignedFailureNotification {
notification: self,
signature,
})
}
}
/// Signed failure notification produced by watcher
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct SignedFailureNotification {
/// Failure notification
pub notification: FailureNotification,
/// Signature
pub signature: Signature,
}
impl SignedFailureNotification {
/// Recover the Ethereum address of the signer
pub fn recover(&self) -> Result<Address, AbacusError> {
Ok(self.signature.recover(self.notification.prepended_hash())?)
}
/// Check whether a message was signed by a specific address
pub fn verify(&self, signer: Address) -> Result<(), AbacusError> {
Ok(self
.signature
.verify(self.notification.prepended_hash(), signer)?)
}
}
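
// --- Editor's sketch (not part of this diff): a sign/recover roundtrip for the
// types above, assuming an ethers `LocalWallet` signer and a `From<H160>`
// conversion on `AbacusIdentifier`; both are assumptions for illustration.
async fn failure_roundtrip(
    signer: &ethers_signers::LocalWallet,
) -> Result<(), AbacusError> {
    let notification = FailureNotification {
        home_domain: 1000,
        updater: ethers::types::H160::repeat_byte(0x11).into(),
    };
    let signed = notification.sign_with(signer).await.expect("sign failed");
    // Both recovery and verification run over the EIP-191 prepended hash.
    assert_eq!(signed.recover()?, signer.address());
    signed.verify(signer.address())
}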

@ -9,7 +9,7 @@ const ABACUS_MESSAGE_PREFIX_LEN: usize = 76;
pub struct AbacusMessage {
/// 4 SLIP-44 ID
pub origin: u32,
- /// 32 Address in home convention
+ /// 32 Address in Outbox convention
pub sender: H256,
/// 4 Count of all previous messages to destination
pub nonce: u32,

@ -1,13 +1,9 @@
mod checkpoint;
- mod failure;
mod messages;
- mod update;
/// Unified 32-byte identifier with convenience tooling for handling
/// 20-byte ids (e.g. ethereum addresses)
pub mod identifiers;
pub use checkpoint::*;
- pub use failure::*;
pub use messages::*;
- pub use update::*;

@ -1,184 +0,0 @@
use crate::{utils::domain_hash, AbacusError, Decode, Encode, SignerExt};
use ethers::{
prelude::{Address, Signature},
types::H256,
utils::hash_message,
};
use ethers_signers::Signer;
use serde::{Deserialize, Serialize};
use sha3::{Digest, Keccak256};
/// An Abacus update message
#[derive(Copy, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct Update {
/// The home chain
pub home_domain: u32,
/// The previous root
pub previous_root: H256,
/// The new root
pub new_root: H256,
}
impl std::fmt::Display for Update {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Update(domain {} moved from {} to {})",
self.home_domain, self.previous_root, self.new_root
)
}
}
impl Encode for Update {
fn write_to<W>(&self, writer: &mut W) -> std::io::Result<usize>
where
W: std::io::Write,
{
writer.write_all(&self.home_domain.to_be_bytes())?;
writer.write_all(self.previous_root.as_ref())?;
writer.write_all(self.new_root.as_ref())?;
Ok(4 + 32 + 32)
}
}
impl Decode for Update {
fn read_from<R>(reader: &mut R) -> Result<Self, AbacusError>
where
R: std::io::Read,
Self: Sized,
{
let mut home_domain = [0u8; 4];
reader.read_exact(&mut home_domain)?;
let mut previous_root = H256::zero();
reader.read_exact(previous_root.as_mut())?;
let mut new_root = H256::zero();
reader.read_exact(new_root.as_mut())?;
Ok(Self {
home_domain: u32::from_be_bytes(home_domain),
previous_root,
new_root,
})
}
}
impl Update {
fn signing_hash(&self) -> H256 {
// sign:
// domain(home_domain) || previous_root || new_root
H256::from_slice(
Keccak256::new()
.chain(domain_hash(self.home_domain))
.chain(self.previous_root)
.chain(self.new_root)
.finalize()
.as_slice(),
)
}
fn prepended_hash(&self) -> H256 {
hash_message(self.signing_hash())
}
/// Sign an update using the specified signer
pub async fn sign_with<S: Signer>(self, signer: &S) -> Result<SignedUpdate, S::Error> {
let signature = signer
.sign_message_without_eip_155(self.signing_hash())
.await?;
Ok(SignedUpdate {
update: self,
signature,
})
}
}
/// Metadata stored about an update
#[derive(Copy, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct UpdateMeta {
/// Block number
pub block_number: u64,
}
impl Encode for UpdateMeta {
fn write_to<W>(&self, writer: &mut W) -> std::io::Result<usize>
where
W: std::io::Write,
{
let mut written = 0;
written += self.block_number.write_to(writer)?;
Ok(written)
}
}
impl Decode for UpdateMeta {
fn read_from<R>(reader: &mut R) -> Result<Self, AbacusError>
where
R: std::io::Read,
Self: Sized,
{
let mut block_number = [0u8; 8];
reader.read_exact(&mut block_number)?;
Ok(Self {
block_number: u64::from_be_bytes(block_number),
})
}
}
/// A Signed Abacus Update with Metadata
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct SignedUpdateWithMeta {
/// Signed update
pub signed_update: SignedUpdate,
/// Metadata
pub metadata: UpdateMeta,
}
/// A Signed Abacus Update
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct SignedUpdate {
/// The update
pub update: Update,
/// The signature
pub signature: Signature,
}
impl Encode for SignedUpdate {
fn write_to<W>(&self, writer: &mut W) -> std::io::Result<usize>
where
W: std::io::Write,
{
let mut written = 0;
written += self.update.write_to(writer)?;
written += self.signature.write_to(writer)?;
Ok(written)
}
}
impl Decode for SignedUpdate {
fn read_from<R>(reader: &mut R) -> Result<Self, AbacusError>
where
R: std::io::Read,
Self: Sized,
{
let update = Update::read_from(reader)?;
let signature = Signature::read_from(reader)?;
Ok(Self { update, signature })
}
}
impl SignedUpdate {
/// Recover the Ethereum address of the signer
pub fn recover(&self) -> Result<Address, AbacusError> {
Ok(self.signature.recover(self.update.prepended_hash())?)
}
/// Check whether a message was signed by a specific address
pub fn verify(&self, signer: Address) -> Result<(), AbacusError> {
Ok(self
.signature
.verify(self.update.prepended_hash(), signer)?)
}
}
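
// --- Editor's sketch (not part of this diff): an encode/decode roundtrip for
// the wire format above, i.e. 4-byte big-endian domain || previous_root ||
// new_root = 68 bytes. Names are illustrative.
fn update_roundtrip() -> Result<(), AbacusError> {
    let update = Update {
        home_domain: 1000,
        previous_root: H256::zero(),
        new_root: H256::repeat_byte(0x07),
    };
    let mut buf = Vec::new();
    let written = update.write_to(&mut buf).expect("write failed");
    assert_eq!(written, 68);
    assert_eq!(Update::read_from(&mut buf.as_slice())?, update);
    Ok(())
}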

@ -1,139 +0,0 @@
#![allow(non_snake_case)]
use async_trait::async_trait;
use mockall::*;
use ethers::core::types::H256;
use abacus_core::*;
mock! {
pub HomeContract {
// Home
pub fn _local_domain(&self) -> u32 {}
pub fn _home_domain_hash(&self) -> H256 {}
pub fn _raw_message_by_nonce(
&self,
destination: u32,
nonce: u32,
) -> Result<Option<RawCommittedMessage>, ChainCommunicationError> {}
pub fn _raw_message_by_leaf(
&self,
leaf: H256,
) -> Result<Option<RawCommittedMessage>, ChainCommunicationError> {}
pub fn _leaf_by_tree_index(
&self,
tree_index: usize,
) -> Result<Option<H256>, ChainCommunicationError> {}
pub fn _nonces(&self, destination: u32) -> Result<u32, ChainCommunicationError> {}
pub fn _dispatch(&self, message: &Message) -> Result<TxOutcome, ChainCommunicationError> {}
pub fn _queue_contains(&self, root: H256) -> Result<bool, ChainCommunicationError> {}
pub fn _improper_update(
&self,
update: &SignedUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {}
pub fn _produce_update(&self) -> Result<Option<Update>, ChainCommunicationError> {}
// Common
pub fn _name(&self) -> &str {}
pub fn _status(&self, txid: H256) -> Result<Option<TxOutcome>, ChainCommunicationError> {}
pub fn _updater(&self) -> Result<H256, ChainCommunicationError> {}
pub fn _state(&self) -> Result<State, ChainCommunicationError> {}
pub fn _committed_root(&self) -> Result<H256, ChainCommunicationError> {}
pub fn _update(&self, update: &SignedUpdate) -> Result<TxOutcome, ChainCommunicationError> {}
pub fn _double_update(
&self,
double: &DoubleUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {}
}
}
impl std::fmt::Debug for MockHomeContract {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "MockHomeContract")
}
}
#[async_trait]
impl Home for MockHomeContract {
fn local_domain(&self) -> u32 {
self._local_domain()
}
fn home_domain_hash(&self) -> H256 {
self._home_domain_hash()
}
async fn nonces(&self, destination: u32) -> Result<u32, ChainCommunicationError> {
self._nonces(destination)
}
async fn dispatch(&self, message: &Message) -> Result<TxOutcome, ChainCommunicationError> {
self._dispatch(message)
}
async fn queue_contains(&self, root: H256) -> Result<bool, ChainCommunicationError> {
self._queue_contains(root)
}
async fn improper_update(
&self,
update: &SignedUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {
self._improper_update(update)
}
async fn produce_update(&self) -> Result<Option<Update>, ChainCommunicationError> {
self._produce_update()
}
}
#[async_trait]
impl Common for MockHomeContract {
fn name(&self) -> &str {
self._name()
}
async fn status(&self, txid: H256) -> Result<Option<TxOutcome>, ChainCommunicationError> {
self._status(txid)
}
async fn updater(&self) -> Result<H256, ChainCommunicationError> {
self._updater()
}
async fn state(&self) -> Result<State, ChainCommunicationError> {
self._state()
}
async fn committed_root(&self) -> Result<H256, ChainCommunicationError> {
self._committed_root()
}
async fn update(&self, update: &SignedUpdate) -> Result<TxOutcome, ChainCommunicationError> {
self._update(update)
}
async fn double_update(
&self,
double: &DoubleUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {
self._double_update(double)
}
}
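
// --- Editor's sketch (not part of this diff): exercising the mock above.
// `mockall` generates one `expect_*` method per mocked fn, so `_local_domain`
// is stubbed via `expect__local_domain` (note the double underscore).
#[test]
fn mock_home_reports_local_domain() {
    let mut mock = MockHomeContract::new();
    mock.expect__local_domain().return_const(2000u32);
    // The `Home` impl above forwards `local_domain` to `_local_domain`.
    assert_eq!(mock.local_domain(), 2000);
}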

@ -10,8 +10,6 @@ mock! {
pub Indexer {
pub fn _get_block_number(&self) -> Result<u32> {}
- pub fn _fetch_sorted_updates(&self, from: u32, to: u32) -> Result<Vec<SignedUpdateWithMeta>> {}
pub fn _fetch_sorted_messages(&self, from: u32, to: u32) -> Result<Vec<RawCommittedMessage>> {}
}
}
@ -22,24 +20,6 @@ impl std::fmt::Debug for MockIndexer {
}
}
- #[async_trait]
- impl CommonIndexer for MockIndexer {
- async fn get_block_number(&self) -> Result<u32> {
- self._get_block_number()
- }
- async fn fetch_sorted_updates(&self, from: u32, to: u32) -> Result<Vec<SignedUpdateWithMeta>> {
- self._fetch_sorted_updates(from, to)
- }
- }
- #[async_trait]
- impl HomeIndexer for MockIndexer {
- async fn fetch_sorted_messages(&self, from: u32, to: u32) -> Result<Vec<RawCommittedMessage>> {
- self._fetch_sorted_messages(from, to)
- }
- }
mock! {
pub AbacusIndexer {
pub fn _get_block_number(&self) -> Result<u32> {}

@ -4,20 +4,8 @@ pub mod outbox;
/// Mock inbox contract
pub mod inbox;
- /// Mock home contract
- pub mod home;
- /// Mock replica contract
- pub mod replica;
/// Mock indexer
pub mod indexer;
- /// Mock connection manager contract
- pub mod xapp;
- pub use home::MockHomeContract;
pub use indexer::MockIndexer;
pub use outbox::MockOutboxContract;
- pub use replica::MockReplicaContract;
- pub use xapp::MockConnectionManagerContract;

@ -9,7 +9,7 @@ use abacus_core::*;
mock! {
pub OutboxContract {
- // Home
+ // Outbox
pub fn _local_domain(&self) -> u32 {}
pub fn _domain_hash(&self) -> H256 {}

@ -1,123 +0,0 @@
#![allow(non_snake_case)]
use async_trait::async_trait;
use mockall::*;
use ethers::core::types::H256;
use abacus_core::{accumulator::merkle::Proof, *};
mock! {
pub ReplicaContract {
// Replica
pub fn _local_domain(&self) -> u32 {}
pub fn _remote_domain(&self) -> Result<u32, ChainCommunicationError> {}
pub fn _prove(&self, proof: &Proof) -> Result<TxOutcome, ChainCommunicationError> {}
pub fn _process(&self, message: &AbacusMessage) -> Result<TxOutcome, ChainCommunicationError> {}
pub fn _prove_and_process(
&self,
message: &AbacusMessage,
proof: &Proof,
) -> Result<TxOutcome, ChainCommunicationError> {}
// Common
pub fn _name(&self) -> &str {}
pub fn _status(&self, txid: H256) -> Result<Option<TxOutcome>, ChainCommunicationError> {}
pub fn _updater(&self) -> Result<H256, ChainCommunicationError> {}
pub fn _state(&self) -> Result<State, ChainCommunicationError> {}
pub fn _committed_root(&self) -> Result<H256, ChainCommunicationError> {}
pub fn _update(&self, update: &SignedUpdate) -> Result<TxOutcome, ChainCommunicationError> {}
pub fn _double_update(
&self,
double: &DoubleUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {}
pub fn _message_status(&self, leaf: H256) -> Result<MessageStatus, ChainCommunicationError> {}
pub fn _acceptable_root(&self, root: H256) -> Result<bool, ChainCommunicationError> {}
}
}
impl std::fmt::Debug for MockReplicaContract {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "MockReplicaContract")
}
}
#[async_trait]
impl Replica for MockReplicaContract {
fn local_domain(&self) -> u32 {
self._local_domain()
}
async fn remote_domain(&self) -> Result<u32, ChainCommunicationError> {
self._remote_domain()
}
async fn prove(&self, proof: &Proof) -> Result<TxOutcome, ChainCommunicationError> {
self._prove(proof)
}
async fn process(&self, message: &AbacusMessage) -> Result<TxOutcome, ChainCommunicationError> {
self._process(message)
}
async fn prove_and_process(
&self,
message: &AbacusMessage,
proof: &Proof,
) -> Result<TxOutcome, ChainCommunicationError> {
self._prove_and_process(message, proof)
}
async fn message_status(&self, leaf: H256) -> Result<MessageStatus, ChainCommunicationError> {
self._message_status(leaf)
}
async fn acceptable_root(&self, root: H256) -> Result<bool, ChainCommunicationError> {
self._acceptable_root(root)
}
}
#[async_trait]
impl Common for MockReplicaContract {
fn name(&self) -> &str {
self._name()
}
async fn status(&self, txid: H256) -> Result<Option<TxOutcome>, ChainCommunicationError> {
self._status(txid)
}
async fn updater(&self) -> Result<H256, ChainCommunicationError> {
self._updater()
}
async fn state(&self) -> Result<State, ChainCommunicationError> {
self._state()
}
async fn committed_root(&self) -> Result<H256, ChainCommunicationError> {
self._committed_root()
}
async fn update(&self, update: &SignedUpdate) -> Result<TxOutcome, ChainCommunicationError> {
self._update(update)
}
async fn double_update(
&self,
double: &DoubleUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {
self._double_update(double)
}
}

@ -1,105 +0,0 @@
#![allow(non_snake_case)]
use async_trait::async_trait;
use mockall::*;
use abacus_core::*;
mock! {
pub ConnectionManagerContract {
pub fn _local_domain(&self) -> u32 {}
pub fn _is_replica(&self, address: AbacusIdentifier) -> Result<bool, ChainCommunicationError> {}
pub fn _watcher_permission(
&self,
address: AbacusIdentifier,
domain: u32,
) -> Result<bool, ChainCommunicationError> {}
pub fn _owner_enroll_replica(
&self,
replica: AbacusIdentifier,
domain: u32,
) -> Result<TxOutcome, ChainCommunicationError> {}
pub fn _owner_unenroll_replica(
&self,
replica: AbacusIdentifier,
) -> Result<TxOutcome, ChainCommunicationError> {}
pub fn _set_home(&self, home: AbacusIdentifier) -> Result<TxOutcome, ChainCommunicationError> {}
pub fn _set_watcher_permission(
&self,
watcher: AbacusIdentifier,
domain: u32,
access: bool,
) -> Result<TxOutcome, ChainCommunicationError> {}
pub fn _unenroll_replica(
&self,
signed_failure: &SignedFailureNotification,
) -> Result<TxOutcome, ChainCommunicationError> {}
}
}
impl std::fmt::Debug for MockConnectionManagerContract {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "MockConnectionManagerContract")
}
}
#[async_trait]
impl ConnectionManager for MockConnectionManagerContract {
fn local_domain(&self) -> u32 {
self._local_domain()
}
async fn is_replica(&self, address: AbacusIdentifier) -> Result<bool, ChainCommunicationError> {
self._is_replica(address)
}
async fn watcher_permission(
&self,
address: AbacusIdentifier,
domain: u32,
) -> Result<bool, ChainCommunicationError> {
self._watcher_permission(address, domain)
}
async fn owner_enroll_replica(
&self,
replica: AbacusIdentifier,
domain: u32,
) -> Result<TxOutcome, ChainCommunicationError> {
self._owner_enroll_replica(replica, domain)
}
async fn owner_unenroll_replica(
&self,
replica: AbacusIdentifier,
) -> Result<TxOutcome, ChainCommunicationError> {
self._owner_unenroll_replica(replica)
}
async fn set_home(&self, home: AbacusIdentifier) -> Result<TxOutcome, ChainCommunicationError> {
self._set_home(home)
}
async fn set_watcher_permission(
&self,
watcher: AbacusIdentifier,
domain: u32,
access: bool,
) -> Result<TxOutcome, ChainCommunicationError> {
self._set_watcher_permission(watcher, domain, access)
}
async fn unenroll_replica(
&self,
signed_failure: &SignedFailureNotification,
) -> Result<TxOutcome, ChainCommunicationError> {
self._unenroll_replica(signed_failure)
}
}

@ -48,8 +48,8 @@ mod test {
#[tokio::test]
async fn db_stores_and_retrieves_messages() {
run_test_db(|db| async move {
- let home_name = "home_1".to_owned();
- let db = AbacusDB::new(home_name, db);
+ let outbox_name = "outbox_1".to_owned();
+ let db = AbacusDB::new(outbox_name, db);
let m = AbacusMessage {
origin: 10,
@ -90,8 +90,8 @@ mod test {
#[tokio::test]
async fn db_stores_and_retrieves_proofs() {
run_test_db(|db| async move {
- let home_name = "home_1".to_owned();
- let db = AbacusDB::new(home_name, db);
+ let outbox_name = "outbox_1".to_owned();
+ let db = AbacusDB::new(outbox_name, db);
let proof = Proof {
leaf: H256::from_low_u64_be(15),

@ -1,6 +1,6 @@
use std::{sync::Arc, time::Duration};
- use color_eyre::{eyre::bail, Result};
+ use color_eyre::{eyre::WrapErr, Result};
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
@ -10,30 +10,30 @@ use tracing::{info, Instrument};
use ethers::core::types::H256;
- use abacus_base::{decl_agent, AbacusAgent, AgentCore};
- use abacus_core::{Home, Message, Replica};
+ use abacus_base::{decl_agent, AbacusAgentCore, Agent, CachingInbox};
+ use abacus_core::{AbacusCommon, Message, Outbox};
use crate::settings::KathySettings as Settings;
decl_agent!(Kathy {
duration: u64,
generator: ChatGenerator,
- home_lock: Arc<Mutex<()>>,
+ outbox_lock: Arc<Mutex<()>>,
});
impl Kathy {
- pub fn new(duration: u64, generator: ChatGenerator, core: AgentCore) -> Self {
+ pub fn new(duration: u64, generator: ChatGenerator, core: AbacusAgentCore) -> Self {
Self {
duration,
generator,
core,
- home_lock: Arc::new(Mutex::new(())),
+ outbox_lock: Arc::new(Mutex::new(())),
}
}
}
#[async_trait::async_trait]
- impl AbacusAgent for Kathy {
+ impl Agent for Kathy {
const AGENT_NAME: &'static str = "kathy";
type Settings = Settings;
@ -42,26 +42,22 @@ impl AbacusAgent for Kathy {
Ok(Self::new(
settings.interval.parse().expect("invalid u64"),
settings.chat.into(),
- settings.base.try_into_core(Self::AGENT_NAME).await?,
+ settings.base.try_into_abacus_core(Self::AGENT_NAME).await?,
))
}
+ }
+ impl Kathy {
#[tracing::instrument]
- fn run(&self, name: &str) -> Instrumented<JoinHandle<Result<()>>> {
- let replica_opt = self.replica_by_name(name);
- let name = name.to_owned();
- let home = self.home();
- let home_lock = self.home_lock.clone();
+ fn run_inbox(&self, inbox: Arc<CachingInbox>) -> Instrumented<JoinHandle<Result<()>>> {
+ let outbox = self.outbox();
+ let outbox_lock = self.outbox_lock.clone();
let mut generator = self.generator.clone();
let duration = Duration::from_secs(self.duration);
tokio::spawn(async move {
- if replica_opt.is_none() {
- bail!("No replica named {}", name);
- }
- let replica = replica_opt.unwrap();
- let destination = replica.local_domain();
+ let destination = inbox.local_domain();
loop {
let msg = generator.gen_chat();
@ -82,8 +78,8 @@ impl AbacusAgent for Kathy {
recipient = message.recipient
);
- let guard = home_lock.lock().await;
- home.dispatch(&message).await?;
+ let guard = outbox_lock.lock().await;
+ outbox.dispatch(&message).await?;
drop(guard);
}
_ => {
@ -97,6 +93,27 @@ impl AbacusAgent for Kathy {
})
.in_current_span()
}
+ fn wrap_inbox_run(
+ &self,
+ inbox_name: &str,
+ inbox: Arc<CachingInbox>,
+ ) -> Instrumented<JoinHandle<Result<()>>> {
+ let m = format!("Task for inbox named {} failed", inbox_name);
+ let handle = self.run_inbox(inbox).in_current_span();
+ let fut = async move { handle.await?.wrap_err(m) };
+ tokio::spawn(fut).in_current_span()
+ }
+ pub fn run(&self) -> Instrumented<JoinHandle<Result<()>>> {
+ let inbox_tasks: Vec<Instrumented<JoinHandle<Result<()>>>> = self
+ .inboxes()
+ .iter()
+ .map(|(inbox_name, inbox)| self.wrap_inbox_run(inbox_name, inbox.clone()))
+ .collect();
+ self.run_all(inbox_tasks)
+ }
}
/// Generators for messages

@ -9,7 +9,7 @@ mod settings;
use color_eyre::Result;
- use abacus_base::AbacusAgent;
+ use abacus_base::Agent;
use crate::{kathy::Kathy, settings::KathySettings as Settings};
@ -26,7 +26,7 @@ async fn _main() -> Result<()> {
.start_tracing(agent.metrics().span_duration())?;
let _ = agent.metrics().run_http_server();
- agent.run_all().await?
+ agent.run().await?
}
fn main() -> Result<()> {

@ -1,32 +0,0 @@
[package]
name = "processor"
version = "0.1.0"
authors = ["anna-caroll <anna.s.carroll@gmail.com>"]
edition = "2021"
[dependencies]
tokio = { version = "1.0.1", features = ["rt", "macros"] }
config = "0.10"
serde = "1.0.120"
serde_json = { version = "1.0.61", default-features = false }
log = "0.4.13"
ethers = { git = "https://github.com/gakonst/ethers-rs", branch = "master" }
thiserror = { version = "1.0.22", default-features = false }
async-trait = { version = "0.1.42", default-features = false }
futures-util = "0.3.12"
color-eyre = "0.5.0"
tracing = "0.1.22"
tracing-futures = "0.2.4"
tracing-subscriber = "0.2.15"
rocksdb = { git = "https://github.com/rust-rocksdb/rust-rocksdb" }
abacus-core = { path = "../../abacus-core" }
abacus-base = { path = "../../abacus-base" }
paste = "1.0.5"
prometheus = "0.12"
rusoto_s3 = "0.47.0"
rusoto_core = "0.47.0"
[dev-dependencies]
abacus-test = { path = "../../abacus-test" }

@ -1,46 +0,0 @@
//! The processor observes replicas for updates, then proves and processes them
//!
//! At a regular interval, the processor polls Replicas for updates.
//! If there are updates, the processor submits a proof of their
//! validity and processes them on the Replica's chain
#![forbid(unsafe_code)]
#![warn(missing_docs)]
#![warn(unused_extern_crates)]
mod processor;
mod prover;
mod prover_sync;
mod push;
mod settings;
use color_eyre::Result;
use crate::{processor::Processor, settings::ProcessorSettings};
use abacus_base::AbacusAgent;
async fn _main() -> Result<()> {
color_eyre::install()?;
let config = ProcessorSettings::new()?;
// TODO: top-level root span customizations?
let agent = Processor::from_settings(config).await?;
agent
.as_ref()
.settings
.tracing
.start_tracing(agent.metrics().span_duration())?;
let _ = agent.metrics().run_http_server();
agent.run_all().await??;
Ok(())
}
fn main() -> Result<()> {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
.block_on(_main())
}

@ -1,503 +0,0 @@
use async_trait::async_trait;
use color_eyre::{
eyre::{bail, eyre},
Result,
};
use ethers::prelude::H256;
use futures_util::future::select_all;
use std::{
collections::{HashMap, HashSet},
sync::Arc,
time::Duration,
};
use tokio::{sync::RwLock, task::JoinHandle, time::sleep};
use tracing::{debug, error, info, info_span, instrument, instrument::Instrumented, Instrument};
use abacus_base::{
cancel_task, decl_agent, AbacusAgent, AgentCore, CachingHome, CachingReplica,
ContractSyncMetrics, IndexDataTypes,
};
use abacus_core::{
accumulator::merkle::Proof, db::AbacusDB, CommittedMessage, Common, Home, HomeEvents,
MessageStatus,
};
use crate::{
prover_sync::ProverSync,
push::Pusher,
settings::{ProcessorSettings as Settings, S3Config},
};
const AGENT_NAME: &str = "processor";
static CURRENT_NONCE: &str = "current_nonce_";
enum Flow {
Advance,
Repeat,
}
/// The replica processor is responsible for polling messages and waiting until they validate
/// before proving/processing them.
#[derive(Debug)]
pub(crate) struct Replica {
interval: u64,
replica: Arc<CachingReplica>,
home: Arc<CachingHome>,
db: AbacusDB,
allowed: Option<Arc<HashSet<H256>>>,
denied: Option<Arc<HashSet<H256>>>,
next_message_nonce: Arc<prometheus::IntGaugeVec>,
message_leaf_index_gauge: prometheus::IntGauge,
}
impl std::fmt::Display for Replica {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"ReplicaProcessor: {{ home: {:?}, replica: {:?}, allowed: {:?}, denied: {:?} }}",
self.home, self.replica, self.allowed, self.denied
)
}
}
impl Replica {
#[instrument(skip(self), fields(self = %self))]
fn main(self) -> JoinHandle<Result<()>> {
tokio::spawn(
async move {
use abacus_core::Replica;
let replica_domain = self.replica.local_domain();
// The basic structure of this loop is as follows:
// 1. Get the last processed index
// 2. Check if the Home knows of a message above that index
// - If not, wait and poll again
// 3. Check if we have a proof for that message
// - If not, wait and poll again
// 4. Check if the proof is valid under the replica
// 5. Submit the proof to the replica
let mut next_message_nonce: u32 = self
.db
.retrieve_keyed_decodable(CURRENT_NONCE, &replica_domain)?
.map(|n: u32| n + 1)
.unwrap_or_default();
self.next_message_nonce
.with_label_values(&[self.home.name(), self.replica.name(), AGENT_NAME])
.set(next_message_nonce as i64);
info!(
replica_domain,
nonce = next_message_nonce,
replica = self.replica.name(),
"Starting processor for {}:{} at nonce {}",
self.replica.name(),
replica_domain,
next_message_nonce
);
let last_processed_message_leaf = self.get_last_processed_message_leaf(replica_domain, next_message_nonce).await;
self.message_leaf_index_gauge.set(last_processed_message_leaf);
loop {
let seq_span = tracing::trace_span!(
"ReplicaProcessor",
name = self.replica.name(),
nonce = next_message_nonce,
replica_domain = replica_domain,
home_domain = self.home.local_domain(),
);
match self
.try_msg_by_domain_and_nonce(replica_domain, next_message_nonce)
.instrument(seq_span)
.await
{
Ok(Flow::Advance) => {
self.db
.store_keyed_encodable(CURRENT_NONCE, &replica_domain, &next_message_nonce)?;
next_message_nonce += 1;
self.next_message_nonce
.with_label_values(&[
self.home.name(),
self.replica.name(),
AGENT_NAME,
])
.set(next_message_nonce as i64);
}
Ok(Flow::Repeat) => {
// there was some fault, let's wait and then try again later when state may have moved
debug!(
replica_domain,
nonce = next_message_nonce,
replica = self.replica.name(),
"Trying message failed, retrying. Replica: {}. Nonce: {}. Domain: {}.",
self.replica.name(),
next_message_nonce,
replica_domain,
);
sleep(Duration::from_secs(self.interval)).await
}
Err(e) => {
error!("fatal error in processor::Replica: {}", e);
bail!(e)
}
}
}
}
.in_current_span(),
)
}
// Attempt to get the leaf index of the previously processed message
async fn get_last_processed_message_leaf(&self, domain: u32, next_nonce: u32) -> i64 {
if next_nonce == 0 {
return 0;
}
match self.home.message_by_nonce(domain, next_nonce - 1).await {
Ok(Some(m)) => m.leaf_index as i64,
Ok(None) => 0,
Err(_) => 0,
}
}
/// Attempt to process a message.
///
/// Postcondition: ```match retval? {
/// Advance => message skipped ⊻ message was processed
/// Repeat => try again later
/// }```
///
/// In case of error: send help?
#[instrument(err, skip(self), fields(self = %self))]
async fn try_msg_by_domain_and_nonce(&self, domain: u32, nonce: u32) -> Result<Flow> {
use abacus_core::Replica;
let message = match self.home.message_by_nonce(domain, nonce).await {
Ok(Some(m)) => m,
Ok(None) => {
info!(
domain = domain,
sequence = nonce,
"Message not yet found {}:{}",
domain,
nonce,
);
return Ok(Flow::Repeat);
}
Err(e) => bail!(e),
};
info!(target: "seen_committed_messages", leaf_index = message.leaf_index);
let sender = message.message.sender;
// if we have an allow list, filter senders not on it
if let Some(false) = self.allowed.as_ref().map(|set| set.contains(&sender)) {
info!(
sender = ?sender,
nonce = nonce,
"Skipping message because sender not on allow list. Sender: {}. Domain: {}. Nonce: {}",
sender,
domain,
nonce
);
return Ok(Flow::Advance);
}
// if we have a deny list, filter senders on it
if let Some(true) = self.denied.as_ref().map(|set| set.contains(&sender)) {
info!(
sender = ?sender,
nonce = nonce,
"Skipping message because sender on deny list. Sender: {}. Domain: {}. Nonce: {}",
sender,
domain,
nonce
);
return Ok(Flow::Advance);
}
let proof = match self.db.proof_by_leaf_index(message.leaf_index) {
Ok(Some(p)) => p,
Ok(None) => {
info!(
leaf_hash = ?message.to_leaf(),
leaf_index = message.leaf_index,
"Proof not yet found"
);
return Ok(Flow::Repeat);
}
Err(e) => bail!(e),
};
if proof.leaf != message.to_leaf() {
let msg =
eyre!("Leaf in prover does not match retrieved message. Index: {}. Calculated: {}. Prover: {}.", message.leaf_index, message.to_leaf(), proof.leaf);
error!("{}", msg);
bail!(msg);
}
while !self.replica.acceptable_root(proof.root()).await? {
info!(
leaf_hash = ?message.to_leaf(),
leaf_index = message.leaf_index,
"Proof under {root} not yet valid here, waiting until Replica confirms",
root = proof.root(),
);
sleep(Duration::from_secs(self.interval)).await;
}
info!(
leaf_hash = ?message.to_leaf(),
leaf_index = message.leaf_index,
"Dispatching a message for processing {}:{}",
domain,
nonce
);
let leaf_index = message.leaf_index;
let process_outcome = self.process(message, proof).await;
match process_outcome {
Ok(()) => (),
Err(_) => return Ok(Flow::Repeat),
}
self.message_leaf_index_gauge.set(leaf_index as i64);
Ok(Flow::Advance)
}
#[instrument(err, level = "trace", skip(self), fields(self = %self))]
/// Dispatch a message for processing. If the message is already proven, process only.
async fn process(&self, message: CommittedMessage, proof: Proof) -> Result<()> {
use abacus_core::Replica;
let status = self.replica.message_status(message.to_leaf()).await?;
let tx_outcome;
match status {
MessageStatus::None => {
tx_outcome = self
.replica
.prove_and_process(message.as_ref(), &proof)
.await?;
}
MessageStatus::Proven => {
tx_outcome = self.replica.process(message.as_ref()).await?;
}
MessageStatus::Processed => {
info!(
domain = message.message.destination,
nonce = message.message.nonce,
leaf_index = message.leaf_index,
leaf = ?message.message.to_leaf(),
"Message {}:{} already processed",
message.message.destination,
message.message.nonce
);
return Ok(());
}
}
info!(
domain = message.message.destination,
nonce = message.message.nonce,
leaf_index = message.leaf_index,
leaf = ?message.message.to_leaf(),
"Processed message with tx hash {} and outcome {}. Destination: {}. Nonce: {}. Leaf index: {}.",
tx_outcome.txid,
tx_outcome.executed,
message.message.destination,
message.message.nonce,
message.leaf_index,
);
if !tx_outcome.executed {
let msg = eyre!("Process tx was not successful");
bail!(msg);
}
Ok(())
}
}
decl_agent!(
/// A processor agent
Processor {
interval: u64,
replica_tasks: RwLock<HashMap<String, JoinHandle<Result<()>>>>,
allowed: Option<Arc<HashSet<H256>>>,
denied: Option<Arc<HashSet<H256>>>,
index_only: HashMap<String, bool>,
next_message_nonce: Arc<prometheus::IntGaugeVec>,
config: Option<S3Config>,
}
);
impl Processor {
/// Instantiate a new processor
pub fn new(
interval: u64,
core: AgentCore,
allowed: Option<HashSet<H256>>,
denied: Option<HashSet<H256>>,
index_only: HashMap<String, bool>,
config: Option<S3Config>,
) -> Self {
let next_message_nonce = Arc::new(
core.metrics
.new_int_gauge(
"next_message_nonce",
"Index of the next message to inspect",
&["home", "replica", "agent"],
)
.expect("processor metric already registered -- should have be a singleton"),
);
Self {
interval,
core,
replica_tasks: Default::default(),
allowed: allowed.map(Arc::new),
denied: denied.map(Arc::new),
next_message_nonce,
index_only,
config,
}
}
}
#[async_trait]
#[allow(clippy::unit_arg)]
impl AbacusAgent for Processor {
const AGENT_NAME: &'static str = AGENT_NAME;
type Settings = Settings;
async fn from_settings(settings: Self::Settings) -> Result<Self>
where
Self: Sized,
{
let empty_map = HashMap::new();
Ok(Self::new(
settings.interval.parse().expect("invalid integer"),
settings.as_ref().try_into_core(AGENT_NAME).await?,
settings.allowed,
settings.denied,
settings.indexon.unwrap_or(empty_map),
settings.s3,
))
}
fn run(&self, name: &str) -> Instrumented<JoinHandle<Result<()>>> {
let home = self.home();
let next_message_nonce = self.next_message_nonce.clone();
let message_leaf_index_gauge = self
.core
.metrics
.last_known_message_leaf_index()
.with_label_values(&["process", self.home().name(), name]);
let interval = self.interval;
let db = AbacusDB::new(home.name(), self.db());
let replica_opt = self.replica_by_name(name);
let name = name.to_owned();
let allowed = self.allowed.clone();
let denied = self.denied.clone();
tokio::spawn(async move {
let replica = replica_opt.ok_or_else(|| eyre!("No replica named {}", name))?;
Replica {
interval,
replica,
home,
db,
allowed,
denied,
next_message_nonce,
message_leaf_index_gauge,
}
.main()
.await?
})
.in_current_span()
}
fn run_all(self) -> Instrumented<JoinHandle<Result<()>>>
where
Self: Sized + 'static,
{
tokio::spawn(async move {
info!("Starting Processor tasks");
// tree sync
info!("Starting ProverSync");
let db = AbacusDB::new(self.home().name().to_owned(), self.db());
let sync = ProverSync::from_disk(db.clone());
let prover_sync_task = sync.spawn();
info!("Starting indexer");
let sync_metrics = ContractSyncMetrics::new(
self.metrics(),
Some(&["dispatch", self.home().name(), "unknown"]),
);
let index_settings = self.as_ref().indexer.clone();
let home_sync_task = self.home().sync(
Self::AGENT_NAME.to_owned(),
index_settings,
sync_metrics,
IndexDataTypes::Both,
);
info!("started indexer and sync");
// instantiate task array here so we can optionally push run_task
let mut tasks = vec![home_sync_task, prover_sync_task];
// Filter out the index_only replicas
let names: Vec<&str> = self
.replicas()
.keys()
.filter(|k| !self.index_only.contains_key(k.as_str()))
.map(|k| k.as_str())
.collect();
info!(
"Starting Processor tasks {:?}, config is {:?}",
&names, self.index_only
);
tasks.push(self.run_many(&names));
// if we have a bucket, add a task to push to it
if let Some(config) = &self.config {
info!(bucket = %config.bucket, "Starting S3 push tasks");
tasks.push(
Pusher::new(
self.core.home.name(),
&config.bucket,
config.region.parse().expect("invalid s3 region"),
db.clone(),
self.core
.metrics
.last_known_message_leaf_index()
.with_label_values(&["proving", self.core.home.name(), "unknown"]),
)
.spawn(),
)
}
// find the first task to shut down. Then cancel all others
debug!(tasks = tasks.len(), "Selecting across Processor tasks");
let (res, _, remaining) = select_all(tasks).await;
for task in remaining.into_iter() {
cancel_task!(task);
}
res?
})
.instrument(info_span!("Processor::run_all"))
}
}

@ -1,176 +0,0 @@
//! Prover process: generate proofs in the tree.
//!
//! Struct responsible for syncing Prover
use ethers::core::types::H256;
use abacus_core::accumulator::{
merkle::{merkle_root_from_branch, MerkleTree, MerkleTreeError, Proof},
TREE_DEPTH,
};
/// A depth-32 sparse Merkle tree capable of producing proofs for arbitrary
/// elements.
#[derive(Debug)]
pub struct Prover {
count: usize,
tree: MerkleTree,
}
/// Prover Errors
#[derive(Debug, thiserror::Error)]
pub enum ProverError {
/// Index is above tree max size
#[error("Requested proof for index above u32::MAX: {0}")]
IndexTooHigh(usize),
/// Requested proof for a zero element
#[error("Requested proof for a zero element. Requested: {index}. Tree has: {count}")]
ZeroProof {
/// The index requested
index: usize,
/// The number of leaves
count: usize,
},
/// Bubbled up from underlying
#[error(transparent)]
MerkleTreeError(#[from] MerkleTreeError),
/// Failed proof verification
#[error("Proof verification failed. Root is {expected}, produced is {actual}")]
#[allow(dead_code)]
VerificationFailed {
/// The expected root (this tree's current root)
expected: H256,
/// The root produced by branch evaluation
actual: H256,
},
}
impl Default for Prover {
fn default() -> Self {
let full = MerkleTree::create(&[], TREE_DEPTH);
Self {
count: 0,
tree: full,
}
}
}
impl Prover {
/// Push a leaf to the tree. Appends it to the first unoccupied slot
///
/// This will fail if the underlying tree is full.
pub fn ingest(&mut self, element: H256) -> Result<H256, ProverError> {
self.count += 1;
self.tree.push_leaf(element, TREE_DEPTH)?;
Ok(self.tree.hash())
}
/// Return the current root hash of the tree
pub fn root(&self) -> H256 {
self.tree.hash()
}
/// Return the number of leaves that have been ingested
pub fn count(&self) -> usize {
self.count
}
/// Create a proof of a leaf in this tree.
///
/// Note, if the tree ingests more leaves, the root will need to be recalculated.
pub fn prove(&self, index: usize) -> Result<Proof, ProverError> {
if index > u32::MAX as usize {
return Err(ProverError::IndexTooHigh(index));
}
let count = self.count();
if index >= count {
return Err(ProverError::ZeroProof { index, count });
}
let (leaf, hashes) = self.tree.generate_proof(index, TREE_DEPTH);
let mut path = [H256::zero(); 32];
path.copy_from_slice(&hashes[..32]);
Ok(Proof { leaf, index, path })
}
/// Verify a proof against this tree's root.
#[allow(dead_code)]
pub fn verify(&self, proof: &Proof) -> Result<(), ProverError> {
let actual = merkle_root_from_branch(proof.leaf, &proof.path, TREE_DEPTH, proof.index);
let expected = self.root();
if expected == actual {
Ok(())
} else {
Err(ProverError::VerificationFailed { expected, actual })
}
}
}
impl<T> From<T> for Prover
where
T: AsRef<[H256]>,
{
fn from(t: T) -> Self {
let slice = t.as_ref();
Self {
count: slice.len(),
tree: MerkleTree::create(slice, TREE_DEPTH),
}
}
}
impl std::iter::FromIterator<H256> for Prover {
/// Will panic if the tree fills
fn from_iter<I: IntoIterator<Item = H256>>(iter: I) -> Self {
let mut prover = Self::default();
prover.extend(iter);
prover
}
}
impl std::iter::Extend<H256> for Prover {
/// Will panic if the tree fills
fn extend<I: IntoIterator<Item = H256>>(&mut self, iter: I) {
for i in iter {
self.ingest(i).expect("!tree full");
}
}
}
#[cfg(test)]
mod test {
use super::*;
use abacus_core::test_utils;
use ethers::utils::hash_message;
#[test]
fn it_produces_and_verifies_proofs() {
let test_cases = test_utils::load_merkle_test_json();
for test_case in test_cases.iter() {
let mut tree = Prover::default();
// insert the leaves
for leaf in test_case.leaves.iter() {
let hashed_leaf = hash_message(leaf);
tree.ingest(hashed_leaf).unwrap();
}
// assert the tree has the proper leaf count
assert_eq!(tree.count(), test_case.leaves.len());
// assert the tree generates the proper root
let root = tree.root(); // root is type H256
assert_eq!(root, test_case.expected_root);
for n in 0..test_case.leaves.len() {
// assert the tree generates the proper proof for this leaf
let proof = tree.prove(n).unwrap();
assert_eq!(proof, test_case.proofs[n]);
// check that the tree can verify the proof for this leaf
tree.verify(&proof).unwrap();
}
}
}
}
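
// --- Editor's sketch (not part of this diff), illustrating the note on
// `prove`: a proof is checked against the tree's *current* root, so it goes
// stale once more leaves are ingested.
fn proofs_go_stale() {
    let mut prover = Prover::default();
    prover.ingest(H256::repeat_byte(0x01)).unwrap();
    let proof = prover.prove(0).unwrap();
    prover.verify(&proof).unwrap(); // valid against the current root
    prover.ingest(H256::repeat_byte(0x02)).unwrap();
    // The root has moved; the old proof no longer verifies.
    assert!(prover.verify(&proof).is_err());
}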

@ -1,328 +0,0 @@
use crate::prover::{Prover, ProverError};
use abacus_core::{
accumulator::{incremental::IncrementalMerkle, INITIAL_ROOT},
db::{AbacusDB, DbError},
ChainCommunicationError,
};
use color_eyre::eyre::{bail, Result};
use ethers::core::types::H256;
use std::{fmt::Display, ops::Range, time::Duration};
use tokio::{task::JoinHandle, time::sleep};
use tracing::{debug, error, info, info_span, instrument, instrument::Instrumented, Instrument};
/// Struct to sync prover.
#[derive(Debug)]
pub struct ProverSync {
db: AbacusDB,
prover: Prover,
incremental: IncrementalMerkle,
}
impl Display for ProverSync {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "ProverSync {{ ")?;
write!(
f,
"incremental: {{ root: {:?}, size: {} }}, ",
self.incremental.root(),
self.incremental.count()
)?;
write!(
f,
"prover: {{ root: {:?}, size: {} }} ",
self.prover.root(),
self.prover.count()
)?;
write!(f, "}}")?;
Ok(())
}
}
/// ProverSync errors
#[derive(Debug, thiserror::Error)]
pub enum ProverSyncError {
/// Local tree up-to-date but root does not match signed update
#[error("Local tree up-to-date but root does not match update. Local root: {local_root}. Update root: {new_root}. WARNING: this could indicate malicious updater and/or long reorganization process!")]
MismatchedRoots {
/// Root of prover's local merkle tree
local_root: H256,
/// New root contained in signed update
new_root: H256,
},
/// Local root was never signed by updater and submitted to Home.
#[error("Local root {local_root:?} was never signed by updater and submitted to Home.")]
InvalidLocalRoot {
/// Root of prover's local merkle tree
local_root: H256,
},
/// ProverSync attempts Prover operation and receives ProverError
#[error(transparent)]
ProverError(#[from] ProverError),
/// ProverSync receives ChainCommunicationError from chain API
#[error(transparent)]
ChainCommunicationError(#[from] ChainCommunicationError),
/// DB Error
#[error("{0}")]
DbError(#[from] DbError),
}
impl ProverSync {
fn store_proof(&self, leaf_index: u32) -> Result<(), ProverSyncError> {
match self.prover.prove(leaf_index as usize) {
Ok(proof) => {
self.db.store_proof(leaf_index, &proof)?;
info!(
leaf_index,
root = ?self.prover.root(),
"Storing proof for leaf {}",
leaf_index
);
Ok(())
}
// ignore the storage request if it's out of range (e.g. leaves
// up-to-date but no update containing leaves produced yet)
Err(ProverError::ZeroProof { index: _, count: _ }) => Ok(()),
// bubble up any other errors
Err(e) => Err(e.into()),
}
}
/// Given rocksdb handle `db` containing merkle tree leaves,
/// instantiates new `ProverSync` and fills prover's merkle tree
#[instrument(level = "debug", skip(db))]
pub fn from_disk(db: AbacusDB) -> Self {
// Ingest all leaves in db into prover tree
let mut prover = Prover::default();
let mut incremental = IncrementalMerkle::default();
if let Some(root) = db.retrieve_latest_root().expect("db error") {
for i in 0.. {
match db.leaf_by_leaf_index(i) {
Ok(Some(leaf)) => {
debug!(leaf_index = i, "Ingesting leaf from_disk");
prover.ingest(leaf).expect("!tree full");
incremental.ingest(leaf);
assert_eq!(prover.root(), incremental.root());
if prover.root() == root {
break;
}
}
Ok(None) => break,
Err(e) => {
error!(error = %e, "Error in ProverSync::from_disk");
panic!("Error in ProverSync::from_disk");
}
}
}
info!(target_latest_root = ?root, root = ?incremental.root(), "Reloaded ProverSync from disk");
}
let sync = Self {
prover,
incremental,
db,
};
// Ensure proofs exist for all leaves
for i in 0..sync.prover.count() as u32 {
match (
sync.db.leaf_by_leaf_index(i).expect("db error"),
sync.db.proof_by_leaf_index(i).expect("db error"),
) {
(Some(_), None) => sync.store_proof(i).expect("db error"),
(None, _) => break,
_ => {}
}
}
sync
}
// The current canonical local root. This is the root that the full
// prover currently has. If that root is the initial root, it is 0.
fn local_root(&self) -> H256 {
let root = self.prover.root();
if root == *INITIAL_ROOT {
H256::zero()
} else {
root
}
}
// expensive and poorly done
async fn get_leaf_range(&self, range: Range<usize>) -> Result<Vec<H256>, ProverSyncError> {
let mut leaves = vec![];
for i in range {
let leaf = self.db.wait_for_leaf(i as u32).await?;
leaves.push(leaf);
}
Ok(leaves)
}
/// First attempt to update incremental merkle tree with all leaves
/// produced between `local_root` and `new_root`. If successful (i.e.
/// incremental tree is updated until its root equals the `new_root`),
/// commit to changes by batch updating the prover's actual merkle tree.
#[tracing::instrument(err, skip(self, local_root, new_root), fields(self = %self, local_root = ?local_root, new_root = ?new_root))]
async fn update_full(
&mut self,
local_root: H256,
new_root: H256,
) -> Result<(), ProverSyncError> {
// If roots don't match by end of incremental update, will return
// MismatchedRoots error.
// We destructure the range here to avoid cloning it several times
// later on.
let Range { start, end } = self.update_incremental(local_root, new_root).await?;
// Check that local root still equals prover's root just in case
// another entity wrote to prover while we were building the leaf
// vector. If roots no longer match, return Ok(()) and restart
// poll_updates loop.
if local_root != self.local_root() {
info!("ProverSync: Root mismatch during update. Resuming loop.");
return Ok(());
}
// Extend in-memory tree
info!("Committing leaves {}..{} to prover.", start, end);
let leaves = self.get_leaf_range(start..end).await?;
let num_leaves = leaves.len();
self.prover.extend(leaves.into_iter());
info!("Committed {} leaves to prover.", num_leaves);
if new_root != self.prover.root() {
error!(
start = ?local_root,
expected = ?new_root,
actual = ?self.prover.root(),
"Prover in unexpected state after committing leaves"
);
return Err(ProverSyncError::MismatchedRoots {
local_root: self.prover.root(),
new_root,
});
}
// if we don't already have a proof in the DB
// calculate a proof under the current root for each leaf
// store all calculated proofs in the db
// TODO(luke): refactor prover_sync so we dont have to iterate over every leaf (match from_disk implementation)
for idx in 0..self.prover.count() {
if self.db.proof_by_leaf_index(idx as u32)?.is_none() {
self.store_proof(idx as u32)?;
}
}
Ok(())
}
/// Given `local_root` and `new_root` from signed update, ingest leaves
/// into incremental merkle one-by-one until local root matches new root
/// and return ingested leaves if successful. If incremental merkle is
/// up-to-date with update but roots still don't match, return
/// `MismatchedRoots` error.
#[instrument(err, skip(self), fields(self = %self))]
async fn update_incremental(
&mut self,
local_root: H256,
new_root: H256,
) -> Result<Range<usize>, ProverSyncError> {
// Create copy of ProverSync's incremental so we can easily discard
// changes in case of bad updates
let mut incremental = self.incremental;
let mut current_root = local_root;
let start = incremental.count();
let mut tree_size = start;
info!(
local_root = ?local_root,
new_root = ?new_root,
"Local root is {}, going to root {}",
local_root,
new_root
);
let mut leaves = vec![];
while current_root != new_root {
info!(
current_root = ?local_root,
index = tree_size,
"Retrieving next leaf, at index {}",
tree_size
);
// As we fill the incremental merkle, its tree_size will always be
// equal to the index of the next leaf we want (e.g. if tree_size
// is 3, we want the 4th leaf, which is at index 3)
let leaf = self.db.wait_for_leaf(tree_size as u32).await?;
info!(
index = tree_size,
leaf = ?leaf,
"Leaf at index {} is {}",
tree_size,
leaf
);
incremental.ingest(leaf);
leaves.push(leaf);
current_root = incremental.root();
tree_size = incremental.count();
}
// If local incremental tree is up-to-date but doesn't match new
// root, bubble up MismatchedRoots error
if current_root != new_root {
return Err(ProverSyncError::MismatchedRoots {
local_root: current_root,
new_root,
});
}
info!("Committing leaves {}..{} to incremental.", start, tree_size);
self.incremental = incremental;
assert!(incremental.root() == new_root);
Ok(start..tree_size)
}
/// Consume self and poll for signed updates at regular interval. Update
/// local merkle tree with all leaves between local root and
/// new root. Use short interval for bootup syncing and longer
/// interval for regular polling.
pub fn spawn(mut self) -> Instrumented<JoinHandle<Result<()>>> {
let span = info_span!("ProverSync", self = %self);
tokio::spawn(async move {
loop {
let local_root = self.local_root();
let signed_update_opt = self.db.update_by_previous_root(local_root)?;
// This if block is somewhat ugly.
// First we check if there is a signed update with the local root.
// If so we start ingesting messages under the new root.
// Otherwise, if there is no update,
// We ignore the initial root
// We ensure that an update produced the local root.
// If no update produced the local root, we error.
if let Some(signed_update) = signed_update_opt {
info!(
"Have signed update from {} to {}",
signed_update.update.previous_root, signed_update.update.new_root,
);
self.update_full(local_root, signed_update.update.new_root)
.await?;
} else if !local_root.is_zero() && self.db.update_by_new_root(local_root)?.is_none()
{
bail!(ProverSyncError::InvalidLocalRoot { local_root });
}
// kludge
sleep(Duration::from_millis(100)).await;
}
})
.instrument(span)
}
}
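
// --- Editor's sketch (not part of this diff) of the invariant `ProverSync`
// maintains above: the cheap incremental tree and the full prover ingest the
// same leaves and must agree on the root at every step (cf. the `assert_eq!`
// in `from_disk`). `trees_stay_in_lockstep` is an illustrative name.
fn trees_stay_in_lockstep(leaves: &[H256]) {
    let mut prover = Prover::default();
    let mut incremental = IncrementalMerkle::default();
    for leaf in leaves {
        prover.ingest(*leaf).expect("!tree full");
        incremental.ingest(*leaf);
        assert_eq!(prover.root(), incremental.root());
    }
}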

@ -1,147 +0,0 @@
use std::time::Duration;
use ethers::utils::keccak256;
use rusoto_core::{credential::EnvironmentProvider, HttpClient, Region, RusotoError};
use rusoto_s3::{GetObjectError, GetObjectRequest, PutObjectRequest, S3Client, S3};
use color_eyre::eyre::{bail, eyre, Result};
use abacus_core::{accumulator::merkle::Proof, db::AbacusDB};
use tokio::{task::JoinHandle, time::sleep};
use tracing::{debug, info, info_span, instrument::Instrumented, Instrument};
#[derive(serde::Serialize, serde::Deserialize)]
struct ProvenMessage {
message: Vec<u8>,
proof: Proof,
}
/// Pushes proofs to an S3 bucket
pub struct Pusher {
name: String,
bucket: String,
region: Region,
db: AbacusDB,
client: S3Client,
message_leaf_index_gauge: prometheus::IntGauge,
}
impl std::fmt::Debug for Pusher {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Pusher")
.field("region", &self.region)
.field("bucket", &self.bucket)
.field("name", &self.name)
.finish()
}
}
impl Pusher {
/// Instantiate a new pusher with a region
pub fn new(
name: &str,
bucket: &str,
region: Region,
db: AbacusDB,
message_leaf_index_gauge: prometheus::IntGauge,
) -> Self {
let client = S3Client::new_with(
HttpClient::new().unwrap(),
EnvironmentProvider::default(),
region.clone(),
);
Self {
name: name.to_owned(),
bucket: bucket.to_owned(),
region,
db,
client,
message_leaf_index_gauge,
}
}
async fn upload_proof(&self, proven: &ProvenMessage) -> Result<()> {
let key = self.key(proven);
let proof_json = Vec::from(serde_json::to_string_pretty(proven)?);
info!(
leaf = ?proven.proof.leaf,
leaf_index = proven.proof.index,
key = %key,
"Storing proof in s3 bucket",
);
let req = PutObjectRequest {
key,
bucket: self.bucket.clone(),
body: Some(proof_json.into()),
content_type: Some("application/json".to_owned()),
..Default::default()
};
self.client.put_object(req).await?;
Ok(())
}
async fn already_uploaded(&self, proven: &ProvenMessage) -> Result<bool> {
let req = GetObjectRequest {
key: self.key(proven),
bucket: self.bucket.clone(),
..Default::default()
};
let resp = self.client.get_object(req).await;
match resp {
Ok(_) => {
debug!(
leaf = ?proven.proof.leaf,
leaf_index = proven.proof.index,
key = %self.key(proven),
"Proof already stored in bucket"
);
Ok(true)
}
Err(RusotoError::Service(GetObjectError::NoSuchKey(_))) => Ok(false),
Err(e) => bail!(e),
}
}
fn key(&self, proven: &ProvenMessage) -> String {
format!("{}_{}", self.name, proven.proof.index)
}
/// Spawn the pusher task and return a joinhandle
///
/// The pusher task polls the DB for new proofs and attempts to push them
/// to an S3 bucket
pub fn spawn(self) -> Instrumented<JoinHandle<Result<()>>> {
let span = info_span!(
"ProofPusher",
bucket = %self.bucket,
region = self.region.name(),
home = %self.name,
);
tokio::spawn(async move {
let mut index = 0;
loop {
let proof = self.db.proof_by_leaf_index(index)?;
match proof {
Some(proof) => {
let message = self
.db
.message_by_leaf_index(index)?
.map(|message| message.message)
.ok_or_else(|| eyre!("Missing message for known proof"))?;
debug_assert_eq!(keccak256(&message), *proof.leaf.as_fixed_bytes());
let proven = ProvenMessage { proof, message };
// upload if not already present
if !self.already_uploaded(&proven).await? {
self.upload_proof(&proven).await?;
}
self.message_leaf_index_gauge.set(index as i64);
index += 1;
}
None => sleep(Duration::from_millis(500)).await,
}
}
})
.instrument(span)
}
}
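
// --- Editor's note (not part of this diff): shape of what the pusher writes.
// For a hypothetical home named "kovan" and leaf index 42, `key` above yields
// the object key "kovan_42", and the body is the pretty-printed JSON of
// `ProvenMessage`, roughly:
//   {
//     "message": [ ...raw message bytes... ],
//     "proof": { "leaf": "0x…", "index": 42, "path": [ ...32 hashes... ] }
//   }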

@ -1,25 +0,0 @@
//! Configuration
use ethers::prelude::H256;
use serde::Deserialize;
use std::collections::{HashMap, HashSet};
use abacus_base::decl_settings;
#[derive(Debug, Deserialize, Clone)]
pub struct S3Config {
pub bucket: String,
pub region: String,
}
decl_settings!(Processor {
/// The polling interval (in seconds)
interval: String,
/// An allow list of message senders
allowed: Option<HashSet<H256>>,
/// A deny list of message senders
denied: Option<HashSet<H256>>,
/// Only index transactions if this key is set
indexon: Option<HashMap<String, bool>>,
/// An amazon aws s3 bucket to push proofs to
s3: Option<S3Config>,
});
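A hedged usage sketch: decl_settings! is assumed to generate a ProcessorSettings struct with a layered new() constructor, as the agent mains further below suggest. Field names follow the declaration above.
// Hypothetical consumption of these settings (error handling elided):
let settings = ProcessorSettings::new()?;
let interval: u64 = settings.interval.parse().expect("invalid uint");
if let Some(s3) = &settings.s3 {
tracing::info!(bucket = %s3.bucket, region = %s3.region, "Pushing proofs to S3");
}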

@ -1,7 +1,7 @@
-//! The relayer forwards signed updates from the home chain to replicas
+//! The relayer forwards signed checkpoints from the outbox chain to inboxes
//!
-//! At a regular interval, the relayer polls Home for signed updates and
-//! submits them as updates with a pending timelock on the replica.
+//! At a regular interval, the relayer polls Outbox for signed checkpoints and
+//! submits them as checkpoints on the inbox.
#![forbid(unsafe_code)]
#![warn(missing_docs)]

@ -20,7 +20,7 @@ pub struct Relayer {
relayer_message_processing: bool,
checkpoint_syncer: CheckpointSyncers,
core: AbacusAgentCore,
-updates_relayed_count: Arc<prometheus::IntCounterVec>,
+checkpoints_relayed_count: Arc<prometheus::IntCounterVec>,
}
impl AsRef<AbacusAgentCore> for Relayer {
@ -40,12 +40,12 @@ impl Relayer {
checkpoint_syncer: CheckpointSyncers,
core: AbacusAgentCore,
) -> Self {
-let updates_relayed_count = Arc::new(
+let checkpoints_relayed_count = Arc::new(
core.metrics
.new_int_counter(
-"updates_relayed_count",
-"Number of updates relayed from given home to replica",
-&["home", "replica", "agent"],
+"checkpoints_relayed_count",
+"Number of checkpoints relayed from given outbox to inbox",
+&["outbox", "inbox", "agent"],
)
.expect("processor metric already registered -- should have been a singleton"),
);
@ -57,7 +57,7 @@ impl Relayer {
relayer_message_processing,
checkpoint_syncer,
core,
-updates_relayed_count,
+checkpoints_relayed_count,
}
}
}

@ -1,34 +0,0 @@
[package]
name = "updater"
version = "0.1.0"
authors = ["James Prestwich <prestwich@clabs.co>"]
edition = "2021"
[dependencies]
tokio = { version = "1.0.1", features = ["rt", "macros"] }
config = "0.11.0"
serde = "1.0.120"
serde_json = { version = "1.0.61", default-features = false }
log = "0.4.13"
ethers = { git = "https://github.com/gakonst/ethers-rs", branch = "master" }
thiserror = { version = "1.0.22", default-features = false }
async-trait = { version = "0.1.42", default-features = false }
futures-util = "0.3.12"
color-eyre = "0.5.0"
tracing = "0.1.22"
tracing-futures = "0.2.4"
tracing-subscriber = "0.2.15"
rocksdb = { git = "https://github.com/rust-rocksdb/rust-rocksdb" }
abacus-core = { path = "../../abacus-core" }
abacus-base = { path = "../../abacus-base" }
abacus-ethereum = { path = "../../chains/abacus-ethereum" }
paste = "1.0.5"
prometheus = "0.12"
warp = "0.3"
hex = "0.4.3"
[dev-dependencies]
mockall = "0.9.1"
abacus-test = { path = "../../abacus-test" }

@ -1,67 +0,0 @@
//! The updater signs updates and submits them to the home chain.
//!
//! This updater polls the Home for queued updates at a regular interval.
//! It signs them and submits them back to the home chain.
#![forbid(unsafe_code)]
#![warn(missing_docs)]
#![warn(unused_extern_crates)]
mod produce;
mod settings;
mod submit;
mod updater;
use color_eyre::Result;
use futures_util::future::select_all;
use abacus_base::{cancel_task, AbacusAgent, ContractSyncMetrics, IndexDataTypes};
use crate::{settings::UpdaterSettings as Settings, updater::Updater};
#[allow(unused_must_use)]
async fn _main() -> Result<()> {
color_eyre::install()?;
let settings = Settings::new()?;
let agent = Updater::from_settings(settings).await?;
agent
.as_ref()
.settings
.tracing
.start_tracing(agent.metrics().span_duration())?;
let _ = agent.metrics().run_http_server();
// this is deliberately different from other agents because the updater
// does not run replicas. As a result, most of the contents of run_all are
// broken out here
let sync_metrics = ContractSyncMetrics::new(agent.metrics(), None);
let index_settings = agent.as_ref().indexer.clone();
let sync_task = agent.home().sync(
Updater::AGENT_NAME.to_owned(),
index_settings,
sync_metrics,
IndexDataTypes::Updates,
);
let run_task = agent.run("");
let futs = vec![sync_task, run_task];
let (_, _, remaining) = select_all(futs).await;
for task in remaining.into_iter() {
cancel_task!(task);
}
Ok(())
}
fn main() -> Result<()> {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
.block_on(_main())
}

@ -1,139 +0,0 @@
use ethers::core::types::H256;
use prometheus::IntCounterVec;
use std::{sync::Arc, time::Duration};
use abacus_base::{AbacusAgent, CachingHome};
use abacus_core::{db::AbacusDB, Common, Home, Signers};
use color_eyre::Result;
use tokio::{task::JoinHandle, time::sleep};
use tracing::{debug, info, info_span, instrument::Instrumented, Instrument};
use crate::updater::Updater;
#[derive(Debug)]
pub(crate) struct UpdateProducer {
home: Arc<CachingHome>,
db: AbacusDB,
signer: Arc<Signers>,
interval_seconds: u64,
update_pause: u64,
signed_attestation_count: IntCounterVec,
}
impl UpdateProducer {
pub(crate) fn new(
home: Arc<CachingHome>,
db: AbacusDB,
signer: Arc<Signers>,
interval_seconds: u64,
update_pause: u64,
signed_attestation_count: IntCounterVec,
) -> Self {
Self {
home,
db,
signer,
interval_seconds,
update_pause,
signed_attestation_count,
}
}
fn find_latest_root(&self) -> Result<H256> {
// If db latest root is empty, this will produce `H256::default()`
// which is equal to `H256::zero()`
Ok(self.db.retrieve_latest_root()?.unwrap_or_default())
}
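// Editorial note: `H256::default()` and `H256::zero()` are both the 32-byte
// zero value, so on a fresh DB the producer starts from the pre-genesis
// zero root, i.e. `assert_eq!(H256::default(), H256::zero())` holds.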
pub(crate) fn spawn(self) -> Instrumented<JoinHandle<Result<()>>> {
let span = info_span!("UpdateProducer");
tokio::spawn(async move {
loop {
// We sleep at the top of the loop so that continues still pause between iterations
sleep(Duration::from_secs(self.interval_seconds)).await;
let current_root = self.find_latest_root()?;
if let Some(suggested) = self.home.produce_update().await? {
if suggested.previous_root != current_root {
// This either indicates that the indexer is catching
// up or that the chain is awaiting a new update. We
// should ignore it.
// Hack: sometimes the indexer misses an update, which causes
// the updater to get stuck forever. Detect that situation here
// and "auto-heal" by re-storing the produced update as latest.
if let Some(previously_produced_update) = self.db.retrieve_produced_update(current_root)? {
if previously_produced_update.update.previous_root == current_root && previously_produced_update.update.new_root == suggested.previous_root {
info!(
previous_root = ?previously_produced_update.update.previous_root,
new_root = ?previously_produced_update.update.new_root,
suggested_new_root = ?suggested.new_root,
"Suggested previous root matches produced previous update in DB"
);
self.db.store_latest_update(&previously_produced_update)?;
}
}
debug!(
local = ?suggested.previous_root,
remote = ?current_root,
"Local root not equal to chain root. Skipping update."
);
continue;
}
// Ensure we have not already signed a conflicting update.
// Ignore suggested if we have.
if let Some(existing) = self.db.retrieve_produced_update(suggested.previous_root)? {
if existing.update.new_root != suggested.new_root {
info!("Updater ignoring conflicting suggested update. Indicates chain awaiting already produced update. Existing update: {:?}. Suggested conflicting update: {:?}.", &existing, &suggested);
continue;
}
}
// Sleep for `update_pause` seconds so we can check for
// unwanted state changes afterwards
sleep(Duration::from_secs(self.update_pause)).await;
// If the HomeIndexer found a new root that doesn't
// match our most current root, continue
if self.find_latest_root()? != current_root {
continue;
}
// If the home's produced update builds off a different root than
// our suggested update's previous root, continue
if let Some(check_suggested) = self.home.produce_update().await? {
if check_suggested.previous_root != suggested.previous_root {
continue;
}
} else {
continue;
}
// If the suggested matches our local view, sign an update
// and store it as locally produced
let signed = suggested.sign_with(self.signer.as_ref()).await?;
self.signed_attestation_count
.with_label_values(&[self.home.name(), Updater::AGENT_NAME])
.inc();
let hex_signature = format!("0x{}", hex::encode(signed.signature.to_vec()));
info!(
previous_root = ?signed.update.previous_root,
new_root = ?signed.update.new_root,
hex_signature = %hex_signature,
"Storing new update in DB for broadcast"
);
self.db.store_produced_update(&signed)?;
}
}
})
.instrument(span)
}
}
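The loop above layers three guards before signing; a condensed, hypothetical restatement of that policy (the helper name and shape are editorial, not part of the diff; Update is the abacus_core type):
async fn safe_to_sign(p: &UpdateProducer, suggested: &Update) -> Result<bool> {
// 1. the suggestion must build on our locally indexed root
if suggested.previous_root != p.find_latest_root()? {
return Ok(false);
}
// 2. wait out the reorg pause, then re-check the local root
sleep(Duration::from_secs(p.update_pause)).await;
if p.find_latest_root()? != suggested.previous_root {
return Ok(false);
}
// 3. the chain must still be suggesting the same previous root
Ok(matches!(
p.home.produce_update().await?,
Some(s) if s.previous_root == suggested.previous_root
))
}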

@ -1,13 +0,0 @@
//! Configuration
use abacus_base::*;
decl_settings!(Updater {
/// The updater attestation signer
updater: abacus_base::SignerConf,
/// The polling interval (in seconds)
interval: String,
/// The delay (in seconds) before an updater will attempt to submit a
/// signed update. This prevents accidental slashing due to reorgs on
/// chains with slow or probabilistic finality
pause: String,
});
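A hedged configuration sketch: with the layered config-crate conventions the agents appear to use, these fields could be supplied roughly as follows (variable names are illustrative assumptions, not documented flags):
// OPT_UPDATER_UPDATER_KEY=<hex private key>  -> updater (SignerConf)
// OPT_UPDATER_INTERVAL=10                    -> interval, seconds between polls
// OPT_UPDATER_PAUSE=15                       -> pause, seconds before submitting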

@ -1,71 +0,0 @@
use std::sync::Arc;
use abacus_base::{AbacusAgent, CachingHome};
use abacus_core::{db::AbacusDB, Common};
use prometheus::IntCounterVec;
use std::time::Duration;
use color_eyre::Result;
use tokio::{task::JoinHandle, time::sleep};
use tracing::{info, info_span, instrument::Instrumented, Instrument};
use crate::updater::Updater;
pub(crate) struct UpdateSubmitter {
home: Arc<CachingHome>,
db: AbacusDB,
interval_seconds: u64,
submitted_update_count: IntCounterVec,
}
impl UpdateSubmitter {
pub(crate) fn new(
home: Arc<CachingHome>,
db: AbacusDB,
interval_seconds: u64,
submitted_update_count: IntCounterVec,
) -> Self {
Self {
home,
db,
interval_seconds,
submitted_update_count,
}
}
pub(crate) fn spawn(self) -> Instrumented<JoinHandle<Result<()>>> {
let span = info_span!("UpdateSubmitter");
tokio::spawn(async move {
// start from the chain state
let mut committed_root = self.home.committed_root().await?;
info!(committed_root = ?committed_root, "Updater submitter start");
loop {
sleep(Duration::from_secs(self.interval_seconds)).await;
// if we have produced an update building off the committed root
// submit it
if let Some(signed) = self.db.retrieve_produced_update(committed_root)? {
let hex_signature = format!("0x{}", hex::encode(signed.signature.to_vec()));
info!(
previous_root = ?signed.update.previous_root,
new_root = ?signed.update.new_root,
hex_signature = %hex_signature,
"Submitting update to chain"
);
self.home.update(&signed).await?;
self.submitted_update_count
.with_label_values(&[self.home.name(), Updater::AGENT_NAME])
.inc();
// continue from local state
committed_root = signed.update.new_root;
}
}
})
.instrument(span)
}
}
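// Design note (editorial): `committed_root` is re-read from the chain on
// startup and only advanced after a successful `update`, so a restarted
// submitter resumes where the chain left off; a duplicate submission would
// simply revert against the contract's committed-root check.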

@ -1,136 +0,0 @@
use std::sync::Arc;
use async_trait::async_trait;
use color_eyre::{eyre::ensure, Result};
use ethers::{signers::Signer, types::Address};
use futures_util::future::select_all;
use prometheus::IntCounterVec;
use tokio::task::JoinHandle;
use tracing::{info, instrument::Instrumented, Instrument};
use crate::{
produce::UpdateProducer, settings::UpdaterSettings as Settings, submit::UpdateSubmitter,
};
use abacus_base::{AbacusAgent, AgentCore};
use abacus_core::{db::AbacusDB, Common, Signers};
/// An updater agent
#[derive(Debug)]
pub struct Updater {
signer: Arc<Signers>,
interval_seconds: u64,
update_pause: u64,
pub(crate) core: AgentCore,
signed_attestation_count: IntCounterVec,
submitted_update_count: IntCounterVec,
}
impl AsRef<AgentCore> for Updater {
fn as_ref(&self) -> &AgentCore {
&self.core
}
}
impl Updater {
/// Instantiate a new updater
pub fn new(signer: Signers, interval_seconds: u64, update_pause: u64, core: AgentCore) -> Self {
let signed_attestation_count = core
.metrics
.new_int_counter(
"signed_attestation_count",
"Number of attestations signed",
&["network", "agent"],
)
.expect("must be able to register agent metrics");
let submitted_update_count = core
.metrics
.new_int_counter(
"submitted_update_count",
"Number of updates successfully submitted to home",
&["network", "agent"],
)
.expect("must be able to register agent metrics");
Self {
signer: Arc::new(signer),
interval_seconds,
update_pause,
core,
signed_attestation_count,
submitted_update_count,
}
}
}
#[async_trait]
// This is a bit of a kludge to make from_settings work.
// Ideally this should be generic across all signers.
// Right now we only have one.
impl AbacusAgent for Updater {
const AGENT_NAME: &'static str = "updater";
type Settings = Settings;
async fn from_settings(settings: Self::Settings) -> Result<Self>
where
Self: Sized,
{
let signer = settings.updater.try_into_signer().await?;
let interval_seconds = settings.interval.parse().expect("invalid uint");
let update_pause = settings.pause.parse().expect("invalid uint");
let core = settings.as_ref().try_into_core(Self::AGENT_NAME).await?;
Ok(Self::new(signer, interval_seconds, update_pause, core))
}
fn run(&self, _replica: &str) -> Instrumented<JoinHandle<Result<()>>> {
// First we check that we have the correct key to sign with.
let home = self.home();
let address = self.signer.address();
let db = AbacusDB::new(self.home().name(), self.db());
info!(
"Updater is running with interval {:?} and pause {:?}",
&self.interval_seconds, &self.update_pause
);
let produce = UpdateProducer::new(
self.home(),
db.clone(),
self.signer.clone(),
self.interval_seconds,
self.update_pause,
self.signed_attestation_count.clone(),
);
let submit = UpdateSubmitter::new(
self.home(),
db,
self.interval_seconds,
self.submitted_update_count.clone(),
);
tokio::spawn(async move {
let expected: Address = home.updater().await?.into();
ensure!(
expected == address,
"Contract updater does not match keys. On-chain: {}. Local: {}",
expected,
address
);
let produce_task = produce.spawn();
let submit_task = submit.spawn();
let (res, _, rem) = select_all(vec![produce_task, submit_task]).await;
for task in rem.into_iter() {
task.into_inner().abort();
}
res?
})
.in_current_span()
}
}
#[cfg(test)]
mod test {}

@ -1,31 +0,0 @@
[package]
name = "watcher"
version = "0.1.0"
authors = ["Luke Tchang <ltchang@stanford.edu>"]
edition = "2021"
[dependencies]
tokio = { version = "1.0.1", features = ["rt", "macros"] }
config = "0.10"
serde = "1.0.120"
serde_json = { version = "1.0.61", default-features = false }
log = "0.4.13"
ethers = { git = "https://github.com/gakonst/ethers-rs", branch = "master" }
thiserror = { version = "1.0.22", default-features = false }
async-trait = { version = "0.1.42", default-features = false }
futures-util = "0.3.12"
color-eyre = "0.5.0"
tracing = "0.1.22"
tracing-futures = "0.2.4"
tracing-subscriber = "0.2.15"
rocksdb = { git = "https://github.com/rust-rocksdb/rust-rocksdb" }
abacus-core = { path = "../../abacus-core" }
abacus-base = { path = "../../abacus-base" }
abacus-ethereum = { path = "../../chains/abacus-ethereum" }
paste = "1.0.5"
[dev-dependencies]
tokio-test = "0.4.0"
abacus-test = { path = "../../abacus-test" }
prometheus = "0.12"

@ -1,44 +0,0 @@
//! The watcher observes the home and replicas for double update fraud.
//!
//! At a regular interval, the watcher polls Home and Replicas for signed
//! updates and checks them against its local DB of updates for fraud. It
//! checks for double updates on both the Home and Replicas and fraudulent
//! updates on just the Replicas by verifying Replica updates on the Home.
#![forbid(unsafe_code)]
#![warn(missing_docs)]
#![warn(unused_extern_crates)]
mod settings;
mod watcher;
use color_eyre::Result;
use abacus_base::AbacusAgent;
use crate::{settings::WatcherSettings as Settings, watcher::Watcher};
async fn _main() -> Result<()> {
color_eyre::install()?;
let settings = Settings::new()?;
let agent = Watcher::from_settings(settings).await?;
agent
.as_ref()
.settings
.tracing
.start_tracing(agent.metrics().span_duration())?;
let _ = agent.metrics().run_http_server();
agent.run_all().await??;
Ok(())
}
fn main() -> Result<()> {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
.block_on(_main())
}

@ -1,13 +0,0 @@
//! Configuration
use abacus_base::{decl_settings, ChainSetup, SignerConf};
use std::collections::HashMap;
decl_settings!(Watcher {
/// The watcher's attestation signer
watcher: SignerConf,
/// The connection managers to notify of failure
managers: HashMap<String, ChainSetup>,
/// The polling interval (in seconds)
interval: String,
});

@ -1,980 +0,0 @@
use async_trait::async_trait;
use color_eyre::{eyre::bail, Report, Result};
use thiserror::Error;
use ethers::core::types::H256;
use futures_util::future::{join, join_all, select_all};
use std::{collections::HashMap, fmt::Display, sync::Arc, time::Duration};
use tokio::{
sync::{mpsc, oneshot, RwLock},
task::JoinHandle,
time::sleep,
};
use tracing::{error, info, info_span, instrument::Instrumented, Instrument};
use abacus_base::{
cancel_task, AbacusAgent, AgentCore, CachingHome, ConnectionManagers, ContractSyncMetrics,
IndexDataTypes,
};
use abacus_core::{
db::AbacusDB, ChainCommunicationError, Common, CommonEvents, ConnectionManager, DoubleUpdate,
FailureNotification, Home, SignedUpdate, Signers, TxOutcome,
};
use crate::settings::WatcherSettings as Settings;
const AGENT_NAME: &str = "watcher";
#[derive(Debug, Error)]
enum WatcherError {
#[error("Syncing finished")]
SyncingFinished,
}
#[derive(Debug)]
pub struct ContractWatcher<C>
where
C: Common + CommonEvents + ?Sized + 'static,
{
interval: u64,
committed_root: H256,
tx: mpsc::Sender<SignedUpdate>,
contract: Arc<C>,
}
impl<C> Display for ContractWatcher<C>
where
C: Common + CommonEvents + ?Sized + 'static,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "ContractWatcher {{ ")?;
write!(f, "interval: {}", self.interval)?;
write!(f, "committed_root: {}", self.committed_root)?;
write!(f, "contract: {}", self.contract.name())?;
write!(f, "}}")?;
Ok(())
}
}
impl<C> ContractWatcher<C>
where
C: Common + CommonEvents + ?Sized + 'static,
{
pub fn new(
interval: u64,
from: H256,
tx: mpsc::Sender<SignedUpdate>,
contract: Arc<C>,
) -> Self {
Self {
interval,
committed_root: from,
tx,
contract,
}
}
async fn poll_and_send_update(&mut self) -> Result<()> {
let update_opt = self
.contract
.signed_update_by_old_root(self.committed_root)
.await?;
if update_opt.is_none() {
info!(
"No new update found. Previous root: {}. From contract: {}.",
self.committed_root,
self.contract.name()
);
return Ok(());
}
let new_update = update_opt.unwrap();
self.committed_root = new_update.update.new_root;
info!(
"Sending new update to UpdateHandler. Update: {:?}. From contract: {}.",
&new_update,
self.contract.name()
);
self.tx.send(new_update).await?;
Ok(())
}
#[tracing::instrument]
fn spawn(mut self) -> JoinHandle<Result<()>> {
tokio::spawn(async move {
loop {
self.poll_and_send_update().await?;
sleep(Duration::from_secs(self.interval)).await;
}
})
}
}
#[derive(Debug)]
pub struct HistorySync<C>
where
C: Common + CommonEvents + ?Sized + 'static,
{
interval: u64,
committed_root: H256,
tx: mpsc::Sender<SignedUpdate>,
contract: Arc<C>,
}
impl<C> HistorySync<C>
where
C: Common + CommonEvents + ?Sized + 'static,
{
pub fn new(
interval: u64,
from: H256,
tx: mpsc::Sender<SignedUpdate>,
contract: Arc<C>,
) -> Self {
Self {
committed_root: from,
tx,
contract,
interval,
}
}
async fn update_history(&mut self) -> Result<()> {
let previous_update = self
.contract
.signed_update_by_new_root(self.committed_root)
.await?;
if previous_update.is_none() {
info!(
"HistorySync for contract {} has finished.",
self.contract.name()
);
return Err(Report::new(WatcherError::SyncingFinished));
}
// Dispatch to the handler
let previous_update = previous_update.unwrap();
self.tx.send(previous_update.clone()).await?;
// set up for next loop iteration
self.committed_root = previous_update.update.previous_root;
if self.committed_root.is_zero() {
info!(
"HistorySync for contract {} has finished.",
self.contract.name()
);
return Err(Report::new(WatcherError::SyncingFinished));
}
Ok(())
}
#[tracing::instrument]
fn spawn(mut self) -> JoinHandle<Result<()>> {
tokio::spawn(async move {
loop {
let res = self.update_history().await;
if res.is_err() {
// Syncing done
break;
}
sleep(Duration::from_secs(self.interval)).await;
}
Ok(())
})
}
}
#[derive(Debug)]
pub struct UpdateHandler {
rx: mpsc::Receiver<SignedUpdate>,
watcher_db: AbacusDB,
home: Arc<CachingHome>,
}
impl UpdateHandler {
pub fn new(
rx: mpsc::Receiver<SignedUpdate>,
watcher_db: AbacusDB,
home: Arc<CachingHome>,
) -> Self {
Self {
rx,
watcher_db,
home,
}
}
fn check_double_update(&mut self, update: &SignedUpdate) -> Result<(), DoubleUpdate> {
let old_root = update.update.previous_root;
let new_root = update.update.new_root;
match self
.watcher_db
.update_by_previous_root(old_root)
.expect("!db_get")
{
Some(existing) => {
if existing.update.new_root != new_root {
error!(
"UpdateHandler detected double update! Existing: {:?}. Double: {:?}.",
&existing, &update
);
return Err(DoubleUpdate(existing, update.to_owned()));
}
}
None => {
info!(
"UpdateHandler storing new update from root {} to {}. Update: {:?}.",
&update.update.previous_root, &update.update.new_root, &update
);
self.watcher_db.store_update(update).expect("!db_put");
}
}
Ok(())
}
/// Receive updates and check them for fraud. If a double update is
/// found, return Ok(double_update). This loop should never exit naturally
/// unless the channel for sending new updates is closed, in which case we
/// return an error.
#[tracing::instrument]
fn spawn(mut self) -> JoinHandle<Result<DoubleUpdate>> {
tokio::spawn(async move {
loop {
let update = self.rx.recv().await;
// channel is closed
if update.is_none() {
bail!("Channel closed.")
}
let update = update.unwrap();
let old_root = update.update.previous_root;
if old_root == self.home.committed_root().await? {
// It is okay if tx reverts
let _ = self.home.update(&update).await;
}
if let Err(double_update) = self.check_double_update(&update) {
return Ok(double_update);
}
}
})
}
}
type TaskMap = Arc<RwLock<HashMap<String, Instrumented<JoinHandle<Result<()>>>>>>;
#[derive(Debug)]
pub struct Watcher {
signer: Arc<Signers>,
interval_seconds: u64,
sync_tasks: TaskMap,
watch_tasks: TaskMap,
connection_managers: Vec<ConnectionManagers>,
core: AgentCore,
}
impl AsRef<AgentCore> for Watcher {
fn as_ref(&self) -> &AgentCore {
&self.core
}
}
#[allow(clippy::unit_arg)]
impl Watcher {
/// Instantiate a new watcher.
pub fn new(
signer: Signers,
interval_seconds: u64,
connection_managers: Vec<ConnectionManagers>,
core: AgentCore,
) -> Self {
Self {
signer: Arc::new(signer),
interval_seconds,
sync_tasks: Default::default(),
watch_tasks: Default::default(),
connection_managers,
core,
}
}
async fn shutdown(&self) {
for (_, v) in self.watch_tasks.write().await.drain() {
cancel_task!(v);
}
for (_, v) in self.sync_tasks.write().await.drain() {
cancel_task!(v);
}
}
// Handle a double-update once it has been detected.
#[tracing::instrument]
async fn handle_failure(
&self,
double: &DoubleUpdate,
) -> Vec<Result<TxOutcome, ChainCommunicationError>> {
// Create vector of double update futures
let mut double_update_futs: Vec<_> = self
.core
.replicas
.values()
.map(|replica| replica.double_update(double))
.collect();
double_update_futs.push(self.core.home.double_update(double));
// Create signed failure notification
let signed_failure = FailureNotification {
home_domain: self.home().local_domain(),
updater: self.home().updater().await.unwrap().into(),
}
.sign_with(self.signer.as_ref())
.await
.expect("!sign");
// Create vector of futures for unenrolling replicas (one per
// connection manager)
let mut unenroll_futs = Vec::new();
for connection_manager in self.connection_managers.iter() {
unenroll_futs.push(connection_manager.unenroll_replica(&signed_failure));
}
// Join both vectors of double update and unenroll futures and
// return a vector containing all results
let (double_update_res, unenroll_res) =
join(join_all(double_update_futs), join_all(unenroll_futs)).await;
double_update_res
.into_iter()
.chain(unenroll_res.into_iter())
.collect()
}
fn run_watch_tasks(
&self,
double_update_tx: oneshot::Sender<DoubleUpdate>,
) -> Instrumented<JoinHandle<Result<()>>> {
let home = self.home();
let replicas = self.replicas().clone();
let watcher_db_name = format!("{}_{}", home.name(), AGENT_NAME);
let watcher_db = AbacusDB::new(watcher_db_name, self.db());
let interval_seconds = self.interval_seconds;
let sync_tasks = self.sync_tasks.clone();
let watch_tasks = self.watch_tasks.clone();
tokio::spawn(async move {
// Spawn update handler
let (tx, rx) = mpsc::channel(200);
let handler = UpdateHandler::new(rx, watcher_db, home.clone()).spawn();
// For each replica, spawn polling and history syncing tasks
info!("Spawning replica watch and sync tasks...");
for (name, replica) in replicas {
info!("Spawning watch and sync tasks for replica {}.", name);
let from = replica.committed_root().await?;
watch_tasks.write().await.insert(
(*name).to_owned(),
ContractWatcher::new(interval_seconds, from, tx.clone(), replica.clone())
.spawn()
.in_current_span(),
);
sync_tasks.write().await.insert(
(*name).to_owned(),
HistorySync::new(interval_seconds, from, tx.clone(), replica)
.spawn()
.in_current_span(),
);
}
// Spawn polling and history syncing tasks for home
info!("Starting watch and sync tasks for home {}.", home.name());
let from = home.committed_root().await?;
let home_watcher =
ContractWatcher::new(interval_seconds, from, tx.clone(), home.clone())
.spawn()
.in_current_span();
let home_sync = HistorySync::new(interval_seconds, from, tx.clone(), home)
.spawn()
.in_current_span();
// Wait for update handler to finish (should only happen when the
// watcher is manually shut down)
let double_update_res = handler.await?;
// Cancel running tasks
tracing::info!("Update handler has resolved. Cancelling all other tasks");
cancel_task!(home_watcher);
cancel_task!(home_sync);
// If the update receiver channel was closed we will error out. The
// only way we get past this point is that we successfully found a
// double update.
let double_update = double_update_res?;
error!("Double update found! Sending through through double update tx! Double update: {:?}.", &double_update);
if let Err(e) = double_update_tx.send(double_update) {
bail!("Failed to send double update through oneshot: {:?}", e);
}
Ok(())
})
.in_current_span()
}
}
#[async_trait]
#[allow(clippy::unit_arg)]
impl AbacusAgent for Watcher {
const AGENT_NAME: &'static str = AGENT_NAME;
type Settings = Settings;
#[tracing::instrument(err)]
async fn from_settings(settings: Self::Settings) -> Result<Self>
where
Self: Sized,
{
let mut connection_managers = vec![];
for chain_setup in settings.managers.values() {
let signer = settings.base.get_signer(&chain_setup.name).await;
let manager = chain_setup.try_into_connection_manager(signer).await;
connection_managers.push(manager);
}
let (connection_managers, errors): (Vec<_>, Vec<_>) =
connection_managers.into_iter().partition(Result::is_ok);
// Report any invalid ConnectionManager chain setups
errors.into_iter().for_each(|e| {
let err = e.unwrap_err();
tracing::error!("{:?}", err)
});
let connection_managers: Vec<_> = connection_managers
.into_iter()
.map(Result::unwrap)
.collect();
let core = settings.as_ref().try_into_core("watcher").await?;
Ok(Self::new(
settings.watcher.try_into_signer().await?,
settings.interval.parse().expect("invalid uint"),
connection_managers,
core,
))
}
#[tracing::instrument]
fn run(&self, _name: &str) -> Instrumented<tokio::task::JoinHandle<Result<()>>> {
panic!("Watcher::run should not be called. Always call run_all")
}
fn run_many(&self, _replicas: &[&str]) -> Instrumented<JoinHandle<Result<()>>> {
panic!("Watcher::run_many should not be called. Always call run_all")
}
fn run_all(self) -> Instrumented<JoinHandle<Result<()>>>
where
Self: Sized + 'static,
{
tokio::spawn(async move {
info!("Starting Watcher tasks");
let sync_metrics = ContractSyncMetrics::new(self.metrics(), None);
let index_settings = &self.as_ref().indexer;
let home_sync_task = self.home().sync(
Self::AGENT_NAME.to_owned(),
index_settings.clone(),
sync_metrics,
IndexDataTypes::Updates,
);
let replica_sync_tasks: Vec<Instrumented<JoinHandle<Result<()>>>> = self
.replicas()
.iter()
.map(|(_name, replica)| {
let replica_sync_metrics = ContractSyncMetrics::new(self.metrics(), None);
replica.sync(
Self::AGENT_NAME.to_owned(),
index_settings.clone(),
replica_sync_metrics,
)
})
.collect();
// Watcher watch tasks setup
let (double_update_tx, mut double_update_rx) = oneshot::channel::<DoubleUpdate>();
let watch_tasks = self.run_watch_tasks(double_update_tx);
// Race index and run tasks
info!("selecting");
let mut tasks = vec![home_sync_task, watch_tasks];
tasks.extend(replica_sync_tasks);
let (_, _, remaining) = select_all(tasks).await;
// Cancel lagging task and watcher polling/syncing tasks
for task in remaining.into_iter() {
cancel_task!(task);
}
self.shutdown().await;
// Check if double update was sent during run task
match double_update_rx.try_recv() {
Ok(double_update) => {
tracing::error!(
double_update = ?double_update,
"Double update detected! Notifying all contracts and unenrolling replicas! Double update: {:?}",
double_update
);
self.handle_failure(&double_update)
.await
.iter()
.for_each(|res| tracing::info!("{:#?}", res));
bail!(
r#"
Double update detected!
All contracts notified!
Replicas unenrolled!
Watcher has been shut down!
"#
)
}
Err(_) => Ok(()),
}
})
.instrument(info_span!("Watcher::run_all"))
}
}
#[cfg(test)]
mod test {
use abacus_base::IndexSettings;
use abacus_test::mocks::MockIndexer;
use std::sync::Arc;
use tokio::sync::mpsc;
use ethers::core::types::H256;
use ethers::signers::{LocalWallet, Signer};
use abacus_base::{CachingReplica, CommonIndexers, HomeIndexers, Homes, Replicas};
use abacus_core::{DoubleUpdate, SignedFailureNotification, Update};
use abacus_test::{
mocks::{MockConnectionManagerContract, MockHomeContract, MockReplicaContract},
test_utils,
};
use super::*;
#[tokio::test]
async fn contract_watcher_polls_and_sends_update() {
test_utils::run_test_db(|db| async move {
let signer: LocalWallet =
"1111111111111111111111111111111111111111111111111111111111111111"
.parse()
.unwrap();
let first_root = H256::from([0; 32]);
let second_root = H256::from([1; 32]);
let signed_update = Update {
home_domain: 1,
previous_root: first_root,
new_root: second_root,
}
.sign_with(&signer)
.await
.expect("!sign");
let mut mock_home = MockHomeContract::new();
let abacus_db = AbacusDB::new("home_1", db.clone());
{
mock_home.expect__name().return_const("home_1".to_owned());
// When home polls for new update it gets `signed_update`
abacus_db.store_latest_update(&signed_update).unwrap();
}
let mock_home_indexer = Arc::new(MockIndexer::new().into());
let home: Arc<CachingHome> =
CachingHome::new(mock_home.into(), abacus_db.clone(), mock_home_indexer).into();
let (tx, mut rx) = mpsc::channel(200);
let mut contract_watcher =
ContractWatcher::new(3, first_root, tx.clone(), home.clone());
contract_watcher
.poll_and_send_update()
.await
.expect("Should have received Ok(())");
assert_eq!(contract_watcher.committed_root, second_root);
assert_eq!(rx.recv().await.unwrap(), signed_update);
})
.await
}
#[tokio::test]
async fn history_sync_updates_history() {
test_utils::run_test_db(|db| async move {
let signer: LocalWallet =
"1111111111111111111111111111111111111111111111111111111111111111"
.parse()
.unwrap();
let zero_root = H256::zero(); // Original zero root
let first_root = H256::from([1; 32]);
let second_root = H256::from([2; 32]);
// Zero root to first root
let first_signed_update = Update {
home_domain: 1,
previous_root: zero_root,
new_root: first_root,
}
.sign_with(&signer)
.await
.expect("!sign");
// First root to second root
let second_signed_update = Update {
home_domain: 1,
previous_root: first_root,
new_root: second_root,
}
.sign_with(&signer)
.await
.expect("!sign");
let mut mock_home = MockHomeContract::new();
let abacus_db = AbacusDB::new("home_1", db.clone());
{
mock_home.expect__name().return_const("home_1".to_owned());
// When HistorySync works through history it finds second and first signed updates
abacus_db.store_latest_update(&first_signed_update).unwrap();
abacus_db
.store_latest_update(&second_signed_update)
.unwrap();
}
let mock_home_indexer = Arc::new(MockIndexer::new().into());
let home: Arc<CachingHome> =
CachingHome::new(mock_home.into(), abacus_db.clone(), mock_home_indexer).into();
let (tx, mut rx) = mpsc::channel(200);
let mut history_sync = HistorySync::new(3, second_root, tx.clone(), home.clone());
// First update_history call returns first -> second update
history_sync
.update_history()
.await
.expect("Should have received Ok(())");
assert_eq!(history_sync.committed_root, first_root);
assert_eq!(rx.recv().await.unwrap(), second_signed_update);
// Second update_history call returns zero -> first update
// and should return WatcherError::SyncingFinished
history_sync
.update_history()
.await
.expect_err("Should have received WatcherError::SyncingFinished");
assert_eq!(history_sync.committed_root, zero_root);
assert_eq!(rx.recv().await.unwrap(), first_signed_update)
})
.await
}
#[tokio::test]
async fn update_handler_detects_double_update() {
test_utils::run_test_db(|db| async move {
let signer: LocalWallet =
"1111111111111111111111111111111111111111111111111111111111111111"
.parse()
.unwrap();
let first_root = H256::from([1; 32]);
let second_root = H256::from([2; 32]);
let third_root = H256::from([3; 32]);
let bad_third_root = H256::from([4; 32]);
let first_update = Update {
home_domain: 1,
previous_root: first_root,
new_root: second_root,
}
.sign_with(&signer)
.await
.expect("!sign");
let second_update = Update {
home_domain: 1,
previous_root: second_root,
new_root: third_root,
}
.sign_with(&signer)
.await
.expect("!sign");
let bad_second_update = Update {
home_domain: 1,
previous_root: second_root,
new_root: bad_third_root,
}
.sign_with(&signer)
.await
.expect("!sign");
let mut mock_home = MockHomeContract::new();
mock_home.expect__name().return_const("home_1".to_owned());
let abacus_db = AbacusDB::new("home_1_watcher", db);
let mock_home_indexer = Arc::new(MockIndexer::new().into());
let home: Arc<CachingHome> =
CachingHome::new(mock_home.into(), abacus_db.clone(), mock_home_indexer).into();
let (_tx, rx) = mpsc::channel(200);
let mut handler = UpdateHandler {
rx,
watcher_db: abacus_db.clone(),
home,
};
let _first_update_ret = handler
.check_double_update(&first_update)
.expect("Update should have been valid");
let _second_update_ret = handler
.check_double_update(&second_update)
.expect("Update should have been valid");
let bad_second_update_ret = handler
.check_double_update(&bad_second_update)
.expect_err("Update should have been invalid");
assert_eq!(
bad_second_update_ret,
DoubleUpdate(second_update, bad_second_update)
);
})
.await
}
#[tokio::test]
async fn it_fails_contracts_and_unenrolls_replicas_on_double_update() {
test_utils::run_test_db(|db| async move {
let home_domain = 1;
let updater: LocalWallet =
"1111111111111111111111111111111111111111111111111111111111111111"
.parse()
.unwrap();
// Double update setup
let first_root = H256::from([1; 32]);
let second_root = H256::from([2; 32]);
let bad_second_root = H256::from([3; 32]);
let update = Update {
home_domain,
previous_root: first_root,
new_root: second_root,
}
.sign_with(&updater)
.await
.expect("!sign");
let bad_update = Update {
home_domain,
previous_root: first_root,
new_root: bad_second_root,
}
.sign_with(&updater)
.await
.expect("!sign");
let double = DoubleUpdate(update, bad_update);
let signed_failure = FailureNotification {
home_domain,
updater: updater.address().into(),
}
.sign_with(&updater)
.await
.expect("!sign");
// Contract setup
let mut mock_connection_manager_1 = MockConnectionManagerContract::new();
let mut mock_connection_manager_2 = MockConnectionManagerContract::new();
let mut mock_home = MockHomeContract::new();
let mut mock_replica_1 = MockReplicaContract::new();
let mut mock_replica_2 = MockReplicaContract::new();
// Home and replica expectations
{
mock_home.expect__name().return_const("home_1".to_owned());
mock_home
.expect__local_domain()
.times(1)
.return_once(move || home_domain);
let updater = updater.clone();
mock_home
.expect__updater()
.times(1)
.return_once(move || Ok(updater.address().into()));
// home.double_update called once
let double = double.clone();
mock_home
.expect__double_update()
.withf(move |d: &DoubleUpdate| *d == double)
.times(1)
.return_once(move |_| {
Ok(TxOutcome {
txid: H256::default(),
executed: true,
})
});
}
{
mock_replica_1
.expect__name()
.return_const("replica_1".to_owned());
// replica_1.double_update called once
let double = double.clone();
mock_replica_1
.expect__double_update()
.withf(move |d: &DoubleUpdate| *d == double)
.times(1)
.return_once(move |_| {
Ok(TxOutcome {
txid: H256::default(),
executed: true,
})
});
}
{
mock_replica_2
.expect__name()
.return_const("replica_2".to_owned());
// replica_2.double_update called once
let double = double.clone();
mock_replica_2
.expect__double_update()
.withf(move |d: &DoubleUpdate| *d == double)
.times(1)
.return_once(move |_| {
Ok(TxOutcome {
txid: H256::default(),
executed: true,
})
});
}
// Connection manager expectations
{
// connection_manager_1.unenroll_replica called once
let signed_failure = signed_failure.clone();
mock_connection_manager_1
.expect__unenroll_replica()
.withf(move |f: &SignedFailureNotification| *f == signed_failure)
.times(1)
.return_once(move |_| {
Ok(TxOutcome {
txid: H256::default(),
executed: true,
})
});
}
{
// connection_manager_2.unenroll_replica called once
let signed_failure = signed_failure.clone();
mock_connection_manager_2
.expect__unenroll_replica()
.withf(move |f: &SignedFailureNotification| *f == signed_failure)
.times(1)
.return_once(move |_| {
Ok(TxOutcome {
txid: H256::default(),
executed: true,
})
});
}
// Watcher agent setup
let connection_managers: Vec<ConnectionManagers> = vec![
mock_connection_manager_1.into(),
mock_connection_manager_2.into(),
];
let mock_indexer: Arc<CommonIndexers> = Arc::new(MockIndexer::new().into());
let mock_home_indexer: Arc<HomeIndexers> = Arc::new(MockIndexer::new().into());
let mut mock_home: Homes = mock_home.into();
let mut mock_replica_1: Replicas = mock_replica_1.into();
let mut mock_replica_2: Replicas = mock_replica_2.into();
let home_db = AbacusDB::new("home_1", db.clone());
let replica_1_db = AbacusDB::new("replica_1", db.clone());
let replica_2_db = AbacusDB::new("replica_2", db.clone());
{
let home: Arc<CachingHome> = CachingHome::new(
mock_home.clone(),
home_db.clone(),
mock_home_indexer.clone(),
)
.into();
let replica_1: Arc<CachingReplica> = CachingReplica::new(
mock_replica_1.clone(),
replica_1_db.clone(),
mock_indexer.clone(),
)
.into();
let replica_2: Arc<CachingReplica> = CachingReplica::new(
mock_replica_2.clone(),
replica_2_db.clone(),
mock_indexer.clone(),
)
.into();
let mut replica_map: HashMap<String, Arc<CachingReplica>> = HashMap::new();
replica_map.insert("replica_1".into(), replica_1);
replica_map.insert("replica_2".into(), replica_2);
let core = AgentCore {
home: home.clone(),
replicas: replica_map,
db,
indexer: IndexSettings::default(),
settings: abacus_base::Settings::default(),
metrics: Arc::new(
abacus_base::CoreMetrics::new(
"watcher_test",
None,
Arc::new(prometheus::Registry::new()),
)
.expect("could not make metrics"),
),
};
let mut watcher = Watcher::new(updater.into(), 1, connection_managers, core);
watcher.handle_failure(&double).await;
// Checkpoint connection managers
for connection_manager in watcher.connection_managers.iter_mut() {
connection_manager.checkpoint();
}
}
// Checkpoint home and replicas
Arc::get_mut(&mut mock_home).unwrap().checkpoint();
Arc::get_mut(&mut mock_replica_1).unwrap().checkpoint();
Arc::get_mut(&mut mock_replica_2).unwrap().checkpoint();
})
.await
}
}

@ -1,601 +0,0 @@
[
{
"inputs": [
{
"internalType": "uint32",
"name": "_localDomain",
"type": "uint32"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "bytes32",
"name": "messageHash",
"type": "bytes32"
},
{
"indexed": true,
"internalType": "uint256",
"name": "leafIndex",
"type": "uint256"
},
{
"indexed": true,
"internalType": "uint64",
"name": "destinationAndNonce",
"type": "uint64"
},
{
"indexed": false,
"internalType": "bytes32",
"name": "committedRoot",
"type": "bytes32"
},
{
"indexed": false,
"internalType": "bytes",
"name": "message",
"type": "bytes"
}
],
"name": "Dispatch",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "bytes32",
"name": "oldRoot",
"type": "bytes32"
},
{
"indexed": false,
"internalType": "bytes32[2]",
"name": "newRoot",
"type": "bytes32[2]"
},
{
"indexed": false,
"internalType": "bytes",
"name": "signature",
"type": "bytes"
},
{
"indexed": false,
"internalType": "bytes",
"name": "signature2",
"type": "bytes"
}
],
"name": "DoubleUpdate",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "bytes32",
"name": "oldRoot",
"type": "bytes32"
},
{
"indexed": false,
"internalType": "bytes32",
"name": "newRoot",
"type": "bytes32"
},
{
"indexed": false,
"internalType": "bytes",
"name": "signature",
"type": "bytes"
}
],
"name": "ImproperUpdate",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "address",
"name": "updater",
"type": "address"
}
],
"name": "NewUpdater",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "address",
"name": "updaterManager",
"type": "address"
}
],
"name": "NewUpdaterManager",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "previousOwner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "OwnershipTransferred",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "uint32",
"name": "homeDomain",
"type": "uint32"
},
{
"indexed": true,
"internalType": "bytes32",
"name": "oldRoot",
"type": "bytes32"
},
{
"indexed": true,
"internalType": "bytes32",
"name": "newRoot",
"type": "bytes32"
},
{
"indexed": false,
"internalType": "bytes",
"name": "signature",
"type": "bytes"
}
],
"name": "Update",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "updater",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "reporter",
"type": "address"
}
],
"name": "UpdaterSlashed",
"type": "event"
},
{
"inputs": [],
"name": "MAX_MESSAGE_BODY_BYTES",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "VERSION",
"outputs": [
{
"internalType": "uint8",
"name": "",
"type": "uint8"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "committedRoot",
"outputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "count",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint32",
"name": "_destinationDomain",
"type": "uint32"
},
{
"internalType": "bytes32",
"name": "_recipientAddress",
"type": "bytes32"
},
{
"internalType": "bytes",
"name": "_messageBody",
"type": "bytes"
}
],
"name": "dispatch",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes32",
"name": "_oldRoot",
"type": "bytes32"
},
{
"internalType": "bytes32[2]",
"name": "_newRoot",
"type": "bytes32[2]"
},
{
"internalType": "bytes",
"name": "_signature",
"type": "bytes"
},
{
"internalType": "bytes",
"name": "_signature2",
"type": "bytes"
}
],
"name": "doubleUpdate",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "homeDomainHash",
"outputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes32",
"name": "_oldRoot",
"type": "bytes32"
},
{
"internalType": "bytes32",
"name": "_newRoot",
"type": "bytes32"
},
{
"internalType": "bytes",
"name": "_signature",
"type": "bytes"
}
],
"name": "improperUpdate",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "contract IUpdaterManager",
"name": "_updaterManager",
"type": "address"
}
],
"name": "initialize",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "localDomain",
"outputs": [
{
"internalType": "uint32",
"name": "",
"type": "uint32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint32",
"name": "",
"type": "uint32"
}
],
"name": "nonces",
"outputs": [
{
"internalType": "uint32",
"name": "",
"type": "uint32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes32",
"name": "_item",
"type": "bytes32"
}
],
"name": "queueContains",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "queueEnd",
"outputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "queueLength",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "renounceOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "root",
"outputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_updater",
"type": "address"
}
],
"name": "setUpdater",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_updaterManager",
"type": "address"
}
],
"name": "setUpdaterManager",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "state",
"outputs": [
{
"internalType": "enum Common.States",
"name": "",
"type": "uint8"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "suggestUpdate",
"outputs": [
{
"internalType": "bytes32",
"name": "_committedRoot",
"type": "bytes32"
},
{
"internalType": "bytes32",
"name": "_new",
"type": "bytes32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "transferOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "tree",
"outputs": [
{
"internalType": "uint256",
"name": "count",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes32",
"name": "_committedRoot",
"type": "bytes32"
},
{
"internalType": "bytes32",
"name": "_newRoot",
"type": "bytes32"
},
{
"internalType": "bytes",
"name": "_signature",
"type": "bytes"
}
],
"name": "update",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "updater",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "updaterManager",
"outputs": [
{
"internalType": "contract IUpdaterManager",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
}
]
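A hedged consumption sketch: the Rust agents bind this ABI with ethers-rs abigen! (see EthereumHomeInternal further below). Reading suggestUpdate through those generated bindings might look like this, assuming an ethers Middleware client wrapped in an Arc and a known contract address:
let home = EthereumHomeInternal::new(address, client);
// suggestUpdate returns (committedRoot, newRoot) as two bytes32 values
let (committed_root, new_root) = home.suggest_update().call().await?;
if new_root != [0u8; 32] {
// a queued root is available for the updater to sign over
}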

@ -1,518 +0,0 @@
[
{
"inputs": [
{
"internalType": "uint32",
"name": "_localDomain",
"type": "uint32"
},
{
"internalType": "uint256",
"name": "_processGas",
"type": "uint256"
},
{
"internalType": "uint256",
"name": "_reserveGas",
"type": "uint256"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "bytes32",
"name": "oldRoot",
"type": "bytes32"
},
{
"indexed": false,
"internalType": "bytes32[2]",
"name": "newRoot",
"type": "bytes32[2]"
},
{
"indexed": false,
"internalType": "bytes",
"name": "signature",
"type": "bytes"
},
{
"indexed": false,
"internalType": "bytes",
"name": "signature2",
"type": "bytes"
}
],
"name": "DoubleUpdate",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "address",
"name": "updater",
"type": "address"
}
],
"name": "NewUpdater",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "previousOwner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "OwnershipTransferred",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "bytes32",
"name": "messageHash",
"type": "bytes32"
},
{
"indexed": true,
"internalType": "bool",
"name": "success",
"type": "bool"
},
{
"indexed": true,
"internalType": "bytes",
"name": "returnData",
"type": "bytes"
}
],
"name": "Process",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "uint32",
"name": "homeDomain",
"type": "uint32"
},
{
"indexed": true,
"internalType": "bytes32",
"name": "oldRoot",
"type": "bytes32"
},
{
"indexed": true,
"internalType": "bytes32",
"name": "newRoot",
"type": "bytes32"
},
{
"indexed": false,
"internalType": "bytes",
"name": "signature",
"type": "bytes"
}
],
"name": "Update",
"type": "event"
},
{
"inputs": [],
"name": "PROCESS_GAS",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "RESERVE_GAS",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "VERSION",
"outputs": [
{
"internalType": "uint8",
"name": "",
"type": "uint8"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes32",
"name": "_root",
"type": "bytes32"
}
],
"name": "acceptableRoot",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "committedRoot",
"outputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"name": "confirmAt",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes32",
"name": "_oldRoot",
"type": "bytes32"
},
{
"internalType": "bytes32[2]",
"name": "_newRoot",
"type": "bytes32[2]"
},
{
"internalType": "bytes",
"name": "_signature",
"type": "bytes"
},
{
"internalType": "bytes",
"name": "_signature2",
"type": "bytes"
}
],
"name": "doubleUpdate",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "homeDomainHash",
"outputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint32",
"name": "_remoteDomain",
"type": "uint32"
},
{
"internalType": "address",
"name": "_updater",
"type": "address"
},
{
"internalType": "bytes32",
"name": "_committedRoot",
"type": "bytes32"
},
{
"internalType": "uint256",
"name": "_optimisticSeconds",
"type": "uint256"
}
],
"name": "initialize",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "localDomain",
"outputs": [
{
"internalType": "uint32",
"name": "",
"type": "uint32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"name": "messages",
"outputs": [
{
"internalType": "enum Replica.MessageStatus",
"name": "",
"type": "uint8"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "optimisticSeconds",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes",
"name": "_message",
"type": "bytes"
}
],
"name": "process",
"outputs": [
{
"internalType": "bool",
"name": "_success",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes32",
"name": "_leaf",
"type": "bytes32"
},
{
"internalType": "bytes32[32]",
"name": "_proof",
"type": "bytes32[32]"
},
{
"internalType": "uint256",
"name": "_index",
"type": "uint256"
}
],
"name": "prove",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes",
"name": "_message",
"type": "bytes"
},
{
"internalType": "bytes32[32]",
"name": "_proof",
"type": "bytes32[32]"
},
{
"internalType": "uint256",
"name": "_index",
"type": "uint256"
}
],
"name": "proveAndProcess",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "remoteDomain",
"outputs": [
{
"internalType": "uint32",
"name": "",
"type": "uint32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_updater",
"type": "address"
}
],
"name": "setUpdater",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "state",
"outputs": [
{
"internalType": "enum Common.States",
"name": "",
"type": "uint8"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "transferOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "bytes32",
"name": "_oldRoot",
"type": "bytes32"
},
{
"internalType": "bytes32",
"name": "_newRoot",
"type": "bytes32"
},
{
"internalType": "bytes",
"name": "_signature",
"type": "bytes"
}
],
"name": "update",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "updater",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
}
]
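A similarly hedged sketch for the processor's side of this ABI, assuming abigen-style bindings named EthereumReplicaInternal (the binding name is an assumption) plus a prepared message: Bytes, proof: [[u8; 32]; 32], and leaf index:
let replica = EthereumReplicaInternal::new(address, client);
// prove the message against the committed root and process it in one tx
let pending_tx = replica
.prove_and_process(message, proof, index.into())
.send()
.await?;
// `pending_tx` can then be awaited for a receipt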

@ -1,319 +0,0 @@
[
{
"inputs": [],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "previousOwner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "OwnershipTransferred",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "uint32",
"name": "domain",
"type": "uint32"
},
{
"indexed": false,
"internalType": "address",
"name": "replica",
"type": "address"
}
],
"name": "ReplicaEnrolled",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "uint32",
"name": "domain",
"type": "uint32"
},
{
"indexed": false,
"internalType": "address",
"name": "replica",
"type": "address"
}
],
"name": "ReplicaUnenrolled",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "uint32",
"name": "domain",
"type": "uint32"
},
{
"indexed": false,
"internalType": "address",
"name": "watcher",
"type": "address"
},
{
"indexed": false,
"internalType": "bool",
"name": "access",
"type": "bool"
}
],
"name": "WatcherPermissionSet",
"type": "event"
},
{
"inputs": [
{
"internalType": "uint32",
"name": "",
"type": "uint32"
}
],
"name": "domainToReplica",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "home",
"outputs": [
{
"internalType": "contract Home",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_replica",
"type": "address"
}
],
"name": "isReplica",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "localDomain",
"outputs": [
{
"internalType": "uint32",
"name": "",
"type": "uint32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_replica",
"type": "address"
},
{
"internalType": "uint32",
"name": "_domain",
"type": "uint32"
}
],
"name": "ownerEnrollReplica",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_replica",
"type": "address"
}
],
"name": "ownerUnenrollReplica",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "renounceOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"name": "replicaToDomain",
"outputs": [
{
"internalType": "uint32",
"name": "",
"type": "uint32"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_home",
"type": "address"
}
],
"name": "setHome",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_watcher",
"type": "address"
},
{
"internalType": "uint32",
"name": "_domain",
"type": "uint32"
},
{
"internalType": "bool",
"name": "_access",
"type": "bool"
}
],
"name": "setWatcherPermission",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "transferOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint32",
"name": "_domain",
"type": "uint32"
},
{
"internalType": "bytes32",
"name": "_updater",
"type": "bytes32"
},
{
"internalType": "bytes",
"name": "_signature",
"type": "bytes"
}
],
"name": "unenrollReplica",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "_watcher",
"type": "address"
},
{
"internalType": "uint32",
"name": "_domain",
"type": "uint32"
}
],
"name": "watcherPermission",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "view",
"type": "function"
}
]

@@ -1,315 +0,0 @@
#![allow(clippy::enum_variant_names)]
#![allow(missing_docs)]
use abacus_core::*;
use abacus_core::{
ChainCommunicationError, Common, DoubleUpdate, Home, Message, RawCommittedMessage,
SignedUpdate, State, TxOutcome, Update,
};
use async_trait::async_trait;
use color_eyre::Result;
use ethers::contract::abigen;
use ethers::core::types::{Signature, H256};
use std::{convert::TryFrom, error::Error as StdError, sync::Arc};
use tracing::instrument;
use crate::report_tx;
abigen!(
EthereumHomeInternal,
"./chains/abacus-ethereum/abis/Home.abi.json"
);
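// abigen! generates the EthereumHomeInternal bindings at compile time from the
// ABI JSON: one typed method per function, one event filter per event.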
impl<M> std::fmt::Display for EthereumHomeInternal<M>
where
M: ethers::providers::Middleware,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
#[derive(Debug)]
/// Struct that retrieves event data for an Ethereum home
pub struct EthereumHomeIndexer<M>
where
M: ethers::providers::Middleware,
{
contract: Arc<EthereumHomeInternal<M>>,
provider: Arc<M>,
from_height: u32,
chunk_size: u32,
}
impl<M> EthereumHomeIndexer<M>
where
M: ethers::providers::Middleware + 'static,
{
/// Create new EthereumHomeIndexer
pub fn new(
provider: Arc<M>,
ContractLocator {
name: _,
domain: _,
address,
}: &ContractLocator,
from_height: u32,
chunk_size: u32,
) -> Self {
Self {
contract: Arc::new(EthereumHomeInternal::new(address, provider.clone())),
provider,
from_height,
chunk_size,
}
}
}
#[async_trait]
impl<M> CommonIndexer for EthereumHomeIndexer<M>
where
M: ethers::providers::Middleware + 'static,
{
#[instrument(err, skip(self))]
async fn get_block_number(&self) -> Result<u32> {
Ok(self.provider.get_block_number().await?.as_u32())
}
#[instrument(err, skip(self))]
async fn fetch_sorted_updates(&self, from: u32, to: u32) -> Result<Vec<SignedUpdateWithMeta>> {
let mut events = self
.contract
.update_filter()
.from_block(from)
.to_block(to)
.query_with_meta()
.await?;
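// Providers may return events unordered; impose a total order by
// (block_number, transaction_index) so updates replay in chain order.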
events.sort_by(|a, b| {
let mut ordering = a.1.block_number.cmp(&b.1.block_number);
if ordering == std::cmp::Ordering::Equal {
ordering = a.1.transaction_index.cmp(&b.1.transaction_index);
}
ordering
});
Ok(events
.iter()
.map(|event| {
let signature = Signature::try_from(event.0.signature.as_ref())
.expect("chain accepted invalid signature");
let update = Update {
home_domain: event.0.home_domain,
previous_root: event.0.old_root.into(),
new_root: event.0.new_root.into(),
};
SignedUpdateWithMeta {
signed_update: SignedUpdate { update, signature },
metadata: UpdateMeta {
block_number: event.1.block_number.as_u64(),
},
}
})
.collect())
}
}
#[async_trait]
impl<M> HomeIndexer for EthereumHomeIndexer<M>
where
M: ethers::providers::Middleware + 'static,
{
#[instrument(err, skip(self))]
async fn fetch_sorted_messages(&self, from: u32, to: u32) -> Result<Vec<RawCommittedMessage>> {
let mut events = self
.contract
.dispatch_filter()
.from_block(from)
.to_block(to)
.query()
.await?;
events.sort_by(|a, b| a.leaf_index.cmp(&b.leaf_index));
Ok(events
.into_iter()
.map(|f| RawCommittedMessage {
leaf_index: f.leaf_index.as_u32(),
committed_root: f.committed_root.into(),
message: f.message.to_vec(),
})
.collect())
}
}
/// A reference to a Home contract on some Ethereum chain
#[derive(Debug)]
pub struct EthereumHome<M>
where
M: ethers::providers::Middleware,
{
contract: Arc<EthereumHomeInternal<M>>,
domain: u32,
name: String,
provider: Arc<M>,
}
impl<M> EthereumHome<M>
where
M: ethers::providers::Middleware + 'static,
{
/// Create a reference to a Home at a specific Ethereum address on some
/// chain
pub fn new(
provider: Arc<M>,
ContractLocator {
name,
domain,
address,
}: &ContractLocator,
) -> Self {
Self {
contract: Arc::new(EthereumHomeInternal::new(address, provider.clone())),
domain: *domain,
name: name.to_owned(),
provider,
}
}
}
#[async_trait]
impl<M> Common for EthereumHome<M>
where
M: ethers::providers::Middleware + 'static,
{
fn name(&self) -> &str {
&self.name
}
#[tracing::instrument(err, skip(self))]
async fn status(&self, txid: H256) -> Result<Option<TxOutcome>, ChainCommunicationError> {
let receipt_opt = self
.contract
.client()
.get_transaction_receipt(txid)
.await
.map_err(|e| Box::new(e) as Box<dyn StdError + Send + Sync>)?;
Ok(receipt_opt.map(Into::into))
}
#[tracing::instrument(err, skip(self))]
async fn updater(&self) -> Result<H256, ChainCommunicationError> {
Ok(self.contract.updater().call().await?.into())
}
#[tracing::instrument(err, skip(self))]
async fn state(&self) -> Result<State, ChainCommunicationError> {
let state = self.contract.state().call().await?;
match state {
0 => Ok(State::Waiting),
1 => Ok(State::Failed),
_ => unreachable!(),
}
}
#[tracing::instrument(err, skip(self))]
async fn committed_root(&self) -> Result<H256, ChainCommunicationError> {
Ok(self.contract.committed_root().call().await?.into())
}
#[tracing::instrument(err, skip(self), fields(hex_signature = %format!("0x{}", hex::encode(update.signature.to_vec()))))]
async fn update(&self, update: &SignedUpdate) -> Result<TxOutcome, ChainCommunicationError> {
let tx = self.contract.update(
update.update.previous_root.to_fixed_bytes(),
update.update.new_root.to_fixed_bytes(),
update.signature.to_vec().into(),
);
Ok(report_tx!(tx).into())
}
#[tracing::instrument(err, skip(self))]
async fn double_update(
&self,
double: &DoubleUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {
let tx = self.contract.double_update(
double.0.update.previous_root.to_fixed_bytes(),
[
double.0.update.new_root.to_fixed_bytes(),
double.1.update.new_root.to_fixed_bytes(),
],
double.0.signature.to_vec().into(),
double.1.signature.to_vec().into(),
);
let response = report_tx!(tx);
Ok(response.into())
}
}
#[async_trait]
impl<M> Home for EthereumHome<M>
where
M: ethers::providers::Middleware + 'static,
{
fn local_domain(&self) -> u32 {
self.domain
}
#[tracing::instrument(err, skip(self))]
async fn nonces(&self, destination: u32) -> Result<u32, ChainCommunicationError> {
Ok(self.contract.nonces(destination).call().await?)
}
#[tracing::instrument(err, skip(self))]
async fn dispatch(&self, message: &Message) -> Result<TxOutcome, ChainCommunicationError> {
let tx = self.contract.dispatch(
message.destination,
message.recipient.to_fixed_bytes(),
message.body.clone().into(),
);
Ok(report_tx!(tx).into())
}
async fn queue_contains(&self, root: H256) -> Result<bool, ChainCommunicationError> {
Ok(self.contract.queue_contains(root.into()).call().await?)
}
#[tracing::instrument(err, skip(self), fields(hex_signature = %format!("0x{}", hex::encode(update.signature.to_vec()))))]
async fn improper_update(
&self,
update: &SignedUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {
let tx = self.contract.improper_update(
update.update.previous_root.to_fixed_bytes(),
update.update.new_root.to_fixed_bytes(),
update.signature.to_vec().into(),
);
Ok(report_tx!(tx).into())
}
#[tracing::instrument(err, skip(self))]
async fn produce_update(&self) -> Result<Option<Update>, ChainCommunicationError> {
let (a, b) = self.contract.suggest_update().call().await?;
let previous_root: H256 = a.into();
let new_root: H256 = b.into();
if new_root.is_zero() {
return Ok(None);
}
Ok(Some(Update {
home_domain: self.local_domain(),
previous_root,
new_root,
}))
}
}
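Before removal, the intended flow through this file was: construct an EthereumHome from a provider and a ContractLocator, then drive it through the Home trait. A minimal sketch, with the RPC endpoint as an illustrative assumption:

    use std::{convert::TryFrom, sync::Arc};
    use abacus_core::{ContractLocator, Home, Message};
    use ethers::providers::{Http, Provider};

    async fn dispatch_example(locator: &ContractLocator, message: &Message) -> color_eyre::Result<()> {
        // Endpoint is illustrative; any ethers Middleware works here.
        let provider = Arc::new(Provider::<Http>::try_from("http://localhost:8545")?);
        let home = EthereumHome::new(provider, locator);
        // dispatch() submits the message and, via report_tx!, waits out the receipt.
        let _outcome = home.dispatch(message).await?;
        Ok(())
    }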

@@ -50,7 +50,7 @@ impl<M> EthereumInboxIndexer<M>
where
M: ethers::providers::Middleware + 'static,
{
-/// Create new EthereumHomeIndexer
+/// Create new EthereumInboxIndexer
pub fn new(
provider: Arc<M>,
ContractLocator {

@@ -22,18 +22,6 @@ mod outbox;
#[cfg(not(doctest))]
mod inbox;
-/// Home abi
-#[cfg(not(doctest))]
-mod home;
-/// Replica abi
-#[cfg(not(doctest))]
-mod replica;
-/// XAppConnectionManager abi
-#[cfg(not(doctest))]
-mod xapp;
/// Ethereum connection configuration
#[derive(Debug, serde::Deserialize, Clone)]
#[serde(tag = "type", rename_all = "camelCase")]
@@ -59,7 +47,7 @@ impl Default for Connection {
}
#[cfg(not(doctest))]
-pub use crate::{home::*, inbox::*, outbox::*, replica::*, xapp::*};
+pub use crate::{inbox::*, outbox::*};
#[allow(dead_code)]
/// A live connection to an ethereum-compatible chain.
@@ -68,28 +56,6 @@ pub struct Chain {
ethers: ethers::providers::Provider<ethers::providers::Http>,
}
-boxed_trait!(
-make_home_indexer,
-EthereumHomeIndexer,
-HomeIndexer,
-from_height: u32,
-chunk_size: u32
-);
-boxed_trait!(
-make_replica_indexer,
-EthereumReplicaIndexer,
-CommonIndexer,
-from_height: u32,
-chunk_size: u32
-);
-boxed_trait!(make_replica, EthereumReplica, Replica,);
-boxed_trait!(make_home, EthereumHome, Home,);
-boxed_trait!(
-make_conn_manager,
-EthereumConnectionManager,
-ConnectionManager,
-);
boxed_trait!(
make_outbox_indexer,
EthereumOutboxIndexer,

@@ -1,300 +0,0 @@
#![allow(clippy::enum_variant_names)]
#![allow(missing_docs)]
use abacus_core::{accumulator::merkle::Proof, *};
use abacus_core::{CommonIndexer, ContractLocator};
use async_trait::async_trait;
use color_eyre::Result;
use ethers::contract::abigen;
use ethers::core::types::{Signature, H256, U256};
use tracing::instrument;
use std::{convert::TryFrom, error::Error as StdError, sync::Arc};
use crate::report_tx;
abigen!(
EthereumReplicaInternal,
"./chains/abacus-ethereum/abis/Replica.abi.json",
methods {
initialize(address) as initialize_common;
initialize(uint32, address, bytes32, uint256, uint32) as initialize;
},
);
impl<M> std::fmt::Display for EthereumReplicaInternal<M>
where
M: ethers::providers::Middleware,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
#[derive(Debug)]
/// Struct that retrieves event data for an Ethereum replica
pub struct EthereumReplicaIndexer<M>
where
M: ethers::providers::Middleware,
{
contract: Arc<EthereumReplicaInternal<M>>,
provider: Arc<M>,
from_height: u32,
chunk_size: u32,
}
impl<M> EthereumReplicaIndexer<M>
where
M: ethers::providers::Middleware + 'static,
{
/// Create new EthereumReplicaIndexer
pub fn new(
provider: Arc<M>,
ContractLocator {
name: _,
domain: _,
address,
}: &ContractLocator,
from_height: u32,
chunk_size: u32,
) -> Self {
Self {
contract: Arc::new(EthereumReplicaInternal::new(address, provider.clone())),
provider,
from_height,
chunk_size,
}
}
}
#[async_trait]
impl<M> CommonIndexer for EthereumReplicaIndexer<M>
where
M: ethers::providers::Middleware + 'static,
{
#[instrument(err, skip(self))]
async fn get_block_number(&self) -> Result<u32> {
Ok(self.provider.get_block_number().await?.as_u32())
}
#[instrument(err, skip(self))]
async fn fetch_sorted_updates(&self, from: u32, to: u32) -> Result<Vec<SignedUpdateWithMeta>> {
let mut events = self
.contract
.update_filter()
.from_block(from)
.to_block(to)
.query_with_meta()
.await?;
events.sort_by(|a, b| {
let mut ordering = a.1.block_number.cmp(&b.1.block_number);
if ordering == std::cmp::Ordering::Equal {
ordering = a.1.transaction_index.cmp(&b.1.transaction_index);
}
ordering
});
Ok(events
.iter()
.map(|event| {
let signature = Signature::try_from(event.0.signature.as_ref())
.expect("chain accepted invalid signature");
let update = Update {
home_domain: event.0.home_domain,
previous_root: event.0.old_root.into(),
new_root: event.0.new_root.into(),
};
SignedUpdateWithMeta {
signed_update: SignedUpdate { update, signature },
metadata: UpdateMeta {
block_number: event.1.block_number.as_u64(),
},
}
})
.collect())
}
}
/// A struct that provides access to an Ethereum replica contract
#[derive(Debug)]
pub struct EthereumReplica<M>
where
M: ethers::providers::Middleware,
{
contract: Arc<EthereumReplicaInternal<M>>,
domain: u32,
name: String,
provider: Arc<M>,
}
impl<M> EthereumReplica<M>
where
M: ethers::providers::Middleware,
{
/// Create a reference to a Replica at a specific Ethereum address on some
/// chain
pub fn new(
provider: Arc<M>,
ContractLocator {
name,
domain,
address,
}: &ContractLocator,
) -> Self {
Self {
contract: Arc::new(EthereumReplicaInternal::new(address, provider.clone())),
domain: *domain,
name: name.to_owned(),
provider,
}
}
}
#[async_trait]
impl<M> Common for EthereumReplica<M>
where
M: ethers::providers::Middleware + 'static,
{
fn name(&self) -> &str {
&self.name
}
#[tracing::instrument(err)]
async fn status(&self, txid: H256) -> Result<Option<TxOutcome>, ChainCommunicationError> {
let receipt_opt = self
.contract
.client()
.get_transaction_receipt(txid)
.await
.map_err(|e| Box::new(e) as Box<dyn StdError + Send + Sync>)?;
Ok(receipt_opt.map(Into::into))
}
#[tracing::instrument(err)]
async fn updater(&self) -> Result<H256, ChainCommunicationError> {
Ok(self.contract.updater().call().await?.into())
}
#[tracing::instrument(err)]
async fn state(&self) -> Result<State, ChainCommunicationError> {
let state = self.contract.state().call().await?;
match state {
0 => Ok(State::Waiting),
1 => Ok(State::Failed),
_ => unreachable!(),
}
}
#[tracing::instrument(err)]
async fn committed_root(&self) -> Result<H256, ChainCommunicationError> {
Ok(self.contract.committed_root().call().await?.into())
}
#[tracing::instrument(err)]
async fn update(&self, update: &SignedUpdate) -> Result<TxOutcome, ChainCommunicationError> {
let tx = self.contract.update(
update.update.previous_root.to_fixed_bytes(),
update.update.new_root.to_fixed_bytes(),
update.signature.to_vec().into(),
);
let result = report_tx!(tx);
Ok(result.into())
}
#[tracing::instrument(err)]
async fn double_update(
&self,
double: &DoubleUpdate,
) -> Result<TxOutcome, ChainCommunicationError> {
let tx = self.contract.double_update(
double.0.update.previous_root.to_fixed_bytes(),
[
double.0.update.new_root.to_fixed_bytes(),
double.1.update.new_root.to_fixed_bytes(),
],
double.0.signature.to_vec().into(),
double.1.signature.to_vec().into(),
);
Ok(report_tx!(tx).into())
}
}
#[async_trait]
impl<M> Replica for EthereumReplica<M>
where
M: ethers::providers::Middleware + 'static,
{
fn local_domain(&self) -> u32 {
self.domain
}
async fn remote_domain(&self) -> Result<u32, ChainCommunicationError> {
Ok(self.contract.remote_domain().call().await?)
}
#[tracing::instrument(err)]
async fn prove(&self, proof: &Proof) -> Result<TxOutcome, ChainCommunicationError> {
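// The on-chain verifier takes the merkle branch as a fixed [[u8; 32]; 32]
// array, so copy each H256 in the proof path into place before calling.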
let mut sol_proof: [[u8; 32]; 32] = Default::default();
sol_proof
.iter_mut()
.enumerate()
.for_each(|(i, elem)| *elem = proof.path[i].to_fixed_bytes());
let tx = self
.contract
.prove(proof.leaf.into(), sol_proof, proof.index.into());
Ok(report_tx!(tx).into())
}
#[tracing::instrument(err)]
async fn process(&self, message: &AbacusMessage) -> Result<TxOutcome, ChainCommunicationError> {
let tx = self.contract.process(message.to_vec().into());
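// Pad the simulated estimate with a flat 100k gas so process() is not
// under-provisioned if execution diverges from the estimate.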
let gas = tx.estimate_gas().await?.saturating_add(U256::from(100000));
let gassed = tx.gas(gas);
Ok(report_tx!(gassed).into())
}
#[tracing::instrument(err)]
async fn prove_and_process(
&self,
message: &AbacusMessage,
proof: &Proof,
) -> Result<TxOutcome, ChainCommunicationError> {
let mut sol_proof: [[u8; 32]; 32] = Default::default();
sol_proof
.iter_mut()
.enumerate()
.for_each(|(i, elem)| *elem = proof.path[i].to_fixed_bytes());
let tx =
self.contract
.prove_and_process(message.to_vec().into(), sol_proof, proof.index.into());
let gas = tx.estimate_gas().await?.saturating_add(U256::from(100000));
let gassed = tx.gas(gas);
Ok(report_tx!(gassed).into())
}
#[tracing::instrument(err)]
async fn message_status(&self, leaf: H256) -> Result<MessageStatus, ChainCommunicationError> {
let status = self.contract.messages(leaf.into()).call().await?;
match status {
0 => Ok(MessageStatus::None),
1 => Ok(MessageStatus::Proven),
2 => Ok(MessageStatus::Processed),
_ => panic!("Bad status from solidity"),
}
}
async fn acceptable_root(&self, root: H256) -> Result<bool, ChainCommunicationError> {
Ok(self.contract.acceptable_root(root.into()).call().await?)
}
}
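The prove/process split above mirrors the contract's two phases. A minimal sketch of driving them through the Replica trait; to_leaf() is assumed here to hash the message to its tree leaf, as in the core message type:

    use abacus_core::{accumulator::merkle::Proof, AbacusMessage, MessageStatus, Replica};
    use ethers::providers::{Http, Provider};

    async fn relay(
        replica: &EthereumReplica<Provider<Http>>,
        message: &AbacusMessage,
        proof: &Proof,
    ) -> color_eyre::Result<()> {
        // Skip work the chain has already accepted.
        match replica.message_status(message.to_leaf()).await? {
            MessageStatus::None => {
                // Not yet proven: submit proof and process in one transaction.
                replica.prove_and_process(message, proof).await?;
            }
            MessageStatus::Proven => {
                replica.process(message).await?;
            }
            MessageStatus::Processed => {} // nothing to do
        }
        Ok(())
    }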

@@ -1,140 +0,0 @@
#![allow(clippy::enum_variant_names)]
#![allow(missing_docs)]
use abacus_core::*;
use async_trait::async_trait;
use ethers::contract::abigen;
use std::sync::Arc;
use crate::report_tx;
abigen!(
EthereumConnectionManagerInternal,
"./chains/abacus-ethereum/abis/XAppConnectionManager.abi.json"
);
/// A reference to an XAppConnectionManager contract on some Ethereum chain
#[derive(Debug)]
pub struct EthereumConnectionManager<M>
where
M: ethers::providers::Middleware,
{
contract: EthereumConnectionManagerInternal<M>,
domain: u32,
name: String,
}
impl<M> EthereumConnectionManager<M>
where
M: ethers::providers::Middleware,
{
/// Create a reference to an XAppConnectionManager at a specific Ethereum
/// address on some chain
#[allow(dead_code)]
pub fn new(
provider: Arc<M>,
ContractLocator {
name,
domain,
address,
}: &ContractLocator,
) -> Self {
Self {
contract: EthereumConnectionManagerInternal::new(address, provider),
domain: *domain,
name: name.to_owned(),
}
}
}
#[async_trait]
impl<M> ConnectionManager for EthereumConnectionManager<M>
where
M: ethers::providers::Middleware + 'static,
{
fn local_domain(&self) -> u32 {
self.domain
}
#[tracing::instrument(err)]
async fn is_replica(&self, address: AbacusIdentifier) -> Result<bool, ChainCommunicationError> {
Ok(self
.contract
.is_replica(address.as_ethereum_address())
.call()
.await?)
}
#[tracing::instrument(err)]
async fn watcher_permission(
&self,
address: AbacusIdentifier,
domain: u32,
) -> Result<bool, ChainCommunicationError> {
Ok(self
.contract
.watcher_permission(address.as_ethereum_address(), domain)
.call()
.await?)
}
#[tracing::instrument(err)]
async fn owner_enroll_replica(
&self,
replica: AbacusIdentifier,
domain: u32,
) -> Result<TxOutcome, ChainCommunicationError> {
let tx = self
.contract
.owner_enroll_replica(replica.as_ethereum_address(), domain);
Ok(report_tx!(tx).into())
}
#[tracing::instrument(err)]
async fn owner_unenroll_replica(
&self,
replica: AbacusIdentifier,
) -> Result<TxOutcome, ChainCommunicationError> {
let tx = self
.contract
.owner_unenroll_replica(replica.as_ethereum_address());
Ok(report_tx!(tx).into())
}
#[tracing::instrument(err)]
async fn set_home(&self, home: AbacusIdentifier) -> Result<TxOutcome, ChainCommunicationError> {
let tx = self.contract.set_home(home.as_ethereum_address());
Ok(report_tx!(tx).into())
}
#[tracing::instrument(err)]
async fn set_watcher_permission(
&self,
watcher: AbacusIdentifier,
domain: u32,
access: bool,
) -> Result<TxOutcome, ChainCommunicationError> {
let tx =
self.contract
.set_watcher_permission(watcher.as_ethereum_address(), domain, access);
Ok(report_tx!(tx).into())
}
#[tracing::instrument(err)]
async fn unenroll_replica(
&self,
signed_failure: &SignedFailureNotification,
) -> Result<TxOutcome, ChainCommunicationError> {
let tx = self.contract.unenroll_replica(
signed_failure.notification.home_domain,
signed_failure.notification.updater.into(),
signed_failure.signature.to_vec().into(),
);
Ok(report_tx!(tx).into())
}
}
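As with the Home and Replica wrappers above, the connection manager is driven through its trait. A minimal sketch of the watcher-initiated unenroll flow, assuming a constructed EthereumConnectionManager and a SignedFailureNotification produced elsewhere; the permission pre-check is a convenience, not something the file above requires:

    use abacus_core::{AbacusIdentifier, ConnectionManager, SignedFailureNotification};

    async fn handle_failure<M>(
        manager: &EthereumConnectionManager<M>,
        watcher: AbacusIdentifier,
        signed_failure: &SignedFailureNotification,
    ) -> color_eyre::Result<()>
    where
        M: ethers::providers::Middleware + 'static,
    {
        let domain = signed_failure.notification.home_domain;
        // unenrollReplica is watcher-gated on-chain; checking first avoids a revert.
        if manager.watcher_permission(watcher, domain).await? {
            manager.unenroll_replica(signed_failure).await?;
        }
        Ok(())
    }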

@ -1,7 +1,7 @@
{ {
"environment": "test", "environment": "test",
"signers": {}, "signers": {},
"replicas": { "inboxes": {
"kovan": { "kovan": {
"address": "0x84eA74d481Ee0A5332c457a4d796187F6Ba67fEB", "address": "0x84eA74d481Ee0A5332c457a4d796187F6Ba67fEB",
"domain": "3000", "domain": "3000",
@ -33,7 +33,7 @@
} }
} }
}, },
"home": { "outbox": {
"address": "0x2279B7A0a67DB372996a5FaB50D91eAA73d2eBe6", "address": "0x2279B7A0a67DB372996a5FaB50D91eAA73d2eBe6",
"domain": "1000", "domain": "1000",
"name": "alfajores", "name": "alfajores",

@ -1,7 +1,7 @@
{ {
"environment": "test", "environment": "test",
"signers": {}, "signers": {},
"replicas": { "inboxes": {
"alfajores": { "alfajores": {
"address": "0x9A9f2CCfdE556A7E9Ff0848998Aa4a0CFD8863AE", "address": "0x9A9f2CCfdE556A7E9Ff0848998Aa4a0CFD8863AE",
"domain": "1000", "domain": "1000",
@ -33,7 +33,7 @@
} }
} }
}, },
"home": { "outbox": {
"address": "0x5081a39b8A5f0E35a8D959395a630b68B74Dd30f", "address": "0x5081a39b8A5f0E35a8D959395a630b68B74Dd30f",
"domain": "43113", "domain": "43113",
"name": "fuji", "name": "fuji",

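Both config hunks are the same mechanical rename: replicas becomes inboxes and home becomes outbox. A hedged sketch of the matching deserialization shape on the Rust side; struct and field names here are illustrative, not the actual abacus-base definitions:

    use serde::Deserialize;
    use std::collections::HashMap;

    // Illustrative mirror of the renamed JSON; the real structs live in abacus-base.
    #[derive(Debug, Deserialize)]
    struct Settings {
        environment: String,
        signers: HashMap<String, serde_json::Value>,
        inboxes: HashMap<String, ChainContract>, // formerly "replicas"
        outbox: ChainContract,                   // formerly "home"
    }

    #[derive(Debug, Deserialize)]
    struct ChainContract {
        address: String,
        domain: String,
        name: String,
    }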
Some files were not shown because too many files have changed in this diff.