Relayer retry endpoints (#3476)
### Description

Adds a `/message_retry` relayer endpoint, which supports retrying OpQueue operations either by ID or by destination domain. Each such request is sent over an `mpmc` channel whose receiving end lives in the OpQueue and is drained when `OpQueue::pop` is called. Example calls:

```
GET http://127.0.0.1:60843/message_retry?destination_domain=42
GET http://127.0.0.1:60843/message_retry?message_id=0x46910b1329ee53c86a023b322e9ca1c17e5f9f0bee789c77b0abced0a173d714
```

If the endpoint is called with both filters, **two** requests are sent across the channel, one for each condition.

Efficiency note: the entire queue is iterated over whenever there is at least one retry request in the channel. The upside is that rebuilding the heap is `O(n)`, so this is not too costly (see the sketch below this description).

### Drive-by changes

- Removed the usage of `enum_dispatch`, because it would have made unit tests messy. The drawback is that we have to work around object-safety restrictions in `PendingOperation` - for instance, it can't be cloned (but it can be stored behind an `Arc` if we ever need that).
- Added new methods to `PendingOperation`, either to match by ID or to implement heap element prioritization without depending on the concrete `PendingMessage` type (as was done before). Additions: `id()`, `origin_domain_id()`, `priority()`. These shouldn't conflict with any operations we may add in the future (e.g. gas oracle updates).
- Although `tokio`'s broadcast channel is multi-producer-multi-consumer by default, the consumer end isn't `Clone` - you instead need a producer to call `.subscribe()` on to get a new consumer. So I added an `MpmcChannel` struct that encapsulates keeping a producer around to hand out new consumers.
- `OpQueue` is moved into its own file and has unit tests added for retries (covering broadcast to multiple queues, and both retry types).
- Did a bit of agent `server` cleanup, including `EigenNodeApi`.

### Related issues

- Fixes https://github.com/hyperlane-xyz/hyperlane-monorepo/issues/3098

### Backward compatibility

Yes

### Testing

Unit tests for the server route (channel transmitter logic) and for the OpQueue retries (channel receiver logic).
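To make the `O(n)` claim concrete, here is a minimal, self-contained sketch (illustration only, not code from this diff) of the drain-reset-rebuild pattern that `process_retry_requests` uses: collecting back into a `BinaryHeap` heapifies in `O(n)`, so the whole reprioritization pass stays linear.

```rust
use std::{cmp::Reverse, collections::BinaryHeap};

/// Hypothetical stand-in for the retry pass: drain the heap, "reset" matching
/// elements, and heapify everything back. `BinaryHeap: FromIterator` heapifies
/// in O(n), so the rebuild is O(n) on top of the O(n) scan itself.
fn reprioritize(heap: &mut BinaryHeap<Reverse<u64>>, matches: &[u64]) {
    let rebuilt: BinaryHeap<_> = heap
        .drain()
        .map(|Reverse(mut priority)| {
            if matches.contains(&priority) {
                priority = 0; // "reset attempts": becomes the most urgent element
            }
            Reverse(priority)
        })
        .collect(); // O(n) heapify
    *heap = rebuilt;
}

fn main() {
    let mut heap: BinaryHeap<_> = [3u64, 1, 2].into_iter().map(Reverse).collect();
    reprioritize(&mut heap, &[3]);
    assert_eq!(heap.pop(), Some(Reverse(0))); // the retried element pops first
}
```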
@@ -0,0 +1,303 @@
```rust
use std::{cmp::Reverse, collections::BinaryHeap, sync::Arc};

use derive_new::new;
use hyperlane_core::MpmcReceiver;
use prometheus::{IntGauge, IntGaugeVec};
use tokio::sync::Mutex;
use tracing::info;

use crate::server::MessageRetryRequest;

use super::pending_operation::PendingOperation;

pub type QueueOperation = Box<dyn PendingOperation>;

/// Queue of generic operations that can be submitted to a destination chain.
/// Includes logic for maintaining queue metrics by the destination and `app_context` of an operation.
#[derive(Debug, Clone, new)]
pub struct OpQueue {
    metrics: IntGaugeVec,
    queue_metrics_label: String,
    retry_rx: MpmcReceiver<MessageRetryRequest>,
    #[new(default)]
    queue: Arc<Mutex<BinaryHeap<Reverse<QueueOperation>>>>,
}

impl OpQueue {
    /// Push an element onto the queue and update metrics
    pub async fn push(&self, op: QueueOperation) {
        // increment the metric before pushing onto the queue, because we lose ownership afterwards
        self.get_operation_metric(op.as_ref()).inc();

        self.queue.lock().await.push(Reverse(op));
    }

    /// Pop an element from the queue and update metrics
    pub async fn pop(&mut self) -> Option<Reverse<QueueOperation>> {
        self.process_retry_requests().await;
        let op = self.queue.lock().await.pop();
        op.map(|op| {
            // even though the metric is decremented here, the operation may fail to process
            // and be re-added to the queue; in those cases the queue length metric dips
            // until the operation is re-added.
            self.get_operation_metric(op.0.as_ref()).dec();
            op
        })
    }

    pub async fn process_retry_requests(&mut self) {
        // TODO: could rate-limit ourselves here, but we expect the volume of messages over
        // this channel to be very low.
        // The other consideration is whether to put the channel receiver in the OpQueue or in
        // a dedicated task that also holds an Arc to the Mutex. For simplicity, we'll put it
        // in the OpQueue for now.
        let mut message_retry_requests = vec![];
        while let Ok(message_id) = self.retry_rx.receiver.try_recv() {
            message_retry_requests.push(message_id);
        }
        if message_retry_requests.is_empty() {
            return;
        }
        let mut queue = self.queue.lock().await;
        let mut reprioritized_queue: BinaryHeap<_> = queue
            .drain()
            .map(|Reverse(mut e)| {
                // We can check for equality here thanks to the `PartialEq` implementation for
                // `MessageRetryRequest`, but we can't use `contains` because the types differ.
                if message_retry_requests.iter().any(|r| r == e) {
                    let destination_domain = e.destination_domain().to_string();
                    info!(
                        id = ?e.id(),
                        destination_domain, "Retrying OpQueue operation"
                    );
                    e.reset_attempts()
                }
                Reverse(e)
            })
            .collect();
        queue.append(&mut reprioritized_queue);
    }

    /// Get the metric associated with this operation
    fn get_operation_metric(&self, operation: &dyn PendingOperation) -> IntGauge {
        let (destination, app_context) = operation.get_operation_labels();
        self.metrics
            .with_label_values(&[&destination, &self.queue_metrics_label, &app_context])
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::msg::pending_operation::PendingOperationResult;
    use hyperlane_core::{HyperlaneDomain, KnownHyperlaneDomain, MpmcChannel, H256};
    use std::{
        collections::VecDeque,
        time::{Duration, Instant},
    };

    #[derive(Debug, Clone)]
    struct MockPendingOperation {
        id: H256,
        seconds_to_next_attempt: u64,
        destination_domain: HyperlaneDomain,
    }

    impl MockPendingOperation {
        fn new(seconds_to_next_attempt: u64, destination_domain: HyperlaneDomain) -> Self {
            Self {
                id: H256::random(),
                seconds_to_next_attempt,
                destination_domain,
            }
        }
    }

    #[async_trait::async_trait]
    impl PendingOperation for MockPendingOperation {
        fn id(&self) -> H256 {
            self.id
        }

        fn reset_attempts(&mut self) {
            self.seconds_to_next_attempt = 0;
        }

        fn priority(&self) -> u32 {
            todo!()
        }

        fn get_operation_labels(&self) -> (String, String) {
            Default::default()
        }

        fn origin_domain_id(&self) -> u32 {
            todo!()
        }

        fn destination_domain(&self) -> &HyperlaneDomain {
            &self.destination_domain
        }

        fn app_context(&self) -> Option<String> {
            todo!()
        }

        async fn prepare(&mut self) -> PendingOperationResult {
            todo!()
        }

        /// Submit this operation to the blockchain and report if it was successful
        /// or not.
        async fn submit(&mut self) -> PendingOperationResult {
            todo!()
        }

        /// This will be called after the operation has been submitted and is
        /// responsible for checking if the operation has reached a point at
        /// which we consider it safe from reorgs.
        async fn confirm(&mut self) -> PendingOperationResult {
            todo!()
        }

        fn next_attempt_after(&self) -> Option<Instant> {
            Some(
                Instant::now()
                    .checked_add(Duration::from_secs(self.seconds_to_next_attempt))
                    .unwrap(),
            )
        }

        fn set_retries(&mut self, _retries: u32) {
            todo!()
        }
    }

    fn dummy_metrics_and_label() -> (IntGaugeVec, String) {
        (
            IntGaugeVec::new(
                prometheus::Opts::new("op_queue", "OpQueue metrics"),
                &["destination", "queue_metrics_label", "app_context"],
            )
            .unwrap(),
            "queue_metrics_label".to_string(),
        )
    }

    #[tokio::test]
    async fn test_multiple_op_queues_message_id() {
        let (metrics, queue_metrics_label) = dummy_metrics_and_label();
        let mpmc_channel = MpmcChannel::new(100);
        let mut op_queue_1 = OpQueue::new(
            metrics.clone(),
            queue_metrics_label.clone(),
            mpmc_channel.receiver(),
        );
        let mut op_queue_2 = OpQueue::new(metrics, queue_metrics_label, mpmc_channel.receiver());

        // Add some operations to the queue with increasing `next_attempt_after` values
        let destination_domain: HyperlaneDomain = KnownHyperlaneDomain::Injective.into();
        let messages_to_send = 5;
        let mut ops: VecDeque<_> = (1..=messages_to_send)
            .map(|seconds_to_next_attempt| {
                Box::new(MockPendingOperation::new(
                    seconds_to_next_attempt,
                    destination_domain.clone(),
                )) as QueueOperation
            })
            .collect();
        let op_ids: Vec<_> = ops.iter().map(|op| op.id()).collect();

        // push to queue 1
        for _ in 0..=2 {
            op_queue_1.push(ops.pop_front().unwrap()).await;
        }

        // push to queue 2
        for _ in 3..messages_to_send {
            op_queue_2.push(ops.pop_front().unwrap()).await;
        }

        // Retry by message ids
        let mpmc_tx = mpmc_channel.sender();
        mpmc_tx
            .send(MessageRetryRequest::MessageId(op_ids[1]))
            .unwrap();
        mpmc_tx
            .send(MessageRetryRequest::MessageId(op_ids[2]))
            .unwrap();

        // Pop elements from queue 1
        let mut queue_1_popped = vec![];
        while let Some(op) = op_queue_1.pop().await {
            queue_1_popped.push(op.0);
        }

        // The elements sent over the channel should be the first ones popped,
        // regardless of their initial `next_attempt_after`
        assert_eq!(queue_1_popped[0].id(), op_ids[2]);
        assert_eq!(queue_1_popped[1].id(), op_ids[1]);
        assert_eq!(queue_1_popped[2].id(), op_ids[0]);

        // Pop elements from queue 2
        let mut queue_2_popped = vec![];
        while let Some(op) = op_queue_2.pop().await {
            queue_2_popped.push(op.0);
        }

        // The elements should be popped in the order they were pushed, because there was no
        // retry request for them
        assert_eq!(queue_2_popped[0].id(), op_ids[3]);
        assert_eq!(queue_2_popped[1].id(), op_ids[4]);
    }

    #[tokio::test]
    async fn test_destination_domain() {
        let (metrics, queue_metrics_label) = dummy_metrics_and_label();
        let mpmc_channel = MpmcChannel::new(100);
        let mut op_queue = OpQueue::new(
            metrics.clone(),
            queue_metrics_label.clone(),
            mpmc_channel.receiver(),
        );

        // Add some operations to the queue with increasing `next_attempt_after` values
        let destination_domain_1: HyperlaneDomain = KnownHyperlaneDomain::Injective.into();
        let destination_domain_2: HyperlaneDomain = KnownHyperlaneDomain::Ethereum.into();
        let ops = vec![
            Box::new(MockPendingOperation::new(1, destination_domain_1.clone())) as QueueOperation,
            Box::new(MockPendingOperation::new(2, destination_domain_1.clone())) as QueueOperation,
            Box::new(MockPendingOperation::new(3, destination_domain_2.clone())) as QueueOperation,
            Box::new(MockPendingOperation::new(4, destination_domain_2.clone())) as QueueOperation,
            Box::new(MockPendingOperation::new(5, destination_domain_2.clone())) as QueueOperation,
        ];

        let op_ids: Vec<_> = ops.iter().map(|op| op.id()).collect();

        // push to queue
        for op in ops {
            op_queue.push(op).await;
        }

        // Retry by domain
        let mpmc_tx = mpmc_channel.sender();
        mpmc_tx
            .send(MessageRetryRequest::DestinationDomain(
                destination_domain_2.id(),
            ))
            .unwrap();

        // Pop elements from queue
        let mut popped = vec![];
        while let Some(op) = op_queue.pop().await {
            popped.push(op.0.id());
        }

        // The first messages popped should be those destined for `destination_domain_2` -
        // their exact order depends on how they were stored in the heap
        assert_eq!(popped[0], op_ids[2]);
        assert_eq!(popped[1], op_ids[4]);
        assert_eq!(popped[2], op_ids[3]);
        // Non-retried messages should be at the end
        assert_eq!(popped[3], op_ids[0]);
        assert_eq!(popped[4], op_ids[1]);
    }
}
```
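A note on ordering: `BinaryHeap<Reverse<QueueOperation>>` only compiles if `Box<dyn PendingOperation>` implements `Ord`, and those impls are not part of this hunk. Below is a plausible sketch, assuming the ordering leans on `next_attempt_after()` (which is consistent with the tests above, where `reset_attempts()` zeroes the delay and moves an operation to the front).

```rust
use std::cmp::Ordering;

// Sketch only: the real impls presumably live alongside `PendingOperation` and
// may also consult `priority()`; this is an assumption, not code from the diff.
impl PartialEq for QueueOperation {
    fn eq(&self, other: &Self) -> bool {
        self.id() == other.id()
    }
}

impl Eq for QueueOperation {}

impl PartialOrd for QueueOperation {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for QueueOperation {
    fn cmp(&self, other: &Self) -> Ordering {
        // An earlier `next_attempt_after` compares as "smaller", so wrapping in
        // `Reverse` puts the soonest-due operation at the top of the max-heap.
        self.next_attempt_after().cmp(&other.next_attempt_after())
    }
}
```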
@@ -0,0 +1,179 @@
```rust
use axum::{
    extract::{Query, State},
    routing, Router,
};
use derive_new::new;
use hyperlane_core::{ChainCommunicationError, H256};
use serde::Deserialize;
use std::str::FromStr;
use tokio::sync::broadcast::Sender;

use crate::msg::op_queue::QueueOperation;

const MESSAGE_RETRY_API_BASE: &str = "/message_retry";
pub const ENDPOINT_MESSAGES_QUEUE_SIZE: usize = 1_000;

/// Returns a vector of agent-specific endpoint routes to be served.
/// Can be extended with additional routes and feature flags to enable/disable individually.
pub fn routes(tx: Sender<MessageRetryRequest>) -> Vec<(&'static str, Router)> {
    let message_retry_api = MessageRetryApi::new(tx);

    vec![message_retry_api.get_route()]
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum MessageRetryRequest {
    MessageId(H256),
    DestinationDomain(u32),
}

impl PartialEq<QueueOperation> for &MessageRetryRequest {
    fn eq(&self, other: &QueueOperation) -> bool {
        match self {
            MessageRetryRequest::MessageId(message_id) => message_id == &other.id(),
            MessageRetryRequest::DestinationDomain(destination_domain) => {
                destination_domain == &other.destination_domain().id()
            }
        }
    }
}

#[derive(new, Clone)]
pub struct MessageRetryApi {
    tx: Sender<MessageRetryRequest>,
}

#[derive(Deserialize)]
struct RawMessageRetryRequest {
    message_id: Option<String>,
    destination_domain: Option<u32>,
}

impl TryFrom<RawMessageRetryRequest> for Vec<MessageRetryRequest> {
    type Error = ChainCommunicationError;

    fn try_from(request: RawMessageRetryRequest) -> Result<Self, Self::Error> {
        let mut retry_requests = Vec::new();
        if let Some(message_id) = request.message_id {
            retry_requests.push(MessageRetryRequest::MessageId(H256::from_str(&message_id)?));
        }
        if let Some(destination_domain) = request.destination_domain {
            retry_requests.push(MessageRetryRequest::DestinationDomain(destination_domain));
        }
        Ok(retry_requests)
    }
}

async fn retry_message(
    State(tx): State<Sender<MessageRetryRequest>>,
    Query(request): Query<RawMessageRetryRequest>,
) -> String {
    let retry_requests: Vec<MessageRetryRequest> = match request.try_into() {
        Ok(retry_requests) => retry_requests,
        // Technically it's bad practice to print the error message to the user, but
        // this endpoint is for debugging purposes only.
        Err(err) => {
            return format!("Failed to parse retry request: {}", err);
        }
    };

    if retry_requests.is_empty() {
        return "No retry requests found. Please provide either a message_id or destination_domain.".to_string();
    }

    if let Err(err) = retry_requests
        .into_iter()
        .map(|req| tx.send(req))
        .collect::<Result<Vec<_>, _>>()
    {
        return format!("Failed to send retry request to the queue: {}", err);
    }

    "Moved message(s) to the front of the queue".to_string()
}

impl MessageRetryApi {
    pub fn router(&self) -> Router {
        Router::new()
            .route("/", routing::get(retry_message))
            .with_state(self.tx.clone())
    }

    pub fn get_route(&self) -> (&'static str, Router) {
        (MESSAGE_RETRY_API_BASE, self.router())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use axum::http::StatusCode;
    use ethers::utils::hex::ToHex;
    use hyperlane_core::{MpmcChannel, MpmcReceiver};
    use std::net::SocketAddr;

    fn setup_test_server() -> (SocketAddr, MpmcReceiver<MessageRetryRequest>) {
        let mpmc_channel = MpmcChannel::<MessageRetryRequest>::new(ENDPOINT_MESSAGES_QUEUE_SIZE);
        let message_retry_api = MessageRetryApi::new(mpmc_channel.sender());
        let (path, retry_router) = message_retry_api.get_route();
        let app = Router::new().nest(path, retry_router);

        // Run the app in the background using a test server
        let server =
            axum::Server::bind(&"127.0.0.1:0".parse().unwrap()).serve(app.into_make_service());
        let addr = server.local_addr();
        tokio::spawn(server);

        (addr, mpmc_channel.receiver())
    }

    #[tokio::test]
    async fn test_message_id_retry() {
        let (addr, mut rx) = setup_test_server();

        // Create a random message ID
        let message_id = H256::random();

        // Send a GET request to the server
        let response = reqwest::get(format!(
            "http://{}{}?message_id={}",
            addr,
            MESSAGE_RETRY_API_BASE,
            message_id.encode_hex::<String>()
        ))
        .await
        .unwrap();

        // Check that the response status code is OK
        assert_eq!(response.status(), StatusCode::OK);

        assert_eq!(
            rx.receiver.try_recv().unwrap(),
            MessageRetryRequest::MessageId(message_id)
        );
    }

    #[tokio::test]
    async fn test_destination_domain_retry() {
        let (addr, mut rx) = setup_test_server();

        // Pick an arbitrary destination domain
        let destination_domain = 42;

        // Send a GET request to the server
        let response = reqwest::get(format!(
            "http://{}{}?destination_domain={}",
            addr, MESSAGE_RETRY_API_BASE, destination_domain
        ))
        .await
        .unwrap();

        // Check that the response status code is OK
        assert_eq!(response.status(), StatusCode::OK);

        assert_eq!(
            rx.receiver.try_recv().unwrap(),
            MessageRetryRequest::DestinationDomain(destination_domain)
        );
    }
}
```
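For context, here is a minimal sketch of how these routes might be mounted in the relayer binary. It is modeled on `setup_test_server` above and is an assumption (the actual agent wiring is not part of this hunk): `serve_relayer_endpoints` is a hypothetical helper, and the port simply matches the example calls in the description.

```rust
use axum::Router;
use hyperlane_core::MpmcChannel;

// Hypothetical wiring: build the channel, hand the sender to the HTTP routes,
// and hand out receivers (via `mpmc_channel.receiver()`) to each OpQueue.
async fn serve_relayer_endpoints() {
    let mpmc_channel = MpmcChannel::<MessageRetryRequest>::new(ENDPOINT_MESSAGES_QUEUE_SIZE);

    let mut app = Router::new();
    for (path, router) in routes(mpmc_channel.sender()) {
        app = app.nest(path, router);
    }

    axum::Server::bind(&"127.0.0.1:60843".parse().unwrap())
        .serve(app.into_make_service())
        .await
        .unwrap();
}
```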
```diff
@@ -1,5 +1,19 @@
 pub mod eigen_node;
-pub use eigen_node::EigenNodeAPI;
+use std::{sync::Arc, vec};
 
-pub mod validator_server;
-pub use validator_server::ValidatorServer;
+use axum::Router;
+pub use eigen_node::EigenNodeApi;
+
+use hyperlane_base::CoreMetrics;
+use hyperlane_core::HyperlaneDomain;
+
+/// Returns a vector of validator-specific endpoint routes to be served.
+/// Can be extended with additional routes and feature flags to enable/disable individually.
+pub fn routes(
+    origin_chain: HyperlaneDomain,
+    metrics: Arc<CoreMetrics>,
+) -> Vec<(&'static str, Router)> {
+    let eigen_node_api = EigenNodeApi::new(origin_chain, metrics);
+
+    vec![eigen_node_api.get_route()]
+}
```
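`EigenNodeApi::get_route()` itself is not shown in this diff; judging by `MessageRetryApi::get_route` above and the deleted `ValidatorServer` below, it plausibly pairs the `/eigen` base path with the node API's router. A sketch of the assumed shape:

```rust
// Assumed shape of `EigenNodeApi::get_route` (not in this diff), mirroring
// `MessageRetryApi::get_route` and the deleted `ValidatorServer::new` below.
impl EigenNodeApi {
    pub fn get_route(&self) -> (&'static str, Router) {
        ("/eigen", self.router())
    }
}
```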
@@ -1,20 +0,0 @@
```rust
use crate::server::eigen_node::EigenNodeAPI;
use axum::routing::Router;
use hyperlane_base::CoreMetrics; // Add missing import statement
use hyperlane_core::HyperlaneDomain;
use std::sync::Arc;

pub struct ValidatorServer {
    pub routes: Vec<(&'static str, Router)>,
}

impl ValidatorServer {
    // add routes for serving EigenLayer-specific routes compliant with the spec here https://eigen.nethermind.io/docs/spec/api/
    pub fn new(origin_chain: HyperlaneDomain, metrics: Arc<CoreMetrics>) -> Self {
        let mut routes = vec![];
        let eigen_node_api = EigenNodeAPI::new(origin_chain, metrics);
        routes.push(("/eigen", eigen_node_api.router()));

        Self { routes }
    }
}
```
@@ -0,0 +1,50 @@
```rust
use derive_new::new;
use tokio::sync::broadcast::{Receiver, Sender};

/// Multi-producer, multi-consumer channel
pub struct MpmcChannel<T> {
    sender: Sender<T>,
    receiver: MpmcReceiver<T>,
}

impl<T: Clone> MpmcChannel<T> {
    /// Creates a new `MpmcChannel` with the specified capacity.
    ///
    /// # Arguments
    ///
    /// * `capacity` - The maximum number of messages that can be buffered in the channel.
    pub fn new(capacity: usize) -> Self {
        let (sender, receiver) = tokio::sync::broadcast::channel(capacity);
        Self {
            sender: sender.clone(),
            receiver: MpmcReceiver::new(sender, receiver),
        }
    }

    /// Returns a clone of the sender end of the channel.
    pub fn sender(&self) -> Sender<T> {
        self.sender.clone()
    }

    /// Returns a clone of the receiver end of the channel.
    pub fn receiver(&self) -> MpmcReceiver<T> {
        self.receiver.clone()
    }
}

/// Clonable receiving end of a multi-producer, multi-consumer channel
#[derive(Debug, new)]
pub struct MpmcReceiver<T> {
    sender: Sender<T>,
    /// The receiving end of the channel.
    pub receiver: Receiver<T>,
}

impl<T> Clone for MpmcReceiver<T> {
    fn clone(&self) -> Self {
        Self {
            sender: self.sender.clone(),
            // every clone subscribes a fresh broadcast consumer via the held sender
            receiver: self.sender.subscribe(),
        }
    }
}
```
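A quick usage sketch of the wrapper (illustration only): cloning an `MpmcReceiver` calls `subscribe()` under the hood, so each clone is an independent consumer that sees every message sent after it was created. That is exactly what broadcasting a retry request to multiple OpQueues relies on.

```rust
// Sketch: unlike `tokio::sync::mpsc`, every consumer observes every message.
#[tokio::main]
async fn main() {
    let channel: MpmcChannel<u32> = MpmcChannel::new(8);
    let mut rx_a = channel.receiver();
    let mut rx_b = rx_a.clone(); // a fresh subscription via the held sender

    channel.sender().send(42).unwrap();

    // Both consumers see the same message.
    assert_eq!(rx_a.receiver.try_recv().unwrap(), 42);
    assert_eq!(rx_b.receiver.try_recv().unwrap(), 42);
}
```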