Operational cleanup (#707)

* Reduce log level for common message

* Retry more of the errors and downgrade log level for non-stop cases (see the sketch after this list)

* Cleanup

* Reduce error logs
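For context on what "downgrade log level for non-stop cases" means in practice, here is a minimal standalone sketch (hypothetical names, not code from this repository): failures that do not stop processing are retried and logged at info, while failures that end processing keep the louder warn level.

use tracing::{info, warn};

// Hypothetical classification used only for illustration.
enum Failure {
    Transient, // e.g. connection reset: retry and log quietly
    Permanent, // e.g. invalid request: stop and log loudly
}

// Returns true if the caller should retry.
fn handle_failure(kind: Failure, err: &str, attempt: u32) -> bool {
    match kind {
        Failure::Transient => {
            // Non-stop case: downgraded so routine retries do not look like incidents.
            info!(error = %err, attempt, "transient failure; will retry");
            true
        }
        Failure::Permanent => {
            // Stop case: keep warn so it stays visible.
            warn!(error = %err, attempt, "permanent failure; giving up");
            false
        }
    }
}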
Authored by Mattie Conover, committed via GitHub. Commit 8d0509a4d2, parent 2e8d9c9776.
Files changed:

1. rust/agents/relayer/src/msg/processor.rs (4 changed lines)
2. rust/chains/abacus-ethereum/src/retrying.rs (41 changed lines)
3. rust/chains/abacus-ethereum/src/validator_manager.rs (2 changed lines)

rust/agents/relayer/src/msg/processor.rs

@@ -116,7 +116,7 @@ impl MessageProcessor {
             debug!(msg=?msg, "Working on msg");
             msg
         } else {
-            warn!(
+            debug!(
                 "Leaf in db without message idx: {}",
                 self.message_leaf_index
             );
@@ -126,7 +126,7 @@ impl MessageProcessor {
             // here. For now, optimistically yield and then re-enter the loop in hopes that
             // the DB is now coherent.
             // TODO(webbhorn): Why can't we yield here instead of sleep?
-            tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
+            tokio::time::sleep(Duration::from_secs(1)).await;
             return Ok(());
         };
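The shortened sleep call assumes a `Duration` import already in scope in processor.rs; the import is not visible in this hunk, so the exact path is an assumption, but either of the following resolves it (tokio re-exports the std type):

// Assumed import elsewhere in processor.rs (not shown in this diff):
use std::time::Duration;
// or equivalently: use tokio::time::Duration; (a re-export of std::time::Duration)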

rust/chains/abacus-ethereum/src/retrying.rs

@@ -6,10 +6,16 @@ use serde::{de::DeserializeOwned, Serialize};
 use serde_json::Value;
 use thiserror::Error;
 use tokio::time::sleep;
-use tracing::{debug, instrument, trace, warn};
+use tracing::{debug, info, instrument, trace, warn};
 
 use crate::HttpClientError;
 
+const METHODS_TO_NOT_RETRY: &[&str] = &[
+    "eth_estimateGas",
+    "eth_sendTransaction",
+    "eth_sendRawTransaction",
+];
+
 /// An HTTP Provider with a simple naive exponential backoff built-in
 #[derive(Debug, Clone)]
 pub struct RetryingProvider<P> {
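As a rough illustration of how the new constant is intended to be used (the helper below is hypothetical, not part of the diff): the method name is checked before deciding whether a JSON-RPC error is worth retrying, since failures of these particular calls are presumed to be deterministic rather than transient.

// Hypothetical helper mirroring the check made later in request().
const METHODS_TO_NOT_RETRY: &[&str] = &[
    "eth_estimateGas",
    "eth_sendTransaction",
    "eth_sendRawTransaction",
];

fn should_retry_json_rpc_error(method: &str) -> bool {
    // Retrying these methods would most likely just repeat the same error,
    // so the error is surfaced immediately instead.
    !METHODS_TO_NOT_RETRY.contains(&method)
}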
@@ -82,12 +88,7 @@ where
 impl JsonRpcClient for RetryingProvider<Http> {
     type Error = RetryingProviderError<Http>;
 
-    #[instrument(
-        level = "debug",
-        err,
-        skip(params),
-        fields(method = %method, params = %serde_json::to_string(&params).unwrap()))
-    ]
+    #[instrument(level = "error", skip_all, fields(method = %method))]
     async fn request<T, R>(&self, method: &str, params: T) -> Result<R, Self::Error>
     where
         T: Debug + Serialize + Send + Sync,
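For reference on what the replacement attribute does (standard tracing attribute syntax, hypothetical function below): `level = "error"` sets the level of the generated span so it survives strict filters, `skip_all` stops recording any function arguments, and only the explicitly listed `method` field is attached; the old `err` directive is gone, so an `Err` return no longer emits an automatic error-level event.

use tracing::instrument;

// Hypothetical function: `params` is never recorded (skip_all); only `method` is,
// and the span itself is created at ERROR level.
#[instrument(level = "error", skip_all, fields(method = %method))]
fn dispatch(method: &str, params: Vec<u8>) -> usize {
    params.len()
}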
@@ -99,6 +100,7 @@ impl JsonRpcClient for RetryingProvider<Http> {
         let mut i = 1;
         loop {
             let backoff_ms = self.base_retry_ms * 2u64.pow(i - 1);
+            trace!(params = %serde_json::to_string(&params).unwrap_or_default(), "Dispatching request with params");
             debug!(attempt = i, "Dispatching request");
 
             let fut = match params {
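A small but deliberate detail in the added trace line: `unwrap_or_default()` replaces the `unwrap()` that the old `#[instrument]` fields used, so a params value that fails to serialize is logged as an empty string instead of panicking. A tiny illustration (the params value here is hypothetical):

fn log_params_example() {
    // serde_json::to_string returns a Result; unwrap_or_default() falls back to ""
    // on failure, where unwrap() would panic.
    let params = vec![1u8, 2, 3]; // hypothetical params value
    let rendered = serde_json::to_string(&params).unwrap_or_default();
    tracing::trace!(params = %rendered, "Dispatching request with params");
}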
@@ -109,7 +111,7 @@ impl JsonRpcClient for RetryingProvider<Http> {
             match fut.await {
                 Ok(res) => return Ok(res),
                 Err(HttpClientError::ReqwestError(e)) => {
-                    warn!(
+                    info!(
                         backoff_ms,
                         retries_remaining = self.max_requests - i,
                         error = %e,
@@ -118,17 +120,22 @@ impl JsonRpcClient for RetryingProvider<Http> {
                     last_err = HttpClientError::ReqwestError(e);
                 }
                 Err(HttpClientError::JsonRpcError(e)) => {
-                    // This is a client error so we do not want to retry on it.
-                    warn!(error = %e, "JsonRpcError in retrying provider; not retrying.");
-                    return Err(RetryingProviderError::JsonRpcClientError(
-                        HttpClientError::JsonRpcError(e),
-                    ));
+                    // We don't want to retry errors that are probably not going to work if we keep
+                    // retrying them or that indicate an error in higher-order logic and not
+                    // transient provider (connection or other) errors.
+                    if METHODS_TO_NOT_RETRY.contains(&method) {
+                        warn!(error = %e, "JsonRpcError in retrying provider; not retrying.");
+                        return Err(RetryingProviderError::JsonRpcClientError(
+                            HttpClientError::JsonRpcError(e),
+                        ));
+                    } else {
+                        info!(error = %e, "JsonRpcError in retrying provider.");
+                        last_err = HttpClientError::JsonRpcError(e);
+                    }
                 }
                 Err(HttpClientError::SerdeJson { err, text }) => {
-                    warn!(error = %err, "SerdeJson error in retrying provider; not retrying.");
-                    return Err(RetryingProviderError::JsonRpcClientError(
-                        HttpClientError::SerdeJson { err, text },
-                    ));
+                    info!(error = %err, "SerdeJson error in retrying provider");
+                    last_err = HttpClientError::SerdeJson { err, text };
                 }
             }
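To make the retry timing concrete: the delay computed at the top of the loop is `base_retry_ms * 2^(i - 1)` for attempt `i`. A quick check of the arithmetic (the base of 50 ms below is an assumed example value, not taken from this repository):

// Exponential backoff as computed by `base_retry_ms * 2u64.pow(i - 1)`.
// With an assumed base of 50 ms, attempts 1..=5 wait 50, 100, 200, 400, 800 ms.
fn backoff_ms(base_retry_ms: u64, attempt: u32) -> u64 {
    base_retry_ms * 2u64.pow(attempt - 1)
}

fn main() {
    for attempt in 1..=5 {
        println!("attempt {attempt}: {} ms", backoff_ms(50, attempt));
    }
}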

rust/chains/abacus-ethereum/src/validator_manager.rs

@@ -90,7 +90,7 @@ impl<M> InboxValidatorManager for EthereumInboxValidatorManager<M>
 where
     M: Middleware + 'static,
 {
-    #[tracing::instrument(err, skip(self))]
+    #[tracing::instrument(skip(self))]
     async fn process(
         &self,
         multisig_signed_checkpoint: &MultisigSignedCheckpoint,
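The only change in this file is dropping `err` from the attribute. Per tracing's documented behavior, `#[instrument(err)]` emits an error-level event whenever the instrumented function returns `Err`; without it, the error is still returned to the caller but nothing is logged automatically, which matches the "Reduce error logs" bullet in the commit message. A standalone illustration with hypothetical functions:

use tracing::instrument;

// With `err`, every Err return is automatically logged at error level.
#[instrument(err)]
fn with_err_directive() -> Result<(), String> {
    Err("boom".into())
}

// Without `err`, the Err is still propagated, but no event is emitted;
// the caller decides whether and how loudly to record it.
#[instrument]
fn without_err_directive() -> Result<(), String> {
    Err("boom".into())
}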
