Skip to content

Commit

Permalink
fix(offchain): apply fixes suggested by rust-clippy
Browse files Browse the repository at this point in the history
Full list of warnings:

- https://rust-lang.github.io/rust-clippy/master/index.html#redundant_pattern_matching
- https://rust-lang.github.io/rust-clippy/master/index.html#needless_borrow
- https://rust-lang.github.io/rust-clippy/master/index.html#redundant_static_lifetimes
- https://rust-lang.github.io/rust-clippy/master/index.html#map_flatten
- https://rust-lang.github.io/rust-clippy/master/index.html#suspicious_doc_comments
- https://rust-lang.github.io/rust-clippy/master/index.html#assign_op_pattern
- https://rust-lang.github.io/rust-clippy/master/index.html#redundant_clone
- https://rust-lang.github.io/rust-clippy/master/index.html#clone_on_copy
- https://rust-lang.github.io/rust-clippy/master/index.html#single_char_pattern
- https://rust-lang.github.io/rust-clippy/master/index.html#derivable_impls
- https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_to_owned
- https://rust-lang.github.io/rust-clippy/master/index.html#needless_lifetimes
- https://rust-lang.github.io/rust-clippy/master/index.html#to_string_in_format_args
- https://rust-lang.github.io/rust-clippy/master/index.html#needless_question_mark
- https://rust-lang.github.io/rust-clippy/master/index.html#explicit_auto_deref
- https://rust-lang.github.io/rust-clippy/master/index.html#ptr_arg
- https://rust-lang.github.io/rust-clippy/master/index.html#redundant_field_names
- https://rust-lang.github.io/rust-clippy/master/index.html#let_unit_value
- https://rust-lang.github.io/rust-clippy/master/index.html#comparison_to_empty
- https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_cast
  • Loading branch information
marcelstanley authored and gligneul committed Jul 31, 2023
1 parent c951db8 commit 313713b
Show file tree
Hide file tree
Showing 32 changed files with 71 additions and 95 deletions.
4 changes: 2 additions & 2 deletions offchain/advance-runner/src/broker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ impl BrokerFacade {
return Ok(INITIAL_ID.to_owned());
} else {
// This won't underflow because we know the epoch is not 0
epoch = epoch - 1;
epoch -= 1;
}

tracing::trace!(epoch, "searching for finish epoch input event");
Expand Down Expand Up @@ -107,7 +107,7 @@ impl BrokerFacade {
loop {
let result = self
.client
.consume_blocking(&self.inputs_stream, &last_id)
.consume_blocking(&self.inputs_stream, last_id)
.await;
if matches!(result, Err(BrokerError::ConsumeTimeout)) {
tracing::trace!("consume timed out, retrying");
Expand Down
2 changes: 1 addition & 1 deletion offchain/advance-runner/src/runner.rs
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ impl<Snap: SnapshotManager + std::fmt::Debug + 'static> Runner<Snap> {

let event = self
.broker
.consume_input(&last_id)
.consume_input(last_id)
.await
.context(ConsumeInputSnafu)?;
tracing::trace!("input event consumed from broker");
Expand Down
4 changes: 2 additions & 2 deletions offchain/advance-runner/src/server_manager/conversions.rs
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
// (c) Cartesi and individual authors (see AUTHORS)
// SPDX-License-Identifier: Apache-2.0 (see LICENSE)

///! This module contains functions to convert from gRPC types to
///! rollups-events types
//! This module contains functions to convert from gRPC types to
//! rollups-events types
use grpc_interfaces::cartesi_machine::Hash;
use grpc_interfaces::cartesi_server_manager::{
Address, OutputEnum, OutputValidityProof, Proof,
Expand Down
2 changes: 1 addition & 1 deletion offchain/advance-runner/src/server_manager/facade.rs
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,7 @@ impl ServerManagerFacade {
grpc_call!(self, advance_state, {
let input_metadata = InputMetadata {
msg_sender: Some(Address {
data: input_metadata.msg_sender.inner().clone().into(),
data: (*input_metadata.msg_sender.inner()).into(),
}),
block_number: input_metadata.block_number,
timestamp: input_metadata.timestamp,
Expand Down
19 changes: 9 additions & 10 deletions offchain/advance-runner/src/snapshot/fs_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ impl SnapshotManager for FSSnapshotManager {

let path = fs::read_link(&self.config.snapshot_latest)
.context(ReadLinkSnafu)?;
ensure!(path.is_dir(), BrokenLinkSnafu { path: path.clone() });
ensure!(path.is_dir(), BrokenLinkSnafu { path });
tracing::trace!(?path, "followed latest link");

path.try_into()
Expand Down Expand Up @@ -131,7 +131,7 @@ impl SnapshotManager for FSSnapshotManager {
ensure!(
snapshot.path.parent() == Some(snapshot_dir),
WrongDirSnafu {
path: snapshot.path.clone()
path: snapshot.path
}
);
let (epoch, processed_input_count) = decode_filename(&snapshot.path)?;
Expand All @@ -143,7 +143,7 @@ impl SnapshotManager for FSSnapshotManager {
ensure!(
snapshot.path.is_dir(),
NotFoundSnafu {
path: snapshot.path.clone()
path: snapshot.path
}
);

Expand Down Expand Up @@ -172,7 +172,7 @@ impl SnapshotManager for FSSnapshotManager {
path: latest.clone()
}
);
fs::remove_file(&latest).context(SetLatestSnafu {
fs::remove_file(latest).context(SetLatestSnafu {
path: latest.clone(),
})?;
tracing::trace!("deleted previous latest symlink");
Expand All @@ -188,7 +188,7 @@ impl SnapshotManager for FSSnapshotManager {

// delete other snapshots
for path in snapshots.iter() {
fs::remove_dir_all(&path)
fs::remove_dir_all(path)
.context(RemoveSnafu { path: path.clone() })?;
}
tracing::trace!("deleted previous snapshots");
Expand All @@ -204,14 +204,13 @@ fn encode_filename(epoch: u64, processed_input_count: u64) -> String {
fn decode_filename(path: &Path) -> Result<(u64, u64), FSSnapshotError> {
let file_name = path
.file_name()
.map(|file_name| file_name.to_str())
.flatten()
.and_then(|file_name| file_name.to_str())
.context(DirNameSnafu {
path: path.to_owned(),
})?;
path: path.to_owned(),
})?;
tracing::trace!(file_name, "got snapshot file name");

let parts: Vec<_> = file_name.split("_").collect();
let parts: Vec<_> = file_name.split('_').collect();
ensure!(
parts.len() == 2,
DirNameSnafu {
Expand Down
4 changes: 2 additions & 2 deletions offchain/data/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ impl RepositoryConfig {
Redacted::new(format!(
"postgres://{}:{}@{}:{}/{}",
urlencoding::encode(&self.user),
urlencoding::encode(&self.password.inner()),
urlencoding::encode(self.password.inner()),
urlencoding::encode(&self.hostname),
self.port,
urlencoding::encode(&self.db)
Expand Down Expand Up @@ -64,7 +64,7 @@ impl From<RepositoryCLIConfig> for RepositoryConfig {
if cli_config.postgres_password.is_some() {
panic!("Both `postgres_password` and `postgres_password_file` arguments are set");
}
match std::fs::read_to_string(&filename) {
match std::fs::read_to_string(filename) {
Ok(password) => password,
Err(e) => {
panic!("Failed to read password from file: {:?}", e);
Expand Down
2 changes: 1 addition & 1 deletion offchain/data/src/pagination.rs
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,7 @@ impl Cursor {
/// Decode cursor from base64 String
pub fn decode(value: &str) -> Result<Cursor, Error> {
let bytes = base64_engine
.decode(&value)
.decode(value)
.context(DecodeBase64CursorSnafu)?;
let offset = std::str::from_utf8(&bytes)
.context(DecodeUTF8CursorSnafu)?
Expand Down
4 changes: 2 additions & 2 deletions offchain/data/src/repository.rs
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ impl Repository {

/// Generate a boxed query from an input query filter
impl InputQueryFilter {
fn to_query<'a>(&'a self) -> schema::inputs::BoxedQuery<'a, Pg> {
fn to_query(&self) -> schema::inputs::BoxedQuery<'_, Pg> {
use schema::inputs::dsl;
let mut query = dsl::inputs.into_boxed();
if let Some(other) = self.index_greater_than {
Expand All @@ -242,7 +242,7 @@ impl InputQueryFilter {
macro_rules! impl_output_filter_to_query {
($filter: ty, $table: ident) => {
impl $filter {
fn to_query<'a>(&'a self) -> schema::$table::BoxedQuery<'a, Pg> {
fn to_query(&self) -> schema::$table::BoxedQuery<'_, Pg> {
use schema::$table::dsl;
let mut query = dsl::$table.into_boxed();
if let Some(other) = self.input_index {
Expand Down
11 changes: 1 addition & 10 deletions offchain/dispatcher/src/machine/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,21 +10,12 @@ use async_trait::async_trait;

use self::rollups_broker::BrokerFacadeError;

#[derive(Debug)]
#[derive(Debug, Default)]
pub struct RollupStatus {
pub inputs_sent_count: u64,
pub last_event_is_finish_epoch: bool,
}

impl Default for RollupStatus {
fn default() -> Self {
RollupStatus {
inputs_sent_count: 0,
last_event_is_finish_epoch: false,
}
}
}

#[async_trait]
pub trait BrokerStatus: std::fmt::Debug {
async fn status(&self) -> Result<RollupStatus, BrokerFacadeError>;
Expand Down
4 changes: 2 additions & 2 deletions offchain/dispatcher/src/machine/rollups_broker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ impl BrokerFacade {
#[tracing::instrument(level = "trace", skip_all)]
async fn claim(
&self,
id: &String,
id: &str,
) -> Result<Option<Event<RollupsClaim>>, BrokerFacadeError> {
let mut broker = self.broker.lock().await;
let event = broker
Expand Down Expand Up @@ -297,7 +297,7 @@ fn build_next_input(
let data = RollupsData::AdvanceStateInput(RollupsAdvanceStateInput {
metadata,
payload: input.payload.clone().into(),
tx_hash: (*input.tx_hash).0.into(),
tx_hash: input.tx_hash.0.into(),
});

RollupsInput {
Expand Down
8 changes: 4 additions & 4 deletions offchain/dispatcher/src/signer/aws_signer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -88,18 +88,18 @@ impl Signer for AwsSigner {
&self,
message: &TypedTransaction,
) -> Result<Signature, Self::Error> {
inner_aws_signer_call!(&self, sign_transaction, message)
inner_aws_signer_call!(self, sign_transaction, message)
}

async fn sign_typed_data<T: Eip712 + Send + Sync>(
&self,
payload: &T,
) -> Result<Signature, Self::Error> {
inner_aws_signer_call!(&self, sign_typed_data, payload)
inner_aws_signer_call!(self, sign_typed_data, payload)
}

fn address(&self) -> Address {
self.address.clone()
self.address
}

fn chain_id(&self) -> u64 {
Expand All @@ -111,7 +111,7 @@ impl Signer for AwsSigner {
key_id: self.key_id.clone(),
chain_id: chain_id.into(),
region: self.region.clone(),
address: self.address.clone(),
address: self.address,
}
}
}
Expand Down
2 changes: 1 addition & 1 deletion offchain/dispatcher/src/signer/signer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ impl ConditionalSigner {
const DEFAULT_ACCOUNT_INDEX: u32 = 0;
let index = account_index.unwrap_or(DEFAULT_ACCOUNT_INDEX);
let wallet = MnemonicBuilder::<English>::default()
.phrase(mnemonic.clone().as_str())
.phrase(mnemonic.as_str())
.index(index)
.context(LocalWalletSnafu)?
.build()
Expand Down
2 changes: 1 addition & 1 deletion offchain/graphql-server/src/schema/generate_schema.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ fn main() {
match write!(graphql_schema_file, "{}", graphql_schema) {
Ok(_) => {}
Err(e) => {
eprintln!("Error writing schema to file {}", e.to_string());
eprintln!("Error writing schema to file {}", e);
}
}
}
2 changes: 1 addition & 1 deletion offchain/host-runner/src/controller.rs
Original file line number Diff line number Diff line change
Expand Up @@ -220,7 +220,7 @@ fn send_response<U>(tx: oneshot::Sender<U>, response: U)
where
U: std::fmt::Debug + Send + Sync,
{
if let Err(_) = tx.send(response) {
if tx.send(response).is_err() {
tracing::warn!("failed to send response (channel dropped)");
}
}
Expand Down
10 changes: 5 additions & 5 deletions offchain/host-runner/src/grpc/server_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ impl ServerManager for ServerManagerService {
) -> Result<Response<FinishEpochResponse>, Status> {
let request = request.into_inner();
tracing::info!("received finish_epoch with id={}", request.session_id);
if request.storage_directory != "" {
if !request.storage_directory.is_empty() {
tracing::warn!("ignoring storage_directory parameter");
}
let response = self
Expand Down Expand Up @@ -269,7 +269,7 @@ impl SessionManager {
processed_input_count: u64,
controller: Controller,
) -> Result<(), Status> {
if session_id == "" {
if session_id.is_empty() {
return Err(Status::invalid_argument("session id is empty"));
}
let mut entry = self.entry.lock().await;
Expand Down Expand Up @@ -305,7 +305,7 @@ impl SessionManager {
}

async fn try_del_session(&self, request_id: &String) -> Result<(), Status> {
self.try_get_session(&request_id)
self.try_get_session(request_id)
.await?
.try_lock()
.or(Err(Status::aborted("concurrent call in session")))?
Expand Down Expand Up @@ -675,7 +675,7 @@ impl Epoch {
self.state = EpochState::Finished;

let machine_state_hash = GrpcHash {
data: vec![0 as u8; HASH_SIZE],
data: vec![0_u8; HASH_SIZE],
};
let mut proofs: Vec<GrpcProof> = vec![];
let index = Token::Int(U256::from(epoch_index));
Expand Down Expand Up @@ -893,7 +893,7 @@ impl Epoch {
&self,
processed_input_count_within_epoch: u64,
) -> Result<(), Status> {
if self.get_num_processed_inputs_within_epoch() as u64
if self.get_num_processed_inputs_within_epoch()
!= processed_input_count_within_epoch
{
Err(Status::invalid_argument(format!(
Expand Down
2 changes: 1 addition & 1 deletion offchain/host-runner/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ async fn main() {
if let Err(e) = controller.shutdown().await.await {
tracing::error!("failed to shutdown controller ({})", e);
}
if let Err(_) = grpc_shutdown_tx.send(()) {
if grpc_shutdown_tx.send(()).is_err() {
tracing::error!("failed to send the shutdown signal to grpc");
}
if let Err(e) = grpc_service.await {
Expand Down
6 changes: 3 additions & 3 deletions offchain/indexer/src/conversions.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
// (c) Cartesi and individual authors (see AUTHORS)
// SPDX-License-Identifier: Apache-2.0 (see LICENSE)

///! Convert from rollups-events types to rollups-data types.
///! This code cannot use the From trait because both types are defined in
///! external crates.
//! Convert from rollups-events types to rollups-data types.
//! This code cannot use the From trait because both types are defined in
//! external crates.
use chrono::naive::NaiveDateTime;

use rollups_events::{
Expand Down
2 changes: 1 addition & 1 deletion offchain/inspect-server/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ fn load_config_file<T: Default + serde::de::DeserializeOwned>(
) -> Result<T, ConfigError> {
match config_file {
Some(config) => {
let s = std::fs::read_to_string(&config).context(FileSnafu)?;
let s = std::fs::read_to_string(config).context(FileSnafu)?;

let file_config: T = toml::from_str(&s).context(ParseSnafu)?;

Expand Down
2 changes: 1 addition & 1 deletion offchain/inspect-server/src/inspect.rs
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ fn respond(
response_tx: oneshot::Sender<Result<InspectStateResponse, InspectError>>,
response: Result<InspectStateResponse, InspectError>,
) {
if let Err(_) = response_tx.send(response) {
if response_tx.send(response).is_err() {
log::warn!("failed to respond inspect request (client dropped)");
}
}
Expand Down
2 changes: 1 addition & 1 deletion offchain/redacted/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ impl fmt::Debug for RedactedUrl {
}
})
.and_then(|_| {
if let Some(_) = url.password() {
if url.password().is_some() {
url.set_password(Some("***"))
} else {
Ok(())
Expand Down
18 changes: 8 additions & 10 deletions offchain/rollups-events/src/broker/indexer.rs
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
// (c) Cartesi and individual authors (see AUTHORS)
// SPDX-License-Identifier: Apache-2.0 (see LICENSE)

///! This module is an indexer-specific extension for the broker
///!
///! It would be too complex to implement the indexer extension as a generic broker method.
///! Instead, we decided to implement the extension that we need for the indexer as a submodule.
///! This extension should be in this crate because it accesses the Redis interface directly.
///! (All Redis interaction should be hidden in this crate.)
//! This module is an indexer-specific extension for the broker
//!
//! It would be too complex to implement the indexer extension as a generic broker method.
//! Instead, we decided to implement the extension that we need for the indexer as a submodule.
//! This extension should be in this crate because it accesses the Redis interface directly.
//! (All Redis interaction should be hidden in this crate.)
use backoff::future::retry;
use redis::streams::{StreamReadOptions, StreamReadReply};
use redis::AsyncCommands;
Expand Down Expand Up @@ -80,8 +80,7 @@ impl Broker {
.keys
.iter_mut()
.find(|stream| stream.key == input_stream_key)
.map(|stream| stream.ids.pop())
.flatten();
.and_then(|stream| stream.ids.pop());
if let Some(stream_id) = input_stream_id {
tracing::trace!("found input event; parsing it");
let event: Event<RollupsInput> = stream_id.try_into()?;
Expand All @@ -93,8 +92,7 @@ impl Broker {
.keys
.iter_mut()
.find(|stream| stream.key == output_stream_key)
.map(|stream| stream.ids.pop())
.flatten();
.and_then(|stream| stream.ids.pop());
if let Some(stream_id) = output_stream_id {
tracing::trace!("found output event; parsing it");
let event: Event<RollupsOutput> = stream_id.try_into()?;
Expand Down
2 changes: 1 addition & 1 deletion offchain/rollups-events/src/broker/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ pub use redacted::{RedactedUrl, Url};

pub mod indexer;

pub const INITIAL_ID: &'static str = "0";
pub const INITIAL_ID: &str = "0";

/// The `BrokerConnection` enum implements the `ConnectionLike` trait
/// to satisfy the `AsyncCommands` trait bounds.
Expand Down
Loading

0 comments on commit 313713b

Please sign in to comment.