From 545e03c413ebeba5fe6d8a7134eb0554dbd25732 Mon Sep 17 00:00:00 2001
From: Ethan Donowitz
Date: Fri, 3 May 2024 16:31:13 -0400
Subject: [PATCH] treewide: Update Rust toolchain to 2024-05-02

Change-Id: I56ea0995b899ce657b47bb42c6d2bef219db2516
---
 .cargo/{config => config.toml}                |   0
 Cargo.lock                                    |  10 +-
 Cargo.toml                                    |   2 +-
 array2/src/lib.rs                             |   1 -
 benchmarks/src/reporting/analysis.rs          |   2 -
 benchmarks/src/reporting/storage/json.rs      |   1 -
 benchmarks/src/reporting/storage/postgres.rs  |   1 -
 dataflow-state/src/lib.rs                     |   2 +-
 mysql-srv/src/commands.rs                     |   3 +
 mysql-srv/src/value/decode.rs                 |  52 ++-------
 mysql-srv/src/value/encode.rs                 |  90 +++++----------
 mysql-time/src/lib.rs                         |  44 +++-----
 nom-sql/src/literal.rs                        |  12 +-
 partial-map/src/lib.rs                        |   2 +-
 psql-srv/src/lib.rs                           |   2 +-
 psql-srv/src/protocol.rs                      |  18 +--
 reader-map/src/read.rs                        |  16 +--
 reader-map/src/read/read_ref.rs               |   4 +-
 reader-map/tests/proptest.rs                  |   4 +-
 readyset-adapter/src/backend.rs               |   6 +-
 .../src/backend/noria_connector.rs            |   4 +-
 readyset-adapter/src/query_status_cache.rs    |   6 +-
 readyset-client/src/controller/rpc.rs         |   2 +-
 readyset-client/src/lib.rs                    |  21 ++--
 readyset-client/src/view/results.rs           |   6 +-
 readyset-clustertest/src/readyset.rs          |   2 +-
 readyset-common/src/lib.rs                    |   1 -
 readyset-common/src/records.rs                |  12 +-
 readyset-data/src/lib.rs                      |  32 +++---
 readyset-data/src/serde.rs                    | 103 +++++++++---------
 readyset-dataflow/src/domain/channel/tcp.rs   |   2 +-
 readyset-dataflow/src/domain/mod.rs           |   2 +-
 readyset-dataflow/src/lib.rs                  |   1 -
 readyset-dataflow/src/node/ntype.rs           |  22 ++--
 readyset-dataflow/src/ops/filter.rs           |   4 -
 readyset-dataflow/src/ops/grouped/concat.rs   |   2 +-
 readyset-dataflow/src/ops/grouped/mod.rs      |   4 -
 readyset-dataflow/src/ops/mod.rs              |  29 +++--
 readyset-dataflow/src/ops/paginate.rs         |   4 -
 readyset-dataflow/src/ops/topk.rs             |   4 -
 readyset-dataflow/src/ops/union.rs            |   4 +-
 readyset-dataflow/src/payload.rs              |  23 ++--
 readyset-dataflow/src/processing.rs           |   5 -
 readyset-mir/src/rewrite/predicate_pushup.rs  |   2 +-
 readyset-mysql/tests/vertical.rs              |   2 +-
 .../controller/migrate/materialization/mod.rs |   4 +-
 readyset-server/src/controller/migrate/mod.rs |   2 +-
 readyset-server/src/controller/mod.rs         |   2 +-
 .../src/controller/sql/query_signature.rs     |  13 +--
 .../src/controller/sql/recipe/mod.rs          |   6 +-
 readyset-server/src/controller/state.rs       |   8 +-
 readyset-server/src/http_router.rs            |   3 -
 readyset-server/src/startup.rs                |   3 -
 readyset-sql-passes/src/implied_tables.rs     |   2 +-
 readyset-telemetry-reporter/Cargo.toml        |   2 +-
 readyset-util/src/lib.rs                      |   2 +-
 readyset-util/src/nonmaxusize.rs              |   4 +-
 replicators/src/db_util.rs                    |  13 ++-
 .../src/postgres_connector/ddl_replication.rs |   1 +
 .../src/postgres_connector/snapshot.rs        |   1 +
 replicators/tests/ddl_vertical.rs             |   4 +-
 replicators/tests/tests.rs                    |   1 +
 rust-toolchain                                |   2 +-
 63 files changed, 263 insertions(+), 381 deletions(-)
 rename .cargo/{config => config.toml} (100%)

diff --git a/.cargo/config b/.cargo/config.toml
similarity index 100%
rename from .cargo/config
rename to .cargo/config.toml
diff --git a/Cargo.lock b/Cargo.lock
index 3364213114..ab62e53883 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1321,7 +1321,7 @@ dependencies = [
 name = "dataflow-state"
 version = "0.0.1"
 dependencies = [
- "ahash 0.7.8",
+ "ahash 0.8.11",
  "anyhow",
  "async-trait",
  "bincode",
@@ -4356,7 +4356,7 @@ dependencies = [
 name = "readyset-adapter"
 version = "0.1.0"
 dependencies = [
- "ahash 0.7.8",
+ "ahash 0.8.11",
  "anyhow",
  "async-trait",
  "bincode",
@@ -4455,7 +4455,7 @@ dependencies = [
 name = "readyset-client"
version = "0.7.0" dependencies = [ - "ahash 0.7.8", + "ahash 0.8.11", "anyhow", "array2", "async-bincode", @@ -4678,7 +4678,7 @@ dependencies = [ name = "readyset-dataflow" version = "0.7.0" dependencies = [ - "ahash 0.7.8", + "ahash 0.8.11", "anyhow", "async-bincode", "backoff", @@ -4959,7 +4959,7 @@ dependencies = [ name = "readyset-server" version = "0.7.0" dependencies = [ - "ahash 0.7.8", + "ahash 0.8.11", "anyhow", "array2", "assert_approx_eq", diff --git a/Cargo.toml b/Cargo.toml index abb011e89a..ce4df294a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,7 +80,7 @@ rocksdb = { git = "https://github.com/readysettech/rust-rocksdb.git", default-fe metrics-exporter-prometheus = { git = "https://github.com/readysettech/metrics.git" } metrics = { git = "https://github.com/readysettech/metrics.git" } metrics-util = { git = "https://github.com/readysettech/metrics.git" } -ahash = "0.7" +ahash = "0.8" anyhow = "1.0.82" assert_approx_eq = "1.1.0" assert_unordered = "0.3" diff --git a/array2/src/lib.rs b/array2/src/lib.rs index 8ec5b6fce5..8be381d580 100644 --- a/array2/src/lib.rs +++ b/array2/src/lib.rs @@ -23,7 +23,6 @@ #![feature(int_roundings)] use std::fmt::Debug; use std::ops::{Index, IndexMut}; -use std::usize; use serde::{Deserialize, Serialize}; use thiserror::Error; diff --git a/benchmarks/src/reporting/analysis.rs b/benchmarks/src/reporting/analysis.rs index b5fece2ad5..f5316fe15b 100644 --- a/benchmarks/src/reporting/analysis.rs +++ b/benchmarks/src/reporting/analysis.rs @@ -38,7 +38,6 @@ impl AnalysisInfo { #[derive(Debug)] pub struct Stats { - pub samples: i64, pub mean: f64, pub stdev: f64, } @@ -46,7 +45,6 @@ pub struct Stats { impl Stats { pub fn with(hist: &hdrhistogram::Histogram) -> Stats { Stats { - samples: hist.len() as i64, mean: hist.mean(), stdev: hist.stdev(), } diff --git a/benchmarks/src/reporting/storage/json.rs b/benchmarks/src/reporting/storage/json.rs index ae2b6654d2..c2653f7b21 100644 --- a/benchmarks/src/reporting/storage/json.rs +++ b/benchmarks/src/reporting/storage/json.rs @@ -44,7 +44,6 @@ struct Data { impl Data { fn to_stats(&self) -> Stats { Stats { - samples: self.samples as i64, mean: self.mean, stdev: self.stdev, } diff --git a/benchmarks/src/reporting/storage/postgres.rs b/benchmarks/src/reporting/storage/postgres.rs index b0b4a90851..65e9218fb7 100644 --- a/benchmarks/src/reporting/storage/postgres.rs +++ b/benchmarks/src/reporting/storage/postgres.rs @@ -146,7 +146,6 @@ impl Storage for PostgresStorage { None } [row, ..] 
=> Some(Stats { - samples: row.get(0), mean: row.get(1), stdev: row.get(2), }), diff --git a/dataflow-state/src/lib.rs b/dataflow-state/src/lib.rs index 667757113a..43279d1da5 100644 --- a/dataflow-state/src/lib.rs +++ b/dataflow-state/src/lib.rs @@ -1,4 +1,4 @@ -#![feature(stmt_expr_attributes, bound_map, iter_order_by, bound_as_ref)] +#![feature(stmt_expr_attributes, iter_order_by, bound_as_ref)] mod key; mod keyed_state; diff --git a/mysql-srv/src/commands.rs b/mysql-srv/src/commands.rs index c3431fc2ab..f23300e211 100644 --- a/mysql-srv/src/commands.rs +++ b/mysql-srv/src/commands.rs @@ -11,7 +11,9 @@ use crate::myc::constants::{CapabilityFlags, Command as CommandByte, UTF8MB4_GEN #[derive(Debug)] pub struct ClientHandshake<'a> { pub capabilities: CapabilityFlags, + #[allow(dead_code)] pub maxps: u32, + #[allow(dead_code)] pub charset: u16, pub username: &'a str, pub password: &'a [u8], @@ -24,6 +26,7 @@ pub struct ClientChangeUser<'a> { pub username: &'a str, pub password: &'a [u8], pub database: Option<&'a str>, + #[allow(dead_code)] pub charset: u16, pub auth_plugin_name: &'a str, } diff --git a/mysql-srv/src/value/decode.rs b/mysql-srv/src/value/decode.rs index bfe78b59ed..5e44bfea1f 100644 --- a/mysql-srv/src/value/decode.rs +++ b/mysql-srv/src/value/decode.rs @@ -456,59 +456,23 @@ mod tests { );*/ rt!(f64_one, f64, 1.0, ColumnType::MYSQL_TYPE_DOUBLE, false); - rt!( - u8_max, - u8, - u8::max_value(), - ColumnType::MYSQL_TYPE_TINY, - false - ); - rt!( - i8_max, - i8, - i8::max_value(), - ColumnType::MYSQL_TYPE_TINY, - true - ); - rt!( - u16_max, - u16, - u16::max_value(), - ColumnType::MYSQL_TYPE_SHORT, - false - ); - rt!( - i16_max, - i16, - i16::max_value(), - ColumnType::MYSQL_TYPE_SHORT, - true - ); - rt!( - u32_max, - u32, - u32::max_value(), - ColumnType::MYSQL_TYPE_LONG, - false - ); - rt!( - i32_max, - i32, - i32::max_value(), - ColumnType::MYSQL_TYPE_LONG, - true - ); + rt!(u8_max, u8, u8::MAX, ColumnType::MYSQL_TYPE_TINY, false); + rt!(i8_max, i8, i8::MAX, ColumnType::MYSQL_TYPE_TINY, true); + rt!(u16_max, u16, u16::MAX, ColumnType::MYSQL_TYPE_SHORT, false); + rt!(i16_max, i16, i16::MAX, ColumnType::MYSQL_TYPE_SHORT, true); + rt!(u32_max, u32, u32::MAX, ColumnType::MYSQL_TYPE_LONG, false); + rt!(i32_max, i32, i32::MAX, ColumnType::MYSQL_TYPE_LONG, true); rt!( u64_max, u64, - u64::max_value(), + u64::MAX, ColumnType::MYSQL_TYPE_LONGLONG, false ); rt!( i64_max, i64, - i64::max_value(), + i64::MAX, ColumnType::MYSQL_TYPE_LONGLONG, true ); diff --git a/mysql-srv/src/value/encode.rs b/mysql-srv/src/value/encode.rs index 1cd4017ac1..37bb0762dd 100644 --- a/mysql-srv/src/value/encode.rs +++ b/mysql-srv/src/value/encode.rs @@ -71,8 +71,8 @@ where // NOTE: yes, I know the = / => distinction is ugly macro_rules! like_try_into { ($self:ident, $source:ty = $target:ty, $w:ident, $m:ident, $c:ident) => {{ - let min = <$target>::min_value() as $source; - let max = <$target>::max_value() as $source; + let min = <$target>::MIN as $source; + let max = <$target>::MAX as $source; if *$self <= max && *$self >= min { $w.$m(*$self as $target) } else { @@ -80,8 +80,8 @@ macro_rules! 
like_try_into { } }}; ($self:ident, $source:ty => $target:ty, $w:ident, $m:ident, $c:ident) => {{ - let min = <$target>::min_value() as $source; - let max = <$target>::max_value() as $source; + let min = <$target>::MIN as $source; + let max = <$target>::MAX as $source; if *$self <= max && *$self >= min { $w.$m::(*$self as $target) } else { @@ -684,25 +684,25 @@ impl ToMySqlValue for myc::value::Value { // smallest containing type, and then call on that let signed = !c.colflags.contains(ColumnFlags::UNSIGNED_FLAG); if signed { - if n >= i64::from(i8::min_value()) && n <= i64::from(i8::max_value()) { + if n >= i64::from(i8::MIN) && n <= i64::from(i8::MAX) { (n as i8).to_mysql_bin(w, c) - } else if n >= i64::from(i16::min_value()) && n <= i64::from(i16::max_value()) { + } else if n >= i64::from(i16::MIN) && n <= i64::from(i16::MAX) { (n as i16).to_mysql_bin(w, c) - } else if n >= i64::from(i32::min_value()) && n <= i64::from(i32::max_value()) { + } else if n >= i64::from(i32::MIN) && n <= i64::from(i32::MAX) { (n as i32).to_mysql_bin(w, c) } else { n.to_mysql_bin(w, c) } } else if n < 0 { Err(bad(self, c)) - } else if n <= i64::from(u8::max_value()) { + } else if n <= i64::from(u8::MAX) { (n as u8).to_mysql_bin(w, c) - } else if n <= i64::from(u16::max_value()) { + } else if n <= i64::from(u16::MAX) { (n as u16).to_mysql_bin(w, c) - } else if n <= i64::from(u32::max_value()) { + } else if n <= i64::from(u32::MAX) { (n as u32).to_mysql_bin(w, c) } else { - // must work since u64::max_value() > i64::max_value(), and n >= 0 + // must work since u64::MAX > i64::MAX, and n >= 0 (n as u64).to_mysql_bin(w, c) } } @@ -793,14 +793,14 @@ mod tests { rt!(f32_one, f32, 1.0); rt!(f64_one, f64, 1.0); - rt!(u8_max, u8, u8::max_value()); - rt!(i8_max, i8, i8::max_value()); - rt!(u16_max, u16, u16::max_value()); - rt!(i16_max, i16, i16::max_value()); - rt!(u32_max, u32, u32::max_value()); - rt!(i32_max, i32, i32::max_value()); - rt!(u64_max, u64, u64::max_value()); - rt!(i64_max, i64, i64::max_value()); + rt!(u8_max, u8, u8::MAX); + rt!(i8_max, i8, i8::MAX); + rt!(u16_max, u16, u16::MAX); + rt!(i16_max, i16, i16::MAX); + rt!(u32_max, u32, u32::MAX); + rt!(i32_max, i32, i32::MAX); + rt!(u64_max, u64, u64::MAX); + rt!(i64_max, i64, i64::MAX); rt!(opt_none, Option, None); rt!(opt_some, Option, Some(1)); @@ -927,59 +927,23 @@ mod tests { );*/ rt!(f64_one, f64, 1.0, ColumnType::MYSQL_TYPE_DOUBLE, false); - rt!( - u8_max, - u8, - u8::max_value(), - ColumnType::MYSQL_TYPE_TINY, - false - ); - rt!( - i8_max, - i8, - i8::max_value(), - ColumnType::MYSQL_TYPE_TINY, - true - ); - rt!( - u16_max, - u16, - u16::max_value(), - ColumnType::MYSQL_TYPE_SHORT, - false - ); - rt!( - i16_max, - i16, - i16::max_value(), - ColumnType::MYSQL_TYPE_SHORT, - true - ); - rt!( - u32_max, - u32, - u32::max_value(), - ColumnType::MYSQL_TYPE_LONG, - false - ); - rt!( - i32_max, - i32, - i32::max_value(), - ColumnType::MYSQL_TYPE_LONG, - true - ); + rt!(u8_max, u8, u8::MAX, ColumnType::MYSQL_TYPE_TINY, false); + rt!(i8_max, i8, i8::MAX, ColumnType::MYSQL_TYPE_TINY, true); + rt!(u16_max, u16, u16::MAX, ColumnType::MYSQL_TYPE_SHORT, false); + rt!(i16_max, i16, i16::MAX, ColumnType::MYSQL_TYPE_SHORT, true); + rt!(u32_max, u32, u32::MAX, ColumnType::MYSQL_TYPE_LONG, false); + rt!(i32_max, i32, i32::MAX, ColumnType::MYSQL_TYPE_LONG, true); rt!( u64_max, u64, - u64::max_value(), + u64::MAX, ColumnType::MYSQL_TYPE_LONGLONG, false ); rt!( i64_max, i64, - i64::max_value(), + i64::MAX, ColumnType::MYSQL_TYPE_LONGLONG, true ); diff --git 
a/mysql-time/src/lib.rs b/mysql-time/src/lib.rs index 471693d548..d3991c892c 100644 --- a/mysql-time/src/lib.rs +++ b/mysql-time/src/lib.rs @@ -51,9 +51,19 @@ pub struct MySqlTime { } impl MySqlTime { + /// The maximum value that a [`MySqlTime`] can represent: `838:59:59`. + pub const MAX: MySqlTime = MySqlTime { + nanos: MAX_MYSQL_TIME_SECONDS * 10i64.pow(9), + }; + + /// The minimum value that a [`MySqlTime`] can represent: `-838:59:59`. + pub const MIN: MySqlTime = MySqlTime { + nanos: -MAX_MYSQL_TIME_SECONDS * 10i64.pow(9), + }; + /// Creates a new [`MySqlTime`] with the given [`chrono::Duration`]. /// Note that if the [`chrono::Duration`] surpasses the MySQL's TIME max value, then - /// the [`MySqlTime::max_value()`] is used (resp. [`MySqlTime::min_value()`] if the + /// the [`MySqlTime::MAX`] is used (resp. [`MySqlTime::MIN`] if the /// [`chrono::Duration`] falls below the MySQL's TIME min value). /// /// # Example @@ -82,10 +92,10 @@ impl MySqlTime { pub fn new(duration: Duration) -> MySqlTime { let secs = duration.num_seconds(); if secs > MAX_MYSQL_TIME_SECONDS { - return MySqlTime::max_value(); + return MySqlTime::MAX; } if secs < (-MAX_MYSQL_TIME_SECONDS) { - return MySqlTime::min_value(); + return MySqlTime::MIN; } MySqlTime { nanos: duration.num_nanoseconds().expect("Limit checked above"), @@ -192,32 +202,6 @@ impl MySqlTime { )) } - /// Returns the maximum value that a [`MySqlTime`] can represent: `838:59:59`. - /// - /// # Example - /// - /// ```rust - /// use mysql_time::MySqlTime; - /// - /// let mysql_time_max: MySqlTime = MySqlTime::max_value(); // 838:59:59 - /// ``` - pub fn max_value() -> MySqlTime { - MySqlTime::new(Duration::seconds(MAX_MYSQL_TIME_SECONDS)) - } - - /// Returns the minimum value that a [`MySqlTime`] can represent: `-838:59:59`. - /// - /// # Example - /// - /// ``` - /// use mysql_time::MySqlTime; - /// - /// let mysql_time_min: MySqlTime = MySqlTime::min_value(); // -838:59:59 - /// ``` - pub fn min_value() -> MySqlTime { - MySqlTime::new(Duration::seconds(-MAX_MYSQL_TIME_SECONDS)) - } - /// Returns the sign of the [`MySqlTime`] as 1 if it's positive, or -1 if it's negative. 
     ///
     /// # Example
@@ -747,7 +731,7 @@ mod tests {
     #[proptest]
     fn from_microseconds(#[strategy(arbitrary_duration())] duration: Duration) {
         let mysql_time =
-            MySqlTime::from_microseconds(duration.num_microseconds().unwrap_or(i64::max_value()));
+            MySqlTime::from_microseconds(duration.num_microseconds().unwrap_or(i64::MAX));
         let total_secs = duration.num_seconds();
         assert_valid!(mysql_time, total_secs);
     }
diff --git a/nom-sql/src/literal.rs b/nom-sql/src/literal.rs
index 321c2bb01d..418cceb22d 100644
--- a/nom-sql/src/literal.rs
+++ b/nom-sql/src/literal.rs
@@ -119,12 +119,12 @@ pub enum ItemPlaceholder {
     ColonNumber(u32),
 }
 
-impl ToString for ItemPlaceholder {
-    fn to_string(&self) -> String {
+impl fmt::Display for ItemPlaceholder {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match *self {
-            ItemPlaceholder::QuestionMark => "?".to_string(),
-            ItemPlaceholder::DollarNumber(ref i) => format!("${}", i),
-            ItemPlaceholder::ColonNumber(ref i) => format!(":{}", i),
+            ItemPlaceholder::QuestionMark => write!(f, "?"),
+            ItemPlaceholder::DollarNumber(ref i) => write!(f, "${}", i),
+            ItemPlaceholder::ColonNumber(ref i) => write!(f, ":{}", i),
         }
     }
 }
@@ -259,7 +259,7 @@ impl DialectDisplay for Literal {
                     write!(f, "X'{}'", b.iter().map(|v| format!("{:02X}", v)).join(""))
                 }
             },
-            Literal::Placeholder(item) => write!(f, "{}", item.to_string()),
+            Literal::Placeholder(item) => write!(f, "{}", item),
             Literal::BitVector(ref b) => {
                 write!(
                     f,
diff --git a/partial-map/src/lib.rs b/partial-map/src/lib.rs
index e25e75f9df..768835aba7 100644
--- a/partial-map/src/lib.rs
+++ b/partial-map/src/lib.rs
@@ -1,4 +1,4 @@
-#![feature(btree_extract_if, bound_map)]
+#![feature(btree_extract_if)]
 
 use std::borrow::Borrow;
 pub use std::collections::btree_map::{Iter, Keys, Range, Values, ValuesMut};
diff --git a/psql-srv/src/lib.rs b/psql-srv/src/lib.rs
index 325435e47e..387d2df5db 100644
--- a/psql-srv/src/lib.rs
+++ b/psql-srv/src/lib.rs
@@ -1,4 +1,4 @@
-#![feature(associated_type_bounds, let_chains)]
+#![feature(let_chains)]
 //! Bindings for emulating a PostgreSQL server.
 //!
 //! When developing new databases or caching layers, it can be immensely useful to test your system
diff --git a/psql-srv/src/protocol.rs b/psql-srv/src/protocol.rs
index 900988ef33..32b64344cd 100644
--- a/psql-srv/src/protocol.rs
+++ b/psql-srv/src/protocol.rs
@@ -1969,7 +1969,7 @@ mod tests {
             parameter_data_types: vec![],
         };
         block_on(protocol.on_request(parse_request, &mut backend, &mut channel)).unwrap();
-        assert!(protocol.prepared_statements.get("prepared1").is_some());
+        assert!(protocol.prepared_statements.contains_key("prepared1"));
 
         // A prepared statement close request calls close on the backend and removes Protocol state
         // for the prepared statement.
@@ -1981,7 +1981,7 @@ mod tests {
             Response::Message(CloseComplete)
         ));
         assert_eq!(backend.last_close.unwrap(), DeallocateId::Numeric(0));
-        assert!(protocol.prepared_statements.get("prepared1").is_none());
+        assert!(!protocol.prepared_statements.contains_key("prepared1"));
     }
 
     #[test]
@@ -2036,7 +2036,7 @@ mod tests {
             block_on(protocol.on_request(bind_request, &mut backend, &mut channel)).unwrap(),
             Response::Message(BindComplete)
         ));
-        assert!(protocol.portals.get("portal1").is_some());
+        assert!(protocol.portals.contains_key("portal1"));
 
         // A portal close request removes Protocol state for the portal.
         let request = FrontendMessage::Close {
@@ -2046,7 +2046,7 @@ mod tests {
             block_on(protocol.on_request(request, &mut backend, &mut channel)).unwrap(),
             Response::Message(CloseComplete)
         ));
-        assert!(protocol.portals.get("protal1").is_none());
+        assert!(!protocol.portals.contains_key("protal1"));
     }
 
     #[test]
@@ -2091,7 +2091,7 @@ mod tests {
             parameter_data_types: vec![],
         };
         block_on(protocol.on_request(parse_request, &mut backend, &mut channel)).unwrap();
-        assert!(protocol.prepared_statements.get("prepared1").is_some());
+        assert!(protocol.prepared_statements.contains_key("prepared1"));
 
         // A prepared statement describe request generates a suitable description.
         let request = FrontendMessage::Describe {
@@ -2171,7 +2171,7 @@ mod tests {
             parameter_data_types: vec![],
         };
         block_on(protocol.on_request(parse_request, &mut backend, &mut channel)).unwrap();
-        assert!(protocol.prepared_statements.get("prepared1").is_some());
+        assert!(protocol.prepared_statements.contains_key("prepared1"));
 
         let bind_request = FrontendMessage::Bind {
             prepared_statement_name: bytes_str("prepared1"),
@@ -2263,7 +2263,7 @@ mod tests {
             .on_request(parse_request, &mut backend, &mut channel)
             .await
            .unwrap();
-        assert!(protocol.prepared_statements.get("prepared1").is_some());
+        assert!(protocol.prepared_statements.contains_key("prepared1"));
 
         let bind_request = FrontendMessage::Bind {
             prepared_statement_name: bytes_str("prepared1"),
@@ -2343,7 +2343,7 @@ mod tests {
             parameter_data_types: vec![],
         };
         block_on(protocol.on_request(parse_request, &mut backend, &mut channel)).unwrap();
-        assert!(protocol.prepared_statements.get("prepared1").is_some());
+        assert!(protocol.prepared_statements.contains_key("prepared1"));
 
         let bind_request = FrontendMessage::Bind {
             prepared_statement_name: bytes_str("prepared1"),
@@ -2384,7 +2384,7 @@ mod tests {
             parameter_data_types: vec![],
         };
         block_on(protocol.on_request(parse_request, &mut backend, &mut channel)).unwrap();
-        assert!(protocol.prepared_statements.get("prepared1").is_some());
+        assert!(protocol.prepared_statements.contains_key("prepared1"));
 
         let bind_request = FrontendMessage::Bind {
             prepared_statement_name: bytes_str("prepared1"),
diff --git a/reader-map/src/read.rs b/reader-map/src/read.rs
index 4418599013..458cb49a7a 100644
--- a/reader-map/src/read.rs
+++ b/reader-map/src/read.rs
@@ -113,10 +113,10 @@ where
     }
 
     /// Internal version of `get_and`
-    fn get_raw(&self, key: &Q) -> Result>>>
+    fn get_raw(&self, key: &Q) -> Result>>>
     where
         K: Borrow,
-        Q: Ord + Hash,
+        Q: Ord + Hash + ?Sized,
     {
         let MapReadRef { guard } = self.enter()?;
         Ok(ReadGuard::try_map(guard, |inner| {
@@ -139,10 +139,10 @@ where
     /// published by the writer. If no publish has happened, or the map has been destroyed, this
     /// function returns an [`Error`].
     #[inline]
-    pub fn get<'rh, Q: ?Sized>(&'rh self, key: &'_ Q) -> Result>>>
+    pub fn get<'rh, Q>(&'rh self, key: &'_ Q) -> Result>>>
     where
         K: Borrow,
-        Q: Ord + Hash,
+        Q: Ord + Hash + ?Sized,
     {
         // call `borrow` here to monomorphize `get_raw` fewer times
         self.get_raw(key.borrow())
@@ -163,10 +163,10 @@ where
     /// refreshed by the writer. If no refresh has happened, or the map has been destroyed, this
     /// function returns an [`Error`].
     #[inline]
-    pub fn first<'rh, Q: ?Sized>(&'rh self, key: &'_ Q) -> Result>>
+    pub fn first<'rh, Q>(&'rh self, key: &'_ Q) -> Result>>
     where
         K: Borrow,
-        Q: Ord + Clone + Hash,
+        Q: Ord + Clone + Hash + ?Sized,
     {
         let vs = if let Some(vs) = self.get_raw(key.borrow())? {
             vs
@@ -189,10 +189,10 @@ where
     /// function returns an [`Error`].
     ///
     /// If no values exist for the given key, `Ok(None, _)` is returned.
-    pub fn meta_get(&self, key: &Q) -> Result<(Option>>, M)>
+    pub fn meta_get(&self, key: &Q) -> Result<(Option>>, M)>
     where
         K: Borrow,
-        Q: Ord + Clone + Hash,
+        Q: Ord + Clone + Hash + ?Sized,
     {
         let MapReadRef { guard } = self.enter()?;
         let meta = guard.meta.clone();
diff --git a/reader-map/src/read/read_ref.rs b/reader-map/src/read/read_ref.rs
index e30dbd46be..1d417a5886 100644
--- a/reader-map/src/read/read_ref.rs
+++ b/reader-map/src/read/read_ref.rs
@@ -151,10 +151,10 @@ where
     /// Note that not all writes will be included with this read -- only those that have been
     /// published by the writer. If no publish has happened, or the map has been destroyed, this
     /// function returns `None`.
-    pub fn first<'a, Q: ?Sized>(&'a self, key: &'_ Q) -> Option<&'a V>
+    pub fn first<'a, Q>(&'a self, key: &'_ Q) -> Option<&'a V>
     where
         K: Borrow,
-        Q: Ord + Hash,
+        Q: Ord + Hash + ?Sized,
     {
         self.guard.data.get(key).and_then(|values| values.first())
     }
diff --git a/reader-map/tests/proptest.rs b/reader-map/tests/proptest.rs
index c212658fad..c834129d74 100644
--- a/reader-map/tests/proptest.rs
+++ b/reader-map/tests/proptest.rs
@@ -17,10 +17,10 @@ use test_strategy::{proptest, Arbitrary};
 
 const LARGE_VEC_RANGE: Range = 10..1000;
 
-fn set<'a, T: 'a, I>(iter: I) -> HashSet
+fn set<'a, T, I>(iter: I) -> HashSet
 where
     I: IntoIterator,
-    T: Copy + Hash + Eq,
+    T: Copy + Hash + Eq + 'a,
 {
     iter.into_iter().cloned().collect()
 }
diff --git a/readyset-adapter/src/backend.rs b/readyset-adapter/src/backend.rs
index 56709099fa..2dd44d6d5c 100644
--- a/readyset-adapter/src/backend.rs
+++ b/readyset-adapter/src/backend.rs
@@ -1573,7 +1573,7 @@ where
             .ok_or(PreparedStatementMissing { statement_id: id })?;
 
         let mut event = QueryExecutionEvent::new(EventType::Execute);
-        event.query = cached_statement.parsed_query.clone();
+        event.query.clone_from(&cached_statement.parsed_query);
         event.query_id = cached_statement.query_id;
 
         let upstream = &mut self.upstream;
@@ -2373,7 +2373,7 @@ where
         if let Err(e) = &res {
             if let Some(ddl_req) = ddl_req {
                 let remove_res = retry_with_exponential_backoff(
-                    async || {
+                    || async {
                         let ddl_req = ddl_req.clone();
                         self.authority.remove_cache_ddl_request(ddl_req).await
                     },
@@ -2416,7 +2416,7 @@ where
             )
             {
                 let remove_res = retry_with_exponential_backoff(
-                    async || {
+                    || async {
                         let ddl_req = ddl_req.clone();
                         self.authority.remove_cache_ddl_request(ddl_req).await
                     },
diff --git a/readyset-adapter/src/backend/noria_connector.rs b/readyset-adapter/src/backend/noria_connector.rs
index d9d5b45def..749447842e 100644
--- a/readyset-adapter/src/backend/noria_connector.rs
+++ b/readyset-adapter/src/backend/noria_connector.rs
@@ -138,9 +138,7 @@ pub enum PreparedSelectTypes {
 }
 
 #[derive(Debug, Clone)]
-// Due to differences in data type sizes, the large_enum_variant Clippy warning was being emitted
-// for this type, but only when compiling for aarch64 targets.
-#[cfg_attr(target_arch = "aarch64", allow(clippy::large_enum_variant))]
+#[allow(clippy::large_enum_variant)]
 pub enum PrepareResult {
     Select {
         types: PreparedSelectTypes,
diff --git a/readyset-adapter/src/query_status_cache.rs b/readyset-adapter/src/query_status_cache.rs
index e8c2107ef9..dacc69d084 100644
--- a/readyset-adapter/src/query_status_cache.rs
+++ b/readyset-adapter/src/query_status_cache.rs
@@ -653,12 +653,12 @@ impl QueryStatusCache {
     {
         let should_insert = q.with_mut_status(self, |s| match s {
             Some(s) if s.migration_state != MigrationState::Unsupported => {
-                s.migration_state = status.migration_state.clone();
-                s.execution_info = status.execution_info.clone();
+                s.migration_state.clone_from(&status.migration_state);
+                s.execution_info.clone_from(&status.execution_info);
                 false
             }
             Some(s) => {
-                s.execution_info = status.execution_info.clone();
+                s.execution_info.clone_from(&status.execution_info);
                 false
             }
             None => true,
diff --git a/readyset-client/src/controller/rpc.rs b/readyset-client/src/controller/rpc.rs
index 592e38f59f..c979732a67 100644
--- a/readyset-client/src/controller/rpc.rs
+++ b/readyset-client/src/controller/rpc.rs
@@ -16,7 +16,7 @@ use crate::ReadySetHandle;
 
 // this alias is needed to work around -> impl Trait capturing _all_ lifetimes by default
 // the A parameter is needed so it gets captured into the impl Trait
-pub type RpcFuture<'a, R: 'a> = impl Future> + 'a;
+pub type RpcFuture<'a, R: DeserializeOwned + 'a> = impl Future> + 'a;
 
 impl ReadySetHandle {
     /// Perform a raw RPC request to the HTTP `path` provided, providing a request body `r`.
diff --git a/readyset-client/src/lib.rs b/readyset-client/src/lib.rs
index 5903a88ae1..350be21672 100644
--- a/readyset-client/src/lib.rs
+++ b/readyset-client/src/lib.rs
@@ -142,7 +142,6 @@
     result_flattening,
     type_alias_impl_trait,
     stmt_expr_attributes,
-    bound_map,
     bound_as_ref,
     box_into_inner,
     is_sorted,
@@ -258,21 +257,21 @@ pub mod consistency;
 mod controller;
 pub mod metrics;
 pub mod query;
+pub mod recipe;
 pub mod status;
 mod table;
 pub mod utils;
 mod view;
 
-use std::convert::TryFrom;
-use std::default::Default;
-
-pub mod recipe;
-
 #[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
 pub mod consensus;
 #[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
 pub mod internal;
 
-// for the row! macro
+use std::convert::TryFrom;
+use std::default::Default;
 use std::future::Future;
+use std::hash::Hash;
 use std::pin::Pin;
 
 pub use nom_sql::{ColumnConstraint, SqlIdentifier};
@@ -435,8 +434,10 @@ pub fn shard_by(dt: &DfValue, shards: usize) -> usize {
         DfValue::Int(n) => n as usize % shards,
         DfValue::UnsignedInt(n) => n as usize % shards,
         DfValue::Text(..) | DfValue::TinyText(..) | DfValue::TimestampTz(_) => {
-            use std::hash::Hasher;
-            let mut hasher = ahash::AHasher::new_with_keys(0x3306, 0x6033);
+            use std::hash::{BuildHasher, Hasher};
+
+            let mut hasher =
+                ahash::RandomState::with_seeds(0x3306, 0x6033, 0x5432, 0x6034).build_hasher();
             // this unwrap should be safe because there are no error paths with a Text, TinyText,
             // nor Timestamp converting to Text
             #[allow(clippy::unwrap_used)]
@@ -459,10 +460,8 @@ pub fn shard_by(dt: &DfValue, shards: usize) -> usize {
         | DfValue::BitVector(_)
         | DfValue::Array(_)
         | DfValue::PassThrough(_) => {
-            use std::hash::{Hash, Hasher};
-            let mut hasher = ahash::AHasher::new_with_keys(0x3306, 0x6033);
-            dt.hash(&mut hasher);
-            hasher.finish() as usize % shards
+            let hash = ahash::RandomState::with_seeds(0x3306, 0x6033, 0x5432, 0x6034).hash_one(dt);
+            hash as usize % shards
         }
     }
 }
diff --git a/readyset-client/src/view/results.rs b/readyset-client/src/view/results.rs
index c54c52fb65..f4f04579e4 100644
--- a/readyset-client/src/view/results.rs
+++ b/readyset-client/src/view/results.rs
@@ -182,7 +182,7 @@ impl ResultIterator {
 
                 debug_assert!(data
                     .iter()
-                    .all(|s| { s.is_sorted_by(|a, b| Some(comparator.cmp(a, b))) }));
+                    .all(|s| { s.is_sorted_by(|a, b| comparator.cmp(a, b).is_le()) }));
 
                 ResultIteratorInner::MultiKeyMerge(MergeIterator::new(data, comparator))
             }
@@ -208,7 +208,7 @@ impl ResultIterator {
 
                 debug_assert!(data
                     .iter()
-                    .all(|s| { s.is_sorted_by(|a, b| Some(comparator.cmp(a, b))) }));
+                    .all(|s| { s.is_sorted_by(|a, b| comparator.cmp(a, b).is_le()) }));
 
                 ResultIteratorInner::MultiKeyAggregateMerge(AggregateIterator {
                     inner: Box::new(ResultIteratorInner::MultiKeyMerge(MergeIterator::new(
@@ -234,7 +234,7 @@ impl ResultIterator {
 
                 debug_assert!(data
                     .iter()
-                    .all(|s| { s.is_sorted_by(|a, b| Some(comparator.cmp(a, b))) }));
+                    .all(|s| { s.is_sorted_by(|a, b| comparator.cmp(a, b).is_le()) }));
 
                 let temp_iter = ResultIterator {
                     inner: ResultIteratorInner::MultiKeyAggregateMerge(AggregateIterator {
diff --git a/readyset-clustertest/src/readyset.rs b/readyset-clustertest/src/readyset.rs
index 7a311d3344..cfa4a1a927 100644
--- a/readyset-clustertest/src/readyset.rs
+++ b/readyset-clustertest/src/readyset.rs
@@ -757,7 +757,7 @@ async fn startup_permutations() {
     // FIXME[ENG-1668]: Either the system cannot startup healthily for the following situations, or
     // there are bugs in the failure injection we are doing in tests.
-    let known_failures = vec![
+    let known_failures = [
         vec![Authority, Adapter, Server, Upstream],
         vec![Upstream, Adapter, Server, Authority],
         vec![Adapter, Authority, Server, Upstream],
diff --git a/readyset-common/src/lib.rs b/readyset-common/src/lib.rs
index a7d7dc415c..253bb0a3d7 100644
--- a/readyset-common/src/lib.rs
+++ b/readyset-common/src/lib.rs
@@ -1,5 +1,4 @@
 #![deny(macro_use_extern_crate)]
-#![feature(bound_map)]
 
 mod local;
 mod records;
diff --git a/readyset-common/src/records.rs b/readyset-common/src/records.rs
index 91beec4ee3..943ef26830 100644
--- a/readyset-common/src/records.rs
+++ b/readyset-common/src/records.rs
@@ -121,10 +121,10 @@ impl<'a> IntoIterator for &'a Records {
 pub struct Records(Vec);
 
 impl Records {
-    pub fn has(&self, q: &Q, positive: bool) -> bool
+    pub fn has(&self, q: &Q, positive: bool) -> bool
     where
         Vec: Borrow,
-        Q: Eq,
+        Q: Eq + ?Sized,
     {
         self.iter().any(|r| match r {
             Record::Positive(ref r) if positive => r.borrow() == q,
@@ -133,18 +133,18 @@ impl Records {
         })
     }
 
-    pub fn has_positive(&self, q: &Q) -> bool
+    pub fn has_positive(&self, q: &Q) -> bool
     where
         Vec: Borrow,
-        Q: Eq,
+        Q: Eq + ?Sized,
     {
         self.has(q, true)
     }
 
-    pub fn has_negative(&self, q: &Q) -> bool
+    pub fn has_negative(&self, q: &Q) -> bool
     where
         Vec: Borrow,
-        Q: Eq,
+        Q: Eq + ?Sized,
     {
         self.has(q, false)
     }
diff --git a/readyset-data/src/lib.rs b/readyset-data/src/lib.rs
index 87cbb1fb17..d3cf7a7830 100644
--- a/readyset-data/src/lib.rs
+++ b/readyset-data/src/lib.rs
@@ -235,9 +235,9 @@ impl DfValue {
             ),
             DfValue::Float(..) => DfValue::Float(f32::MIN),
             DfValue::Double(..) => DfValue::Double(f64::MIN),
-            DfValue::Int(_) => DfValue::Int(i64::min_value()),
+            DfValue::Int(_) => DfValue::Int(i64::MIN),
             DfValue::UnsignedInt(_) => DfValue::UnsignedInt(0),
-            DfValue::Time(_) => DfValue::Time(MySqlTime::min_value()),
+            DfValue::Time(_) => DfValue::Time(MySqlTime::MIN),
             DfValue::ByteArray(_) => DfValue::ByteArray(Arc::new(Vec::new())),
             DfValue::Numeric(_) => DfValue::from(Decimal::MIN),
             DfValue::BitVector(_) => DfValue::from(BitVec::new()),
@@ -265,9 +265,9 @@ impl DfValue {
             ),
             DfValue::Float(..) => DfValue::Float(f32::MAX),
             DfValue::Double(..) => DfValue::Double(f64::MIN),
-            DfValue::Int(_) => DfValue::Int(i64::max_value()),
-            DfValue::UnsignedInt(_) => DfValue::UnsignedInt(u64::max_value()),
-            DfValue::Time(_) => DfValue::Time(MySqlTime::max_value()),
+            DfValue::Int(_) => DfValue::Int(i64::MAX),
+            DfValue::UnsignedInt(_) => DfValue::UnsignedInt(u64::MAX),
+            DfValue::Time(_) => DfValue::Time(MySqlTime::MAX),
             DfValue::Numeric(_) => DfValue::from(Decimal::MAX),
             DfValue::TinyText(_)
             | DfValue::Text(_)
@@ -2998,17 +2998,17 @@ mod tests {
 
     #[test]
     fn data_type_conversion() {
-        let bigint_i64_min = DfValue::Int(std::i64::MIN);
-        let bigint_i32_min = DfValue::Int(std::i32::MIN as i64);
-        let bigint_u32_min = DfValue::Int(std::u32::MIN as i64);
-        let bigint_i32_max = DfValue::Int(std::i32::MAX as i64);
-        let bigint_u32_max = DfValue::Int(std::u32::MAX as i64);
-        let bigint_i64_max = DfValue::Int(std::i64::MAX);
-        let ubigint_u32_min = DfValue::UnsignedInt(std::u32::MIN as u64);
-        let ubigint_i32_max = DfValue::UnsignedInt(std::i32::MAX as u64);
-        let ubigint_u32_max = DfValue::UnsignedInt(std::u32::MAX as u64);
-        let ubigint_i64_max = DfValue::UnsignedInt(std::i64::MAX as u64);
-        let ubigint_u64_max = DfValue::UnsignedInt(std::u64::MAX);
+        let bigint_i64_min = DfValue::Int(i64::MIN);
+        let bigint_i32_min = DfValue::Int(i32::MIN as i64);
+        let bigint_u32_min = DfValue::Int(u32::MIN as i64);
+        let bigint_i32_max = DfValue::Int(i32::MAX as i64);
+        let bigint_u32_max = DfValue::Int(u32::MAX as i64);
+        let bigint_i64_max = DfValue::Int(i64::MAX);
+        let ubigint_u32_min = DfValue::UnsignedInt(u32::MIN as u64);
+        let ubigint_i32_max = DfValue::UnsignedInt(i32::MAX as u64);
+        let ubigint_u32_max = DfValue::UnsignedInt(u32::MAX as u64);
+        let ubigint_i64_max = DfValue::UnsignedInt(i64::MAX as u64);
+        let ubigint_u64_max = DfValue::UnsignedInt(u64::MAX);
 
         fn _data_type_conversion_test_eq_i32(d: &DfValue) {
             assert_eq!(
diff --git a/readyset-data/src/serde.rs b/readyset-data/src/serde.rs
index c9901d0c00..5e398c0ac5 100644
--- a/readyset-data/src/serde.rs
+++ b/readyset-data/src/serde.rs
@@ -161,64 +161,65 @@ impl serde::ser::Serialize for DfValue {
     }
 }
 
-impl<'de> Deserialize<'de> for DfValue {
-    fn deserialize(deserializer: D) -> Result
+struct FieldVisitor;
+
+impl<'de> serde::de::Visitor<'de> for FieldVisitor {
+    type Value = Variant;
+    fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("variant identifier")
+    }
+
+    fn visit_u64(self, val: u64) -> Result
     where
-        D: serde::Deserializer<'de>,
+        E: serde::de::Error,
     {
-        struct FieldVisitor;
-        impl<'de> serde::de::Visitor<'de> for FieldVisitor {
-            type Value = Variant;
-            fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-                f.write_str("variant identifier")
-            }
-
-            fn visit_u64(self, val: u64) -> Result
-            where
-                E: serde::de::Error,
-            {
-                if let Some(f) = Variant::from_repr(val as _) {
-                    Ok(f)
-                } else {
-                    Err(serde::de::Error::invalid_value(
-                        serde::de::Unexpected::Unsigned(val),
-                        &"variant index 0 <= i < 11",
-                    ))
-                }
-            }
+        if let Some(f) = Variant::from_repr(val as _) {
+            Ok(f)
+        } else {
+            Err(serde::de::Error::invalid_value(
+                serde::de::Unexpected::Unsigned(val),
+                &"variant index 0 <= i < 11",
+            ))
+        }
+    }
 
-            fn visit_str(self, val: &str) -> Result
-            where
-                E: serde::de::Error,
-            {
-                val.parse()
-                    .map_err(|_| serde::de::Error::unknown_variant(val, Variant::VARIANTS))
-            }
+    fn visit_str(self, val: &str) -> Result
+    where
+        E: serde::de::Error,
+    {
+        val.parse()
+            .map_err(|_| serde::de::Error::unknown_variant(val, Variant::VARIANTS))
+    }
 
-            fn visit_bytes(self, val: &[u8]) -> Result
-            where
-                E: serde::de::Error,
-            {
-                match std::str::from_utf8(val).map(|s| s.parse()) {
-                    Ok(Ok(field)) => Ok(field),
-                    _ => Err(serde::de::Error::unknown_variant(
-                        &String::from_utf8_lossy(val),
-                        Variant::VARIANTS,
-                    )),
-                }
-            }
+    fn visit_bytes(self, val: &[u8]) -> Result
+    where
+        E: serde::de::Error,
+    {
+        match std::str::from_utf8(val).map(|s| s.parse()) {
+            Ok(Ok(field)) => Ok(field),
+            _ => Err(serde::de::Error::unknown_variant(
+                &String::from_utf8_lossy(val),
+                Variant::VARIANTS,
+            )),
         }
+    }
+}
 
-        impl<'de> serde::Deserialize<'de> for Variant {
-            #[inline]
-            fn deserialize(deserializer: D) -> Result
-            where
-                D: serde::Deserializer<'de>,
-            {
-                serde::Deserializer::deserialize_identifier(deserializer, FieldVisitor)
-            }
-        }
+impl<'de> serde::Deserialize<'de> for Variant {
+    #[inline]
+    fn deserialize(deserializer: D) -> Result
+    where
+        D: serde::Deserializer<'de>,
+    {
+        serde::Deserializer::deserialize_identifier(deserializer, FieldVisitor)
+    }
+}
 
+impl<'de> Deserialize<'de> for DfValue {
+    fn deserialize(deserializer: D) -> Result
+    where
+        D: serde::Deserializer<'de>,
+    {
         struct Visitor;
         impl<'de> serde::de::Visitor<'de> for Visitor {
             type Value = DfValue;
diff --git a/readyset-dataflow/src/domain/channel/tcp.rs b/readyset-dataflow/src/domain/channel/tcp.rs
index 0207a110a5..ef83c8b8e4 100644
--- a/readyset-dataflow/src/domain/channel/tcp.rs
+++ b/readyset-dataflow/src/domain/channel/tcp.rs
@@ -120,7 +120,7 @@ impl TcpSender {
         // end, or else we'll silently get the wrong data (but no deserialization errors!)
         // https://app.clubhouse.io/readysettech/story/437 to fix that
         let c = bincode::options()
-            .with_limit(u32::max_value() as u64)
+            .with_limit(u32::MAX as u64)
             .allow_trailing_bytes();
         let size = c
             .serialized_size(packet)
diff --git a/readyset-dataflow/src/domain/mod.rs b/readyset-dataflow/src/domain/mod.rs
index 2dbaaa3697..f14b766e4c 100644
--- a/readyset-dataflow/src/domain/mod.rs
+++ b/readyset-dataflow/src/domain/mod.rs
@@ -4003,10 +4003,10 @@ impl Domain {
                 #[allow(clippy::indexing_slicing)] // nodes in replay paths must exist
                 if let Some(result) = state[dest.node].evict_keys(tag, &keys) {
                     bytes_freed += result.bytes_freed;
-                    #[allow(clippy::unwrap_used)]
                     // we can only evict from partial replay paths, so we must have a
                     // partial key
                     bytes_freed += trigger_downstream_evictions(
+                        #[allow(clippy::unwrap_used)]
                         dest.partial_index.as_ref().unwrap(),
                         &keys,
                         dest.node,
diff --git a/readyset-dataflow/src/lib.rs b/readyset-dataflow/src/lib.rs
index a3a08cec20..28be878e91 100644
--- a/readyset-dataflow/src/lib.rs
+++ b/readyset-dataflow/src/lib.rs
@@ -9,7 +9,6 @@
     trait_alias,
     btree_extract_if,
     bound_as_ref,
-    bound_map,
     stmt_expr_attributes,
     extract_if,
     hash_extract_if,
diff --git a/readyset-dataflow/src/node/ntype.rs b/readyset-dataflow/src/node/ntype.rs
index 96acf8bcf5..b18c258d23 100644
--- a/readyset-dataflow/src/node/ntype.rs
+++ b/readyset-dataflow/src/node/ntype.rs
@@ -1,3 +1,5 @@
+use std::fmt;
+
 use serde::{Deserialize, Serialize};
 
 use crate::node::special;
@@ -67,17 +69,17 @@ impl NodeType {
     }
 }
 
-impl ToString for NodeType {
-    fn to_string(&self) -> String {
+impl fmt::Display for NodeType {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
-            NodeType::Ingress => "Ingress".to_string(),
-            NodeType::Base(_) => "Base".to_string(),
-            NodeType::Internal(o) => format!("Internal ({})", o.to_string()),
-            NodeType::Egress(_) => "Egress".to_string(),
-            NodeType::Sharder(_) => "Sharder".to_string(),
-            NodeType::Reader(_) => "Reader".to_string(),
-            NodeType::Source => "Source".to_string(),
-            NodeType::Dropped => "Dropped".to_string(),
+            NodeType::Ingress => write!(f, "Ingress"),
+            NodeType::Base(_) => write!(f, "Base"),
+            NodeType::Internal(o) => write!(f, "Internal ({})", o),
+            NodeType::Egress(_) => write!(f, "Egress"),
+            NodeType::Sharder(_) => write!(f, "Sharder"),
+            NodeType::Reader(_) => write!(f, "Reader"),
+            NodeType::Source => write!(f, "Source"),
+            NodeType::Dropped => write!(f, "Dropped"),
         }
     }
 }
diff --git a/readyset-dataflow/src/ops/filter.rs b/readyset-dataflow/src/ops/filter.rs
index 1b55e003c9..aa9fdb3a7b 100644
--- a/readyset-dataflow/src/ops/filter.rs
+++ b/readyset-dataflow/src/ops/filter.rs
@@ -137,10 +137,6 @@ impl Ingredient for Filter {
             IngredientLookupResult::Miss => Ok(IngredientLookupResult::Miss),
         }
     }
-
-    fn is_selective(&self) -> bool {
-        true
-    }
 }
 
 #[cfg(test)]
diff --git a/readyset-dataflow/src/ops/grouped/concat.rs b/readyset-dataflow/src/ops/grouped/concat.rs
index eb021f1930..f20bb2ebb6 100644
--- a/readyset-dataflow/src/ops/grouped/concat.rs
+++ b/readyset-dataflow/src/ops/grouped/concat.rs
@@ -184,7 +184,7 @@ impl GroupedOperation for GroupConcat {
                 write!(&mut out_str, "{}", self.separator).unwrap();
             }
         }
-        prev_state.string_repr = out_str.clone();
+        prev_state.string_repr.clone_from(&out_str);
         last_state.insert(group, prev_state);
         Ok(Some(out_str.into()))
     }
diff --git a/readyset-dataflow/src/ops/grouped/mod.rs b/readyset-dataflow/src/ops/grouped/mod.rs
index e647d85c2b..d569bcf7eb 100644
--- a/readyset-dataflow/src/ops/grouped/mod.rs
+++ b/readyset-dataflow/src/ops/grouped/mod.rs
@@ -451,8 +451,4 @@ where
     fn description(&self, detailed: bool) -> String {
         self.inner.description(detailed)
     }
-
-    fn is_selective(&self) -> bool {
-        true
-    }
 }
diff --git a/readyset-dataflow/src/ops/mod.rs b/readyset-dataflow/src/ops/mod.rs
index c08215a3f5..048c3a372a 100644
--- a/readyset-dataflow/src/ops/mod.rs
+++ b/readyset-dataflow/src/ops/mod.rs
@@ -1,4 +1,5 @@
 use std::collections::{HashMap, HashSet};
+use std::fmt;
 use std::time::Instant;
 
 use dataflow_state::PointKey;
@@ -59,21 +60,20 @@ pub enum NodeOperator {
     TopK(topk::TopK),
 }
 
-impl ToString for NodeOperator {
-    fn to_string(&self) -> String {
+impl fmt::Display for NodeOperator {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match *self {
-            NodeOperator::Aggregation(_) => "Aggregation",
-            NodeOperator::Extremum(_) => "Extremum",
-            NodeOperator::Concat(_) => "Concat",
-            NodeOperator::Join(_) => "Join",
-            NodeOperator::Paginate(_) => "Paginate",
-            NodeOperator::Project(_) => "Project",
-            NodeOperator::Union(_) => "Union",
-            NodeOperator::Identity(_) => "Identity",
-            NodeOperator::Filter(_) => "Filter",
-            NodeOperator::TopK(_) => "TopK",
+            NodeOperator::Aggregation(_) => write!(f, "Aggregation"),
+            NodeOperator::Extremum(_) => write!(f, "Extremum"),
+            NodeOperator::Concat(_) => write!(f, "Concat"),
+            NodeOperator::Join(_) => write!(f, "Join"),
+            NodeOperator::Paginate(_) => write!(f, "Paginate"),
+            NodeOperator::Project(_) => write!(f, "Project"),
+            NodeOperator::Union(_) => write!(f, "Union"),
+            NodeOperator::Identity(_) => write!(f, "Identity"),
+            NodeOperator::Filter(_) => write!(f, "Filter"),
+            NodeOperator::TopK(_) => write!(f, "TopK"),
         }
-        .to_string()
     }
 }
@@ -221,9 +221,6 @@ impl Ingredient for NodeOperator {
     ) -> ReadySetResult> {
         impl_ingredient_fn_ref!(self, lookup, parent, columns, key, domain, states, mode)
     }
-    fn is_selective(&self) -> bool {
-        impl_ingredient_fn_ref!(self, is_selective,)
-    }
     fn requires_full_materialization(&self) -> bool {
         impl_ingredient_fn_ref!(self, requires_full_materialization,)
     }
diff --git a/readyset-dataflow/src/ops/paginate.rs b/readyset-dataflow/src/ops/paginate.rs
index 654f80c91e..90053aefa0 100644
--- a/readyset-dataflow/src/ops/paginate.rs
+++ b/readyset-dataflow/src/ops/paginate.rs
@@ -360,10 +360,6 @@ impl Ingredient for Paginate {
             self.order
         )
     }
-
-    fn is_selective(&self) -> bool {
-        true
-    }
 }
 
 #[cfg(test)]
diff --git a/readyset-dataflow/src/ops/topk.rs b/readyset-dataflow/src/ops/topk.rs
index a9c289ee03..a208d4295b 100644
--- a/readyset-dataflow/src/ops/topk.rs
+++ b/readyset-dataflow/src/ops/topk.rs
@@ -459,10 +459,6 @@ impl Ingredient for TopK {
             self.order
         )
     }
-
-    fn is_selective(&self) -> bool {
-        true
-    }
 }
 
 #[cfg(test)]
diff --git a/readyset-dataflow/src/ops/union.rs b/readyset-dataflow/src/ops/union.rs
index f9e209d845..f74325cbf3 100644
--- a/readyset-dataflow/src/ops/union.rs
+++ b/readyset-dataflow/src/ops/union.rs
@@ -1010,8 +1010,8 @@ impl Ingredient for Union {
                 }..=BufferedReplayKey {
                     tag,
                     key: key.clone(),
-                    requesting_shard: usize::max_value(),
-                    requesting_replica: usize::max_value(),
+                    requesting_shard: usize::MAX,
+                    requesting_replica: usize::MAX,
                 },
             ) {
                 if e.buffered.contains_key(&from) {
diff --git a/readyset-dataflow/src/payload.rs b/readyset-dataflow/src/payload.rs
index 86ce88e6be..25976d6f7b 100644
--- a/readyset-dataflow/src/payload.rs
+++ b/readyset-dataflow/src/payload.rs
@@ -664,20 +664,19 @@ impl Packet {
     }
 }
 
-impl ToString for Packet {
-    fn to_string(&self) -> String {
+impl fmt::Display for Packet {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
-            Packet::Input { .. } => "Input",
-            Packet::Message { .. } => "Message",
-            Packet::RequestReaderReplay { .. } => "RequestReaderReplay",
-            Packet::RequestPartialReplay { .. } => "RequestPartialReplay",
-            Packet::ReplayPiece { .. } => "ReplayPiece",
-            Packet::Timestamp { .. } => "Timestamp",
-            Packet::Finish { .. } => "Finish",
-            Packet::Spin { .. } => "Spin",
-            Packet::Evict { .. } => "Evict",
+            Packet::Input { .. } => write!(f, "Input"),
+            Packet::Message { .. } => write!(f, "Message"),
+            Packet::RequestReaderReplay { .. } => write!(f, "RequestReaderReplay"),
+            Packet::RequestPartialReplay { .. } => write!(f, "RequestPartialReplay"),
+            Packet::ReplayPiece { .. } => write!(f, "ReplayPiece"),
+            Packet::Timestamp { .. } => write!(f, "Timestamp"),
+            Packet::Finish { .. } => write!(f, "Finish"),
+            Packet::Spin { .. } => write!(f, "Spin"),
+            Packet::Evict { .. } => write!(f, "Evict"),
         }
-        .to_string()
     }
 }
diff --git a/readyset-dataflow/src/processing.rs b/readyset-dataflow/src/processing.rs
index 007824b068..6fce5b9d9f 100644
--- a/readyset-dataflow/src/processing.rs
+++ b/readyset-dataflow/src/processing.rs
@@ -817,11 +817,6 @@ where
         }
     }
 
-    /// Performance hint: should return true if this operator reduces the size of its input
-    fn is_selective(&self) -> bool {
-        false
-    }
-
     /// Returns true if this operator requires a full materialization
     fn requires_full_materialization(&self) -> bool {
         false
diff --git a/readyset-mir/src/rewrite/predicate_pushup.rs b/readyset-mir/src/rewrite/predicate_pushup.rs
index 2464c4f696..7c5dc14a9b 100644
--- a/readyset-mir/src/rewrite/predicate_pushup.rs
+++ b/readyset-mir/src/rewrite/predicate_pushup.rs
@@ -186,7 +186,7 @@ fn map_columns_above_alias_table(
             .get(&column.name)
            .ok_or_else(|| unsupported_err!("Parent of AliasTable node missing required column"))?;
 
-        column.table = new_column.table.clone();
+        column.table.clone_from(&new_column.table);
     }
 
     Ok(())
diff --git a/readyset-mysql/tests/vertical.rs b/readyset-mysql/tests/vertical.rs
index 7d60b27bf2..6fc70bf849 100644
--- a/readyset-mysql/tests/vertical.rs
+++ b/readyset-mysql/tests/vertical.rs
@@ -389,7 +389,7 @@ where
             } => {
                 let table_rows = self.rows.entry(table).or_default();
                 let old_state_row = table_rows.iter_mut().find(|r| *r == old_row).unwrap();
-                *old_state_row = new_row.clone();
+                old_state_row.clone_from(new_row);
             }
             Operation::Delete { table, row } => {
                 let table_rows = self.rows.entry(table).or_default();
diff --git a/readyset-server/src/controller/migrate/materialization/mod.rs b/readyset-server/src/controller/migrate/materialization/mod.rs
index a3e26fb055..a68975db60 100644
--- a/readyset-server/src/controller/migrate/materialization/mod.rs
+++ b/readyset-server/src/controller/migrate/materialization/mod.rs
@@ -429,7 +429,7 @@ impl Materializations {
             );
             mi = parent;
             indices = map_lookup_indices(m, mi, &indices)?;
-            #[allow(clippy::indexing_slicing)] // graph must contain mi
+            // #[allow(clippy::indexing_slicing)] // graph must contain mi
             m = &graph[mi];
         }
@@ -743,7 +743,7 @@ impl Materializations {
                     self.partial.contains(&pi),
                     "attempting to place full materialization beyond materialization frontier"
                 );
-                #[allow(clippy::unwrap_used)] // graph must contain pi
+                // #[allow(clippy::unwrap_used)] // graph must contain pi
                 graph.node_weight_mut(pi).unwrap().purge = true;
             }
         }
diff --git a/readyset-server/src/controller/migrate/mod.rs b/readyset-server/src/controller/migrate/mod.rs
index 641da76af1..60dd5315ff 100644
--- a/readyset-server/src/controller/migrate/mod.rs
+++ b/readyset-server/src/controller/migrate/mod.rs
@@ -774,7 +774,7 @@ impl<'df> Migration<'df> {
                 node = ni.index(),
                 "marking node as beyond materialization frontier"
             );
-            #[allow(clippy::unwrap_used)] // ni must belong to the graph
+            // #[allow(clippy::unwrap_used)] // ni must belong to the graph
             self.dataflow_state
                 .ingredients
                 .node_weight_mut(ni)
diff --git a/readyset-server/src/controller/mod.rs b/readyset-server/src/controller/mod.rs
index 340b3f3877..096571e641 100644
--- a/readyset-server/src/controller/mod.rs
+++ b/readyset-server/src/controller/mod.rs
@@ -707,7 +707,7 @@ impl Controller {
             }
             req = self.replicator_channel.receiver.recv() => {
                 fn now() -> u64 {
-                    #[allow(clippy::unwrap_used)] // won't error comparing to UNIX EPOCH
+                    // #[allow(clippy::unwrap_used)] // won't error comparing to UNIX EPOCH
                    SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64
                 }
diff --git a/readyset-server/src/controller/sql/query_signature.rs b/readyset-server/src/controller/sql/query_signature.rs
index d528caf6c5..c91d2d66d7 100644
--- a/readyset-server/src/controller/sql/query_signature.rs
+++ b/readyset-server/src/controller/sql/query_signature.rs
@@ -12,21 +12,19 @@ pub trait Signature {
 
 // TODO: Change relations to Hashset<&'a Relation>
 #[derive(Clone, Debug)]
-pub struct QuerySignature<'a> {
-    pub relations: HashSet<&'a str>,
-    pub attributes: HashSet<&'a Column>,
+pub struct QuerySignature {
     pub hash: u64,
 }
 
-impl<'a> PartialEq for QuerySignature<'a> {
+impl PartialEq for QuerySignature {
     fn eq(&self, other: &QuerySignature) -> bool {
         self.hash == other.hash
     }
 }
 
-impl<'a> Eq for QuerySignature<'a> {}
+impl Eq for QuerySignature {}
 
-impl<'a> Hash for QuerySignature<'a> {
+impl Hash for QuerySignature {
     fn hash(&self, state: &mut H)
     where
         H: Hasher,
@@ -51,7 +49,6 @@ impl Signature for QueryGraph {
         use std::collections::hash_map::DefaultHasher;
 
         let mut hasher = DefaultHasher::new();
-        let rels = self.relations.keys().map(|r| r.name.as_str()).collect();
 
         // Compute relations part of hash
         let mut r_vec: Vec<_> = self.relations.keys().collect();
@@ -129,8 +126,6 @@ impl Signature for QueryGraph {
         }
 
         QuerySignature {
-            relations: rels,
-            attributes: attrs,
             hash: hasher.finish(),
         }
     }
diff --git a/readyset-server/src/controller/sql/recipe/mod.rs b/readyset-server/src/controller/sql/recipe/mod.rs
index a3c01b575d..5c11771f59 100644
--- a/readyset-server/src/controller/sql/recipe/mod.rs
+++ b/readyset-server/src/controller/sql/recipe/mod.rs
@@ -1,6 +1,6 @@
 use std::{fmt, str};
 
-use nom_sql::{Relation, SelectStatement, SqlIdentifier};
+use nom_sql::{Relation, SelectStatement};
 use petgraph::graph::NodeIndex;
 use readyset_client::recipe::changelist::ChangeList;
 use readyset_client::ViewCreateRequest;
@@ -85,7 +85,7 @@ impl PartialEq for Recipe {
 #[derive(Debug)]
 pub(crate) enum Schema<'a> {
     Table(&'a BaseSchema),
-    View(&'a [SqlIdentifier]),
+    View,
 }
 
 impl Recipe {
@@ -174,7 +174,7 @@ impl Recipe {
                     None => self.inc.get_view_schema(name),
                     Some(internal_qn) => self.inc.get_view_schema(internal_qn),
                 };
-                s.map(Schema::View)
+                s.map(|_| Schema::View)
             }
             Some(s) => Some(Schema::Table(s)),
         }
diff --git a/readyset-server/src/controller/state.rs b/readyset-server/src/controller/state.rs
index 4975d3d9e2..90a3127e1b 100644
--- a/readyset-server/src/controller/state.rs
+++ b/readyset-server/src/controller/state.rs
@@ -1818,8 +1818,12 @@ impl DfState {
         self.workers = Default::default();
 
         let mut new_materializations = Materializations::new();
-        new_materializations.paths = self.materializations.paths.clone();
-        new_materializations.redundant_partial = self.materializations.redundant_partial.clone();
+        new_materializations
+            .paths
+            .clone_from(&self.materializations.paths);
+        new_materializations
+            .redundant_partial
+            .clone_from(&self.materializations.redundant_partial);
         new_materializations.tag_generator = self.materializations.tag_generator;
         new_materializations.config = self.materializations.config.clone();
diff --git a/readyset-server/src/http_router.rs b/readyset-server/src/http_router.rs
index 57eeff236d..d6cf82b5e7 100644
--- a/readyset-server/src/http_router.rs
+++ b/readyset-server/src/http_router.rs
@@ -12,7 +12,6 @@ use hyper::header::CONTENT_TYPE;
 use hyper::service::make_service_fn;
 use hyper::{self, Body, Method, Request, Response, StatusCode};
 use readyset_alloc::{dump_stats, memory_and_per_thread_stats};
-use readyset_client::consensus::Authority;
 use readyset_client::metrics::recorded;
 use readyset_errors::ReadySetError;
 use readyset_util::shutdown::ShutdownReceiver;
@@ -41,8 +40,6 @@ pub struct NoriaServerHttpRouter {
     pub worker_tx: Sender,
     /// Channel to the running `Controller`.
     pub controller_tx: Sender,
-    /// The `Authority` used inside the server.
-    pub authority: Arc,
     /// Used to record and report the servers current health.
     pub health_reporter: HealthReporter,
     /// Used to communicate externally that a failpoint request has been received and successfully
diff --git a/readyset-server/src/startup.rs b/readyset-server/src/startup.rs
index 6a95f77d26..006b867abc 100644
--- a/readyset-server/src/startup.rs
+++ b/readyset-server/src/startup.rs
@@ -187,7 +187,6 @@ async fn start_controller(
 }
 
 async fn start_request_router(
-    authority: Arc,
     listen_addr: IpAddr,
     external_addr: SocketAddr,
     worker_tx: Sender,
@@ -202,7 +201,6 @@ async fn start_request_router(
         port: external_addr.port(),
         worker_tx: worker_tx.clone(),
         controller_tx,
-        authority: authority.clone(),
         health_reporter: health_reporter.clone(),
         failpoint_channel,
     };
@@ -279,7 +277,6 @@ pub(crate) async fn start_instance_inner(
     let (tx, rx) = maybe_create_failpoint_chann(wait_for_failpoint);
     let mut health_reporter = HealthReporter::new();
     let http_uri = start_request_router(
-        authority.clone(),
         listen_addr,
         external_addr,
         worker_tx.clone(),
diff --git a/readyset-sql-passes/src/implied_tables.rs b/readyset-sql-passes/src/implied_tables.rs
index 41723d19dd..b97dc45338 100644
--- a/readyset-sql-passes/src/implied_tables.rs
+++ b/readyset-sql-passes/src/implied_tables.rs
@@ -181,7 +181,7 @@ impl<'ast, 'schema> VisitorMut<'ast> for ExpandImpliedTablesVisitor<'schema> {
             }
 
             if let Some(t) = matches.first() {
-                table.schema = t.schema.clone();
+                table.schema.clone_from(&t.schema);
             }
         } else {
             column.table = self.find_table(&column.name);
diff --git a/readyset-telemetry-reporter/Cargo.toml b/readyset-telemetry-reporter/Cargo.toml
index 041eda2cfc..05efe1ce5c 100644
--- a/readyset-telemetry-reporter/Cargo.toml
+++ b/readyset-telemetry-reporter/Cargo.toml
@@ -26,7 +26,7 @@ blake2= "0.10"
 
 readyset-version = { path = "../readyset-version" }
 
-[dev_dependencies]
+[dev-dependencies]
 tokio = { workspace = true, features = ["full", "test-util"] }
 
 [features]
diff --git a/readyset-util/src/lib.rs b/readyset-util/src/lib.rs
index befbba1068..47501d87a1 100644
--- a/readyset-util/src/lib.rs
+++ b/readyset-util/src/lib.rs
@@ -1,7 +1,7 @@
 //! This crate provides miscellaneous utilities and extensions to the Rust standard library, for use
 //! in all crates in this workspace.
 #![deny(missing_docs, rustdoc::missing_crate_level_docs)]
-#![feature(step_trait, bound_as_ref, bound_map, rustc_attrs)]
+#![feature(step_trait, bound_as_ref, rustc_attrs)]
 #![allow(internal_features)]
 
 use std::borrow::Borrow;
 use std::collections::{BTreeMap, HashMap};
diff --git a/readyset-util/src/nonmaxusize.rs b/readyset-util/src/nonmaxusize.rs
index 42d0483419..3c824b43b9 100644
--- a/readyset-util/src/nonmaxusize.rs
+++ b/readyset-util/src/nonmaxusize.rs
@@ -50,7 +50,9 @@ mod test {
             std::mem::size_of::()
         );
 
-        assert!(unsafe { Some(NonMaxUsize(usize::MAX)) }.is_none());
+        let expected_none = unsafe { Some(NonMaxUsize(usize::MAX)) };
+        assert!(expected_none.is_none());
+
         assert!(unsafe { Some(NonMaxUsize(usize::MAX - 1)) }.is_some());
         assert!(unsafe { Some(NonMaxUsize(0)) }.is_some());
     }
diff --git a/replicators/src/db_util.rs b/replicators/src/db_util.rs
index f8883533a4..212d8d7aea 100644
--- a/replicators/src/db_util.rs
+++ b/replicators/src/db_util.rs
@@ -1,6 +1,7 @@
 //! Database Utilities
 //! Contains helpers for determining the schemas and tables of a database for use in replication
 use std::collections::HashMap;
+use std::fmt;
 
 use nom_sql::{Dialect, DialectDisplay};
 use readyset_errors::ReadySetError;
@@ -54,9 +55,9 @@ impl DatabaseSchemas {
     }
 }
 
-impl ToString for DatabaseSchemas {
-    fn to_string(&self) -> String {
-        format!("{:?}", self)
+impl fmt::Display for DatabaseSchemas {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self)
     }
 }
 
@@ -163,9 +164,9 @@ impl CreateSchema {
     }
 }
 
-impl ToString for CreateSchema {
-    fn to_string(&self) -> String {
-        format!("{self:?}")
+impl fmt::Display for CreateSchema {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{self:?}")
     }
 }
diff --git a/replicators/src/postgres_connector/ddl_replication.rs b/replicators/src/postgres_connector/ddl_replication.rs
index 9f3f552f9a..e65109d857 100644
--- a/replicators/src/postgres_connector/ddl_replication.rs
+++ b/replicators/src/postgres_connector/ddl_replication.rs
@@ -184,6 +184,7 @@ pub(crate) struct DdlEvent {
 
 /// This is just an ugly wrapper because `deserialize_with` doesn't play well with Options
 #[derive(Debug, Deserialize, Clone)]
+#[allow(dead_code)]
 pub(crate) struct ParsedStatement(#[serde(deserialize_with = "parse_pgsql")] SqlQuery);
 
 fn parse_pgsql<'de, D>(deserializer: D) -> Result
diff --git a/replicators/src/postgres_connector/snapshot.rs b/replicators/src/postgres_connector/snapshot.rs
index 70b2c05c47..912b100781 100644
--- a/replicators/src/postgres_connector/snapshot.rs
+++ b/replicators/src/postgres_connector/snapshot.rs
@@ -64,6 +64,7 @@ enum ConstraintKind {
     PrimaryKey,
     UniqueKey,
     ForeignKey,
+    #[allow(dead_code)]
     Other(u8),
 }
 
diff --git a/replicators/tests/ddl_vertical.rs b/replicators/tests/ddl_vertical.rs
index c4a5e9390e..3a6828a795 100644
--- a/replicators/tests/ddl_vertical.rs
+++ b/replicators/tests/ddl_vertical.rs
@@ -661,7 +661,7 @@ impl ModelState for DDLModelState {
                     .iter_mut()
                     .find(|cs| cs.name == *col_name)
                     .unwrap();
-                spec.name = new_name.clone();
+                spec.name.clone_from(new_name);
             }
             Operation::DeleteRow(..) => (),
             Operation::CreateSimpleView { name, table_source } => {
@@ -731,7 +731,7 @@ impl ModelState for DDLModelState {
                     .iter_mut()
                     .find(|v| *v == value_name)
                     .unwrap();
-                *val_ref = new_name.clone();
+                val_ref.clone_from(new_name);
             }
             Operation::Evict { .. } => (),
         }
diff --git a/replicators/tests/tests.rs b/replicators/tests/tests.rs
index dc8ae0cf50..09f47340cc 100644
--- a/replicators/tests/tests.rs
+++ b/replicators/tests/tests.rs
@@ -143,6 +143,7 @@ impl TestChannel {
     }
 }
 
+#[allow(dead_code)]
 /// Channel used to send notifications from the controller to replicator.
 struct TestControllChannel(UnboundedSender);
 
diff --git a/rust-toolchain b/rust-toolchain
index 0e2ac63c79..3548c285ac 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1 +1 @@
-nightly-2023-12-28
+nightly-2024-05-02