diff --git a/CODE_STYLE.md b/CODE_STYLE.md index 4a03376bb81..5e5914de46c 100644 --- a/CODE_STYLE.md +++ b/CODE_STYLE.md @@ -109,6 +109,16 @@ These assert will not be part of the release binary and won't hurt the execution **example needed** +## Errors + +Error messages should be concise, lowercase (except proper names), and without trailing punctuation. + +### Examples +- "failed to start actor runtimes" +- "cannot join PostgreSQL URI {} with path {:?}" +- "could not find split metadata in metastore {}" +- "unknown output format {:?}" + ## Comments We use the same code style as [rustc's doc comments](https://doc.rust-lang.org/1.0.0/style/style/comments.html). diff --git a/quickwit/quickwit-actors/src/actor.rs b/quickwit/quickwit-actors/src/actor.rs index 2d195272977..92990485996 100644 --- a/quickwit/quickwit-actors/src/actor.rs +++ b/quickwit/quickwit-actors/src/actor.rs @@ -44,21 +44,21 @@ pub enum ActorExitStatus { /// /// (This is equivalent to exit status code 0.) /// Note that this is not really an error. - #[error("Success")] + #[error("success")] Success, /// The actor was asked to gracefully shutdown. /// /// (Semantically equivalent to exit status code 130, triggered by SIGINT aka Ctrl-C, or /// SIGQUIT) - #[error("Quit")] + #[error("quit")] Quit, /// The actor tried to send a message to a downstream actor and failed. /// The logic ruled that the actor should be killed. /// /// (Semantically equivalent to exit status code 141, triggered by SIGPIPE) - #[error("Downstream actor exited.")] + #[error("downstream actor exited")] DownstreamClosed, /// The actor was killed. @@ -68,15 +68,15 @@ pub enum ActorExitStatus { /// - its kill switch was activated. /// /// (Semantically equivalent to exit status code 137, triggered by SIGKILL) - #[error("Killed")] + #[error("killed")] Killed, /// An unexpected error happened while processing a message. - #[error("Failure(cause={0:?})")] + #[error("failure(cause={0:?})")] Failure(Arc<anyhow::Error>), /// The thread or the task executing the actor loop panicked. 
- #[error("Panicked")] + #[error("panicked")] Panicked, } diff --git a/quickwit/quickwit-actors/src/channel_with_priority.rs b/quickwit/quickwit-actors/src/channel_with_priority.rs index 1bbf89fff23..2a88d08f6a7 100644 --- a/quickwit/quickwit-actors/src/channel_with_priority.rs +++ b/quickwit/quickwit-actors/src/channel_with_priority.rs @@ -64,17 +64,17 @@ impl LockedOption { #[derive(Debug, Error)] pub enum SendError { - #[error("The channel is closed.")] + #[error("the channel is closed")] Disconnected, - #[error("The channel is full.")] + #[error("the channel is full")] Full, } #[derive(Debug, Error)] pub enum TrySendError { - #[error("The channel is closed.")] + #[error("the channel is closed")] Disconnected, - #[error("The channel is full.")] + #[error("the channel is full")] Full(M), } @@ -89,9 +89,9 @@ impl From> for TrySendError { #[derive(Clone, Copy, Debug, Error, Eq, PartialEq)] pub enum RecvError { - #[error("No message are currently available.")] + #[error("no message are currently available")] NoMessageAvailable, - #[error("All sender were dropped and no pending messages are in the channel.")] + #[error("all senders were dropped and no pending messages are in the channel")] Disconnected, } diff --git a/quickwit/quickwit-actors/src/lib.rs b/quickwit/quickwit-actors/src/lib.rs index ffa384405a5..7aae3de514a 100644 --- a/quickwit/quickwit-actors/src/lib.rs +++ b/quickwit/quickwit-actors/src/lib.rs @@ -121,10 +121,10 @@ const OBSERVE_TIMEOUT: Duration = Duration::from_secs(3); /// Error that occurred while calling `ActorContext::ask(..)` or `Universe::ask` #[derive(Error, Debug)] pub enum AskError { - #[error("Message could not be delivered")] + #[error("message could not be delivered")] MessageNotDelivered, - #[error("Error while the message was being processed.")] + #[error("error while the message was being processed")] ProcessMessageError, - #[error("The handler returned an error: `{0:?}`.")] + #[error("the handler returned an error: `{0:?}`")] ErrorReply(#[from] E), } diff --git a/quickwit/quickwit-actors/src/spawn_builder.rs b/quickwit/quickwit-actors/src/spawn_builder.rs index 6bc52435852..42d94f43ad4 100644 --- a/quickwit/quickwit-actors/src/spawn_builder.rs +++ b/quickwit/quickwit-actors/src/spawn_builder.rs @@ -306,9 +306,9 @@ impl ActorExecutionEnv { .get_mut() .finalize(&exit_status, &self.ctx) .await - .with_context(|| format!("Finalization of actor {}", self.actor.get_mut().name())) + .with_context(|| format!("finalization of actor {}", self.actor.get_mut().name())) { - error!(error=?finalize_error, "Finalizing failed, set exit status to panicked."); + error!(error=?finalize_error, "finalizing failed, set exit status to panicked"); return ActorExitStatus::Panicked; } exit_status diff --git a/quickwit/quickwit-actors/src/supervisor.rs b/quickwit/quickwit-actors/src/supervisor.rs index 6385e214678..841123993d7 100644 --- a/quickwit/quickwit-actors/src/supervisor.rs +++ b/quickwit/quickwit-actors/src/supervisor.rs @@ -246,7 +246,7 @@ mod tests { } FailingActorMessage::ReturnError => { return Err(ActorExitStatus::from(anyhow::anyhow!( - "Failing actor error" + "failing actor error" ))); } FailingActorMessage::Increment => { diff --git a/quickwit/quickwit-actors/src/tests.rs b/quickwit/quickwit-actors/src/tests.rs index 90f97e5fcd4..ebb2e3e1870 100644 --- a/quickwit/quickwit-actors/src/tests.rs +++ b/quickwit/quickwit-actors/src/tests.rs @@ -497,7 +497,7 @@ impl Actor for BuggyFinalizeActor { _exit_status: &ActorExitStatus, _: &ActorContext, ) -> anyhow::Result<()> { - 
anyhow::bail!("Finalize error") + anyhow::bail!("finalize error") } } diff --git a/quickwit/quickwit-cli/src/checklist.rs b/quickwit/quickwit-cli/src/checklist.rs index f1bf711ed47..2b7f13bfed8 100644 --- a/quickwit/quickwit-cli/src/checklist.rs +++ b/quickwit/quickwit-cli/src/checklist.rs @@ -123,7 +123,7 @@ impl Display for ChecklistError { check_item, check_item_err .as_ref() - .expect_err("ChecklistError can't contain success results.") + .expect_err("ChecklistError can't contain success results") ) }) .join(""); diff --git a/quickwit/quickwit-cli/src/cli.rs b/quickwit/quickwit-cli/src/cli.rs index 3e6d3edd753..e915311a3f7 100644 --- a/quickwit/quickwit-cli/src/cli.rs +++ b/quickwit/quickwit-cli/src/cli.rs @@ -77,14 +77,14 @@ impl CliCommand { pub fn parse_cli_args(mut matches: ArgMatches) -> anyhow::Result { let (subcommand, submatches) = matches .remove_subcommand() - .context("Failed to parse command.")?; + .context("failed to parse command")?; match subcommand.as_str() { "index" => IndexCliCommand::parse_cli_args(submatches).map(CliCommand::Index), "run" => RunCliCommand::parse_cli_args(submatches).map(CliCommand::Run), "source" => SourceCliCommand::parse_cli_args(submatches).map(CliCommand::Source), "split" => SplitCliCommand::parse_cli_args(submatches).map(CliCommand::Split), "tool" => ToolCliCommand::parse_cli_args(submatches).map(CliCommand::Tool), - _ => bail!("Unknown command `{subcommand}`."), + _ => bail!("unknown command `{subcommand}`"), } } diff --git a/quickwit/quickwit-cli/src/index.rs b/quickwit/quickwit-cli/src/index.rs index e7995d41d59..c983b67af9d 100644 --- a/quickwit/quickwit-cli/src/index.rs +++ b/quickwit/quickwit-cli/src/index.rs @@ -259,7 +259,7 @@ impl IndexCliCommand { pub fn parse_cli_args(mut matches: ArgMatches) -> anyhow::Result { let (subcommand, submatches) = matches .remove_subcommand() - .context("Failed to parse index subcommand.")?; + .context("failed to parse index subcommand")?; match subcommand.as_str() { "clear" => Self::parse_clear_args(submatches), "create" => Self::parse_create_args(submatches), @@ -268,7 +268,7 @@ impl IndexCliCommand { "ingest" => Self::parse_ingest_args(submatches), "list" => Self::parse_list_args(submatches), "search" => Self::parse_search_args(submatches), - _ => bail!("Unknown index subcommand `{subcommand}`."), + _ => bail!("unknown index subcommand `{subcommand}`"), } } @@ -339,7 +339,7 @@ impl IndexCliCommand { (false, false) => CommitType::Auto, (false, true) => CommitType::Force, (true, false) => CommitType::WaitFor, - (true, true) => bail!("`--wait` and `--force` are mutually exclusive options."), + (true, true) => bail!("`--wait` and `--force` are mutually exclusive options"), }; if commit_type == CommitType::Auto && client_args.commit_timeout.is_some() { @@ -361,7 +361,7 @@ impl IndexCliCommand { .expect("`index` should be a required arg."); let query = matches .remove_one::("query") - .context("`query` should be a required arg.")?; + .context("`query` should be a required arg")?; let aggregation = matches.remove_one::("aggregation"); let max_hits = matches @@ -779,7 +779,7 @@ pub async fn ingest_docs_cli(args: IngestDocsArgs) -> anyhow::Result<()> { } let progress_bar = match &args.input_path_opt { Some(filepath) => { - let file_len = std::fs::metadata(filepath).context("File not found")?.len(); + let file_len = std::fs::metadata(filepath).context("file not found")?.len(); ProgressBar::new(file_len) } None => ProgressBar::new_spinner(), @@ -834,7 +834,7 @@ pub async fn search_index(args: SearchIndexArgs) -> 
anyhow::Result<SearchResponseRest> { let aggs: Option<serde_json::Value> = args .aggregation .map(|aggs_string| { - serde_json::from_str(&aggs_string).context("Failed to deserialize aggregations.") + serde_json::from_str(&aggs_string).context("failed to deserialize aggregations") }) .transpose()?; let sort_by = args diff --git a/quickwit/quickwit-cli/src/lib.rs b/quickwit/quickwit-cli/src/lib.rs index e8e99bb1627..2c1bd04b512 100644 --- a/quickwit/quickwit-cli/src/lib.rs +++ b/quickwit/quickwit-cli/src/lib.rs @@ -202,7 +202,7 @@ pub fn parse_duration_or_none(duration_with_unit_str: &str) -> anyhow::Result<Option<Duration>> pub async fn load_node_config(config_uri: &Uri) -> anyhow::Result<NodeConfig> { let config_content = load_file(&StorageResolver::unconfigured(), config_uri) .await - .context("Failed to load node config.")?; + .context("failed to load node config")?; let config_format = ConfigFormat::sniff_from_uri(config_uri)?; let config = NodeConfig::load(config_format, config_content.as_slice()) .await - .with_context(|| format!("Failed to parse node config `{config_uri}`."))?; + .with_context(|| format!("failed to parse node config `{config_uri}`"))?; info!(config_uri=%config_uri, config=?config, "Loaded node config."); Ok(config) } diff --git a/quickwit/quickwit-cli/src/main.rs b/quickwit/quickwit-cli/src/main.rs index 488011de258..26cba621fad 100644 --- a/quickwit/quickwit-cli/src/main.rs +++ b/quickwit/quickwit-cli/src/main.rs @@ -52,7 +52,7 @@ fn setup_logging_and_tracing( let env_filter = env::var("RUST_LOG") .map(|_| EnvFilter::from_default_env()) .or_else(|_| EnvFilter::try_new(format!("quickwit={level}"))) - .context("Failed to set up tracing env filter.")?; + .context("failed to set up tracing env filter")?; global::set_text_map_propagator(TraceContextPropagator::new()); let registry = tracing_subscriber::registry().with(env_filter); let event_format = tracing_subscriber::fmt::format() .with_timer( time::format_description::parse( "[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond digits:3]Z", ) - .expect("Time format invalid."), + .expect("time format invalid"), ), ); // Note on disabling ANSI characters: setting the ansi boolean on event format is insufficient. 
@@ -84,7 +84,7 @@ fn setup_logging_and_tracing( .with_trace_config(trace_config) .with_batch_config(batch_config) .install_batch(opentelemetry::runtime::Tokio) - .context("Failed to initialize OpenTelemetry OTLP exporter.")?; + .context("failed to initialize OpenTelemetry OTLP exporter")?; registry .with(tracing_opentelemetry::layer().with_tracer(tracer)) .with( @@ -93,7 +93,7 @@ fn setup_logging_and_tracing( .with_ansi(ansi), ) .try_init() - .context("Failed to set up tracing.")?; + .context("failed to set up tracing")?; } else { registry .with( @@ -102,7 +102,7 @@ fn setup_logging_and_tracing( .with_ansi(ansi), ) .try_init() - .context("Failed to set up tracing.")?; + .context("failed to set up tracing")?; } Ok(()) } diff --git a/quickwit/quickwit-cli/src/source.rs b/quickwit/quickwit-cli/src/source.rs index dcc9251cf5f..353bea71878 100644 --- a/quickwit/quickwit-cli/src/source.rs +++ b/quickwit/quickwit-cli/src/source.rs @@ -209,7 +209,7 @@ impl SourceCliCommand { pub fn parse_cli_args(mut matches: ArgMatches) -> anyhow::Result<Self> { let (subcommand, submatches) = matches .remove_subcommand() - .context("Failed to parse source subcommand.")?; + .context("failed to parse source subcommand")?; match subcommand.as_str() { "create" => Self::parse_create_args(submatches).map(Self::CreateSource), "enable" => { @@ -224,7 +224,7 @@ impl SourceCliCommand { "reset-checkpoint" => { Self::parse_reset_checkpoint_args(submatches).map(Self::ResetCheckpoint) } - _ => bail!("Unknown source subcommand `{subcommand}`."), + _ => bail!("unknown source subcommand `{subcommand}`"), } } @@ -348,7 +348,7 @@ async fn toggle_source_cli(args: ToggleSourceArgs) -> anyhow::Result<()> { .sources(&args.index_id) .toggle(&args.source_id, args.enable) .await - .context("Failed to update source")?; + .context("failed to update source")?; let toggled_state_name = if args.enable { "enabled" } else { "disabled" }; println!( @@ -376,7 +376,7 @@ async fn delete_source_cli(args: DeleteSourceArgs) -> anyhow::Result<()> { .sources(&args.index_id) .delete(&args.source_id) .await - .context("Failed to delete source.")?; + .context("failed to delete source")?; println!("{} Source successfully deleted.", "✔".color(GREEN_COLOR)); Ok(()) } @@ -388,7 +388,7 @@ async fn describe_source_cli(args: DescribeSourceArgs) -> anyhow::Result<()> { .indexes() .get(&args.index_id) .await - .context("Failed to fetch index metadata.")?; + .context("failed to fetch index metadata")?; let source_checkpoint = index_metadata .checkpoint .source_checkpoint(&args.source_id) @@ -414,7 +414,7 @@ where let source = sources .into_iter() .find(|source| source.source_id == source_id) - .with_context(|| format!("Source `{source_id}` does not exist."))?; + .with_context(|| format!("source `{source_id}` does not exist"))?; let source_rows = vec![SourceRow { source_id: source.source_id.clone(), @@ -446,7 +446,7 @@ async fn list_sources_cli(args: ListSourcesArgs) -> anyhow::Result<()> { .indexes() .get(&args.index_id) .await - .context("Failed to fetch indexes metadatas.")?; + .context("failed to fetch index metadata")?; let table = make_list_sources_table(index_metadata.sources.into_values()); display_tables(&[table]); Ok(()) diff --git a/quickwit/quickwit-cli/src/split.rs b/quickwit/quickwit-cli/src/split.rs index 4098f4242ab..9cd9b5ca784 100644 --- a/quickwit/quickwit-cli/src/split.rs +++ b/quickwit/quickwit-cli/src/split.rs @@ -117,8 +117,8 @@ impl FromStr for OutputFormat { "pretty-json" | "pretty_json" => Ok(OutputFormat::PrettyJson), "table" => 
Ok(OutputFormat::Table), _ => bail!( - "Unkown output format `{output_format_str}`. Supported formats are: `table`, \ - `json`, and `pretty-json`." + "unknown output format `{output_format_str}`. supported formats are: `table`, \ + `json`, and `pretty-json`" ), } } @@ -163,12 +163,12 @@ impl SplitCliCommand { pub fn parse_cli_args(mut matches: ArgMatches) -> anyhow::Result<Self> { let (subcommand, submatches) = matches .remove_subcommand() - .context("Failed to split subcommand.")?; + .context("failed to parse split subcommand")?; match subcommand.as_str() { "describe" => Self::parse_describe_args(submatches), "list" => Self::parse_list_args(submatches), "mark-for-deletion" => Self::parse_mark_for_deletion_args(submatches), - _ => bail!("Unknown split subcommand `{subcommand}`."), + _ => bail!("unknown split subcommand `{subcommand}`"), } } @@ -290,7 +290,7 @@ async fn list_split_cli(args: ListSplitArgs) -> anyhow::Result<()> { .splits(&args.index_id) .list(list_splits_query_params) .await - .context("Failed to list splits.")?; + .context("failed to list splits")?; let output = match args.output_format { OutputFormat::Json => serde_json::to_string(&splits)?, OutputFormat::PrettyJson => serde_json::to_string_pretty(&splits)?, @@ -343,7 +343,7 @@ async fn describe_split_cli(args: DescribeSplitArgs) -> anyhow::Result<()> { .find(|split| split.split_id() == args.split_id) .with_context(|| { format!( - "Could not find split metadata in metastore {}", + "could not find split metadata in metastore {}", args.split_id ) })?; @@ -430,8 +430,8 @@ fn parse_date(date_arg: &str, option_name: &str) -> anyhow::Result<OffsetDateTime> fn parse_split_state(split_state_arg: &str) -> anyhow::Result<SplitState> { let split_state = match split_state_arg { "staged" => SplitState::Staged, "published" => SplitState::Published, "marked" => SplitState::MarkedForDeletion, _ => bail!(format!( - "Unknown split state `{split_state_arg}`. Possible values are `staged`, `published`, \ - and `marked`." + "unknown split state `{split_state_arg}`. 
possible values are `staged`, `published`, \ + and `marked`" )), }; Ok(split_state) diff --git a/quickwit/quickwit-cli/src/tool.rs b/quickwit/quickwit-cli/src/tool.rs index 6cf78ec24f5..73e17c710ff 100644 --- a/quickwit/quickwit-cli/src/tool.rs +++ b/quickwit/quickwit-cli/src/tool.rs @@ -229,14 +229,14 @@ impl ToolCliCommand { pub fn parse_cli_args(mut matches: ArgMatches) -> anyhow::Result<Self> { let (subcommand, submatches) = matches .remove_subcommand() - .context("Failed to parse tool subcommand.")?; + .context("failed to parse tool subcommand")?; match subcommand.as_str() { "gc" => Self::parse_garbage_collect_args(submatches), "local-ingest" => Self::parse_local_ingest_args(submatches), "local-search" => Self::parse_local_search_args(submatches), "merge" => Self::parse_merge_args(submatches), "extract-split" => Self::parse_extract_split_args(submatches), - _ => bail!("Unknown tool subcommand `{subcommand}`."), + _ => bail!("unknown tool subcommand `{subcommand}`"), } } @@ -286,7 +286,7 @@ impl ToolCliCommand { .expect("`index` should be a required arg."); let query = matches .remove_one::<String>("query") - .context("`query` should be a required arg.")?; + .context("`query` should be a required arg")?; let aggregation = matches.remove_one::<String>("aggregation"); let max_hits = matches .remove_one::("max-hits") @@ -518,7 +518,7 @@ pub async fn local_ingest_docs_cli(args: LocalIngestDocsArgs) -> anyhow::Result< println!("{} Documents successfully indexed.", "✔".color(GREEN_COLOR)); Ok(()) } - _ => bail!("Failed to ingest all the documents."), + _ => bail!("failed to ingest all the documents"), } } diff --git a/quickwit/quickwit-cli/tests/helpers.rs b/quickwit/quickwit-cli/tests/helpers.rs index 4893d916f71..25dd105db67 100644 --- a/quickwit/quickwit-cli/tests/helpers.rs +++ b/quickwit/quickwit-cli/tests/helpers.rs @@ -240,7 +240,7 @@ pub async fn create_test_env( resource_files["index_config"].display() )); let cluster_endpoint = Url::parse(&format!("http://localhost:{rest_listen_port}")) - .context("Failed to parse cluster endpoint.")?; + .context("failed to parse cluster endpoint")?; Ok(TestEnv { _temp_dir: temp_dir, diff --git a/quickwit/quickwit-cluster/src/cluster.rs b/quickwit/quickwit-cluster/src/cluster.rs index eddb57ca858..30c235704ab 100644 --- a/quickwit/quickwit-cluster/src/cluster.rs +++ b/quickwit/quickwit-cluster/src/cluster.rs @@ -254,7 +254,7 @@ impl Cluster { .next(), ) .await - .context("Deadline has passed before predicate held true.")?; + .context("deadline has passed before predicate held true")?; Ok(()) } diff --git a/quickwit/quickwit-cluster/src/member.rs b/quickwit/quickwit-cluster/src/member.rs index 81819fa9f91..2daabf35fc0 100644 --- a/quickwit/quickwit-cluster/src/member.rs +++ b/quickwit/quickwit-cluster/src/member.rs @@ -51,11 +51,11 @@ impl NodeStateExt for NodeState { fn grpc_advertise_addr(&self) -> anyhow::Result<SocketAddr> { self.get(GRPC_ADVERTISE_ADDR_KEY) .with_context(|| { - format!("Could not find key `{GRPC_ADVERTISE_ADDR_KEY}` in Chitchat node state.") + format!("could not find key `{GRPC_ADVERTISE_ADDR_KEY}` in Chitchat node state") }) .map(|grpc_advertise_addr_value| { grpc_advertise_addr_value.parse().with_context(|| { - format!("Failed to parse gRPC advertise address `{grpc_advertise_addr_value}`.") + format!("failed to parse gRPC advertise address `{grpc_advertise_addr_value}`") }) })? 
} @@ -138,7 +138,7 @@ pub(crate) fn build_cluster_member( .get(ENABLED_SERVICES_KEY) .ok_or_else(|| { anyhow::anyhow!( - "Could not find `{}` key in node `{}` state.", + "could not find `{}` key in node `{}` state", ENABLED_SERVICES_KEY, chitchat_id.node_id ) }) @@ -164,7 +164,7 @@ pub(crate) fn build_cluster_member( fn parse_indexing_task_key(key: &str) -> anyhow::Result<IndexingTask> { let (_prefix, reminder) = key.split_once(INDEXING_TASK_SEPARATOR).ok_or_else(|| { anyhow!( - "Indexing task must contain the delimiter character `:`: `{}`", + "indexing task must contain the delimiter character `:`: `{}`", key ) })?; diff --git a/quickwit/quickwit-codegen/example/src/error.rs b/quickwit/quickwit-codegen/example/src/error.rs index 1e819c0e145..fdd391489cd 100644 --- a/quickwit/quickwit-codegen/example/src/error.rs +++ b/quickwit/quickwit-codegen/example/src/error.rs @@ -24,9 +24,9 @@ use quickwit_actors::AskError; // Service errors have to be handwritten before codegen. #[derive(Debug, thiserror::Error)] pub enum HelloError { - #[error("Internal error: {0}")] + #[error("internal error: {0}")] InternalError(String), - #[error("Transport error: {0}")] + #[error("transport error: {0}")] TransportError(#[from] tonic::Status), } diff --git a/quickwit/quickwit-common/src/net.rs b/quickwit/quickwit-common/src/net.rs index da895447206..7818653ae82 100644 --- a/quickwit/quickwit-common/src/net.rs +++ b/quickwit/quickwit-common/src/net.rs @@ -127,7 +127,7 @@ impl FromStr for Host { if is_valid_hostname(host) { return Ok(Self::Hostname(host.to_string())); } - bail!("Failed to parse host: `{host}`.") + bail!("failed to parse host: `{host}`") } } @@ -164,7 +164,7 @@ impl HostAddr { } let (hostname, port) = if let Some((hostname_str, port_str)) = host_addr.split_once(':') { let port_u16 = port_str.parse::<u16>().with_context(|| { - format!("Failed to parse address `{host_addr}`: port is invalid.") + format!("failed to parse address `{host_addr}`: port is invalid") })?; (hostname_str, port_u16) } else { @@ -172,7 +172,7 @@ impl HostAddr { }; if !is_valid_hostname(hostname) { bail!( - "Failed to parse address `{}`: hostname is invalid.", + "failed to parse address `{}`: hostname is invalid", host_addr ) } @@ -272,10 +272,10 @@ pub async fn get_socket_addr( ) -> anyhow::Result<SocketAddr> { lookup_host(addr) .await - .with_context(|| format!("Failed to parse address or resolve hostname {addr:?}."))? + .with_context(|| format!("failed to parse address or resolve hostname {addr:?}"))? .next() .ok_or_else(|| { - anyhow::anyhow!("DNS resolution did not yield any record for hostname {addr:?}.") + anyhow::anyhow!("DNS resolution did not yield any record for hostname {addr:?}") }) } @@ -334,7 +334,7 @@ fn _get_hostname(hostname: OsString) -> io::Result<String> { } else { Err(io::Error::new( io::ErrorKind::Other, - format!("Invalid hostname: `{hostname_lossy}`."), + format!("invalid hostname: `{hostname_lossy}`"), )) } } diff --git a/quickwit/quickwit-common/src/temp_dir.rs b/quickwit/quickwit-common/src/temp_dir.rs index 8b423e75c6d..6490cc07e4d 100644 --- a/quickwit/quickwit-common/src/temp_dir.rs +++ b/quickwit/quickwit-common/src/temp_dir.rs @@ -148,7 +148,7 @@ impl<'a> Builder<'a> { if self.max_length < self.parts.len() + separator_count + self.num_rand_chars { return Err(io::Error::new( io::ErrorKind::InvalidInput, - "The filename limit is too small", + "the filename limit is too small", )); } // Calculate how many characters from the parts we can use in the final string. 
@@ -294,7 +294,7 @@ mod tests { assert_prefix(vec!["abcde", "uvwxyz"], 5, "ab%u%"); assert_prefix(vec!["abcde", "uvwxyz"], 4, "a%u%"); assert_prefix_err( - "The filename limit is too small", + "the filename limit is too small", vec!["abcde", "uvwxyz"], 3, ); @@ -310,7 +310,7 @@ mod tests { assert_prefix(vec!["0", "abcde", "uvwxyz"], 7, "0%ab%u%"); assert_prefix(vec!["0", "abcde", "uvwxyz"], 6, "0%a%u%"); assert_prefix_err( - "The filename limit is too small", + "the filename limit is too small", vec!["0", "abcde", "uvwxyz"], 5, ); @@ -368,7 +368,7 @@ mod tests { if parts_num > 0 && rng.gen::<bool>() { builder.max_length(rand::random::<usize>() % limit_threshold); assert_eq!( - "The filename limit is too small", + "the filename limit is too small", builder.prefix().unwrap_err().to_string() ); } else { diff --git a/quickwit/quickwit-common/src/test_utils.rs b/quickwit/quickwit-common/src/test_utils.rs index 7d3d2762ff9..fbc0d303fdc 100644 --- a/quickwit/quickwit-common/src/test_utils.rs +++ b/quickwit/quickwit-common/src/test_utils.rs @@ -73,7 +73,7 @@ pub async fn wait_for_server_ready(socket_addr: SocketAddr) -> anyhow::Result<() } } if num_attempts == max_num_attempts { - anyhow::bail!("Too many attempts to connect to `{}`", socket_addr); + anyhow::bail!("too many attempts to connect to `{}`", socket_addr); } Ok(()) } diff --git a/quickwit/quickwit-common/src/tower/buffer.rs b/quickwit/quickwit-common/src/tower/buffer.rs index fd423ea55c0..1d4e8b4c571 100644 --- a/quickwit/quickwit-common/src/tower/buffer.rs +++ b/quickwit/quickwit-common/src/tower/buffer.rs @@ -31,9 +31,9 @@ use super::{BoxError, BoxFuture}; #[derive(Debug, thiserror::Error)] pub enum BufferError { - #[error("The buffer's worker closed unexpectedly.")] + #[error("the buffer's worker closed unexpectedly")] Closed, - #[error("The buffer service returned an unknown error.")] + #[error("the buffer service returned an unknown error")] Unknown, } @@ -183,11 +183,11 @@ mod tests { #[derive(Debug, Clone, thiserror::Error, PartialEq, Eq)] enum MyServiceError { - #[error("Service is exhausted")] + #[error("service is exhausted")] Exhausted, - #[error("Service is unavailable")] + #[error("service is unavailable")] Unavailable, - #[error("Service attempted to divide by zero")] + #[error("service attempted to divide by zero")] ZeroDivision, } diff --git a/quickwit/quickwit-common/src/tower/rate_limit.rs b/quickwit/quickwit-common/src/tower/rate_limit.rs index ff600d6457b..7a86f674404 100644 --- a/quickwit/quickwit-common/src/tower/rate_limit.rs +++ b/quickwit/quickwit-common/src/tower/rate_limit.rs @@ -218,7 +218,7 @@ mod tests { } #[derive(Debug, Clone, thiserror::Error)] - #[error("Rate meter error")] + #[error("rate meter error")] struct RateMeterError; impl From for RateMeterError { diff --git a/quickwit/quickwit-common/src/uri.rs b/quickwit/quickwit-common/src/uri.rs index f46417963f1..ac9106624d9 100644 --- a/quickwit/quickwit-common/src/uri.rs +++ b/quickwit/quickwit-common/src/uri.rs @@ -108,7 +108,7 @@ impl FromStr for Protocol { "pg" | "postgres" | "postgresql" => Ok(Protocol::PostgreSQL), "ram" => Ok(Protocol::Ram), "s3" => Ok(Protocol::S3), - _ => bail!("Unknown URI protocol `{protocol}`."), + _ => bail!("unknown URI protocol `{protocol}`"), } } } @@ -236,7 +236,7 @@ impl Uri { pub fn join<P: AsRef<Path> + std::fmt::Debug>(&self, path: P) -> anyhow::Result<Uri> { if path.as_ref().is_absolute() { bail!( - "Cannot join URI `{}` with absolute path `{:?}`.", + "cannot join URI `{}` with absolute path `{:?}`", self.uri, path ); } @@ -247,7 +247,7 @@ impl Uri { 
.to_string_lossy() .to_string(), Protocol::PostgreSQL => bail!( - "Cannot join PostgreSQL URI `{}` with path `{:?}`.", + "cannot join PostgreSQL URI `{}` with path `{:?}`", self.uri, path ), @@ -272,7 +272,7 @@ impl Uri { fn parse_str(uri_str: &str) -> anyhow::Result<Uri> { // CAUTION: Do not display the URI in error messages to avoid leaking credentials. if uri_str.is_empty() { - bail!("Failed to parse empty URI."); + bail!("failed to parse empty URI"); } let (protocol, mut path) = match uri_str.split_once(PROTOCOL_SEPARATOR) { None => (Protocol::File, uri_str.to_string()), @@ -283,11 +283,11 @@ impl Uri { // We only accept `~` (alias to the home directory) and `~/path/to/something`. // If there is something following the `~` that is not `/`, we bail. if path.len() > 1 && !path.starts_with("~/") { - bail!("Failed to normalize URI: tilde expansion is only partially supported."); + bail!("failed to normalize URI: tilde expansion is only partially supported"); } let home_dir_path = home::home_dir() - .context("Failed to normalize URI: could not resolve home directory.")? + .context("failed to normalize URI: could not resolve home directory")? .to_string_lossy() .to_string(); @@ -295,8 +295,8 @@ impl Uri { } if Path::new(&path).is_relative() { let current_dir = env::current_dir().context( - "Failed to normalize URI: could not resolve current working directory. The \ - directory does not exist or user has insufficient permissions.", + "failed to normalize URI: could not resolve current working directory. the \ - directory does not exist or the user has insufficient permissions", )?; path = current_dir.join(path).to_string_lossy().to_string(); } @@ -447,7 +447,7 @@ mod tests { ); assert_eq!( Uri::from_str("~anything/bar").unwrap_err().to_string(), - "Failed to normalize URI: tilde expansion is only partially supported." + "failed to normalize URI: tilde expansion is only partially supported" ); assert_eq!( Uri::from_str("~/.").unwrap(), @@ -491,7 +491,7 @@ mod tests { Uri::from_str("http://localhost:9000/quickwit") .unwrap_err() .to_string(), - "Unknown URI protocol `http`." 
+ "unknown URI protocol `http`" ); } diff --git a/quickwit/quickwit-config/src/config_value.rs b/quickwit/quickwit-config/src/config_value.rs index f4833a2b267..fa6e1be72ad 100644 --- a/quickwit/quickwit-config/src/config_value.rs +++ b/quickwit/quickwit-config/src/config_value.rs @@ -77,7 +77,7 @@ where if let Some(env_var_value) = env_vars.get(*env_var_key) { let value = env_var_value.parse::().map_err(|error| { anyhow::anyhow!( - "Failed to convert value `{env_var_value}` read from environment \ + "failed to convert value `{env_var_value}` read from environment \ variable `{env_var_key}` to type `{}`: {error:?}", any::type_name::(), ) @@ -91,8 +91,8 @@ where pub(crate) fn resolve(self, env_vars: &HashMap) -> anyhow::Result { self.resolve_optional(env_vars)?.context( - "Failed to resolve field value: no value was provided via environment variable or \ - config file, and the field has no default.", + "failed to resolve field value: no value was provided via environment variable or \ + config file, and the field has no default", ) } } diff --git a/quickwit/quickwit-config/src/index_config/mod.rs b/quickwit/quickwit-config/src/index_config/mod.rs index 80ce657a968..291884e7fdf 100644 --- a/quickwit/quickwit-config/src/index_config/mod.rs +++ b/quickwit/quickwit-config/src/index_config/mod.rs @@ -242,7 +242,7 @@ impl RetentionPolicy { pub fn retention_period(&self) -> anyhow::Result { parse_duration(&self.retention_period).with_context(|| { format!( - "Failed to parse retention period `{}`.", + "failed to parse retention period `{}`", self.retention_period ) }) @@ -253,7 +253,7 @@ impl RetentionPolicy { Schedule::from_str(&evaluation_schedule).with_context(|| { format!( - "Failed to parse retention evaluation schedule `{}`.", + "failed to parse retention evaluation schedule `{}`", self.evaluation_schedule ) }) @@ -752,7 +752,7 @@ mod tests { assert!(parsing_config_error .root_cause() .to_string() - .contains("Failed to parse human-readable duration `x`")); + .contains("failed to parse human-readable duration `x`")); } #[test] @@ -817,7 +817,7 @@ mod tests { }; assert_eq!( retention_policy.retention_period().unwrap_err().to_string(), - "Failed to parse retention period `foo`." + "failed to parse retention period `foo`" ); } } diff --git a/quickwit/quickwit-config/src/index_config/serialize.rs b/quickwit/quickwit-config/src/index_config/serialize.rs index cbfe13384e4..30733c1edbc 100644 --- a/quickwit/quickwit-config/src/index_config/serialize.rs +++ b/quickwit/quickwit-config/src/index_config/serialize.rs @@ -68,9 +68,9 @@ impl IndexConfigForSerialization { if let Some(index_uri) = self.index_uri.as_ref() { return Ok(index_uri.clone()); } - let default_index_root_uri = default_index_root_uri_opt.context("Missing `index_uri`")?; + let default_index_root_uri = default_index_root_uri_opt.context("missing `index_uri`")?; let index_uri: Uri = default_index_root_uri.join(&self.index_id) - .context("Failed to create default index URI. This should never happen! Please, report on https://github.com/quickwit-oss/quickwit/issues.")?; + .context("failed to create default index URI. this should never happen! please, report on https://github.com/quickwit-oss/quickwit/issues")?; info!( index_id = %self.index_id, index_uri = %index_uri, @@ -92,8 +92,8 @@ impl IndexConfigForSerialization { if self.doc_mapping.timestamp_field.is_none() { anyhow::bail!( - "Failed to validate index config. The retention policy requires a timestamp \ - field, but the indexing settings do not declare one." 
+ "failed to validate index config. the retention policy requires a timestamp \ + field, but the indexing settings do not declare one" ); } } @@ -204,8 +204,8 @@ mod test { .to_string(); assert_eq!( validation_err, - "Index config merge policy `max_merge_factor` must be superior or equal to \ - `merge_factor`." + "index config merge policy `max_merge_factor` must be superior or equal to \ + `merge_factor`" ); } @@ -222,7 +222,7 @@ mod test { .validate_and_build(None) .unwrap_err() .to_string(); - assert!(validation_err.contains("The retention policy requires a timestamp field")); + assert!(validation_err.contains("the retention policy requires a timestamp field")); } #[test] @@ -234,7 +234,7 @@ mod test { "#; let config_parse_result: anyhow::Result = ConfigFormat::Yaml.parse(config_yaml.as_bytes()); - assert!(format!("{:?}", config_parse_result.unwrap_err()).contains("Missing `index_uri`")); + assert!(format!("{:?}", config_parse_result.unwrap_err()).contains("missing `index_uri`")); } #[test] diff --git a/quickwit/quickwit-config/src/lib.rs b/quickwit/quickwit-config/src/lib.rs index eec50cb6ab1..4dc582de22a 100644 --- a/quickwit/quickwit-config/src/lib.rs +++ b/quickwit/quickwit-config/src/lib.rs @@ -114,8 +114,8 @@ pub fn validate_identifier(label: &str, value: &str) -> anyhow::Result<()> { return Ok(()); } bail!( - "{label} identifier `{value}` is invalid. Identifiers must match the following regular \ - expression: `^[a-zA-Z][a-zA-Z0-9-_\\.]{{2,254}}$`." + "{label} identifier `{value}` is invalid. identifiers must match the following regular \ + expression: `^[a-zA-Z][a-zA-Z0-9-_\\.]{{2,254}}$`" ); } @@ -130,8 +130,8 @@ pub fn validate_index_id_pattern(pattern: &str) -> anyhow::Result<()> { .is_match(pattern) { bail!( - "Index ID pattern `{pattern}` is invalid. Patterns must match the following regular \ - expression: `^[a-zA-Z\\*][a-zA-Z0-9-_\\.\\*]{{0,254}}$`." + "index ID pattern `{pattern}` is invalid. patterns must match the following regular \ + expression: `^[a-zA-Z\\*][a-zA-Z0-9-_\\.\\*]{{0,254}}$`" ); } @@ -139,15 +139,15 @@ pub fn validate_index_id_pattern(pattern: &str) -> anyhow::Result<()> { // as multiple stars does not bring any value. if pattern.contains("**") { bail!( - "Index ID pattern `{pattern}` is invalid. Patterns must not contain multiple \ - consecutive `*`." + "index ID pattern `{pattern}` is invalid. patterns must not contain multiple \ + consecutive `*`" ); } // If there is no star in the pattern, we need at least 3 characters. if !pattern.contains('*') && pattern.len() < 3 { bail!( - "Index ID pattern `{pattern}` is invalid. An index ID must have at least 3 characters." + "index ID pattern `{pattern}` is invalid. an index ID must have at least 3 characters" ); } @@ -157,8 +157,8 @@ pub fn validate_index_id_pattern(pattern: &str) -> anyhow::Result<()> { pub fn validate_node_id(node_id: &str) -> anyhow::Result<()> { if !is_valid_hostname(node_id) { bail!( - "Node identifier `{node_id}` is invalid. Node identifiers must be valid short \ - hostnames (see RFC 1123)." + "node identifier `{node_id}` is invalid. node identifiers must be valid short \ + hostnames (see RFC 1123)" ); } Ok(()) @@ -183,12 +183,12 @@ impl ConfigFormat { pub fn sniff_from_uri(uri: &Uri) -> anyhow::Result { let extension_str: &str = uri.extension().with_context(|| { format!( - "Failed to read config file `{uri}`: file extension is missing. Supported file \ - formats and extensions are JSON (.json), TOML (.toml), and YAML (.yaml or .yml)." 
+ "failed to read config file `{uri}`: file extension is missing. supported file \ + formats and extensions are JSON (.json), TOML (.toml), and YAML (.yaml or .yml)" ) })?; ConfigFormat::from_str(extension_str) - .with_context(|| format!("Failed to identify configuration file format {uri}.")) + .with_context(|| format!("failed to identify configuration file format {uri}")) } pub fn parse(&self, payload: &[u8]) -> anyhow::Result @@ -197,31 +197,31 @@ impl ConfigFormat { ConfigFormat::Json => { let mut json_value: JsonValue = serde_json::from_reader(StripComments::new(payload))?; - let version_value = json_value.get_mut("version").context("Missing version.")?; + let version_value = json_value.get_mut("version").context("missing version")?; if let Some(version_number) = version_value.as_u64() { warn!("`version` is supposed to be a string."); *version_value = JsonValue::String(version_number.to_string()); } - serde_json::from_value(json_value).context("Failed to read JSON file.") + serde_json::from_value(json_value).context("failed to read JSON file") } ConfigFormat::Toml => { let payload_str = std::str::from_utf8(payload) - .context("Configuration file contains invalid UTF-8 characters.")?; + .context("configuration file contains invalid UTF-8 characters")?; let mut toml_value: toml::Value = - toml::from_str(payload_str).context("Failed to read TOML file.")?; - let version_value = toml_value.get_mut("version").context("Missing version.")?; + toml::from_str(payload_str).context("failed to read TOML file")?; + let version_value = toml_value.get_mut("version").context("missing version")?; if let Some(version_number) = version_value.as_integer() { warn!("`version` is supposed to be a string."); *version_value = toml::Value::String(version_number.to_string()); let reserialized = toml::to_string(version_value) - .context("Failed to reserialize toml config.")?; - toml::from_str(&reserialized).context("Failed to read TOML file.") + .context("failed to reserialize toml config")?; + toml::from_str(&reserialized).context("failed to read TOML file") } else { - toml::from_str(payload_str).context("Failed to read TOML file.") + toml::from_str(payload_str).context("failed to read TOML file") } } ConfigFormat::Yaml => { - serde_yaml::from_slice(payload).context("Failed to read YAML file.") + serde_yaml::from_slice(payload).context("failed to read YAML file") } } } @@ -236,8 +236,8 @@ impl FromStr for ConfigFormat { "toml" => Ok(Self::Toml), "yaml" | "yml" => Ok(Self::Yaml), _ => bail!( - "File extension `.{ext}` is not supported. Supported file formats and extensions \ - are JSON (.json), TOML (.toml), and YAML (.yaml or .yml).", + "file extension `.{ext}` is not supported. 
supported file formats and extensions \ + are JSON (.json), TOML (.toml), and YAML (.yaml or .yml)", ), } } @@ -283,6 +283,6 @@ mod tests { assert!(validate_index_id_pattern("foo!") .unwrap_err() .to_string() - .contains("Index ID pattern `foo!` is invalid.")); + .contains("index ID pattern `foo!` is invalid.")); } } diff --git a/quickwit/quickwit-config/src/merge_policy_config.rs b/quickwit/quickwit-config/src/merge_policy_config.rs index 0416b97d685..77605046c1b 100644 --- a/quickwit/quickwit-config/src/merge_policy_config.rs +++ b/quickwit/quickwit-config/src/merge_policy_config.rs @@ -114,7 +114,7 @@ where D: Deserializer<'de> { let value: String = Deserialize::deserialize(deserializer)?; let duration = humantime::parse_duration(&value).map_err(|error| { de::Error::custom(format!( - "Failed to parse human-readable duration `{value}`: {error:?}", + "failed to parse human-readable duration `{value}`: {error:?}", )) })?; Ok(duration) @@ -158,8 +158,8 @@ impl MergePolicyConfig { }; if max_merge_factor < merge_factor { anyhow::bail!( - "Index config merge policy `max_merge_factor` must be superior or equal to \ - `merge_factor`." + "index config merge policy `max_merge_factor` must be greater than or equal to \ + `merge_factor`" ); } Ok(()) } diff --git a/quickwit/quickwit-config/src/metastore_config.rs b/quickwit/quickwit-config/src/metastore_config.rs index cc16700a737..264a5874041 100644 --- a/quickwit/quickwit-config/src/metastore_config.rs +++ b/quickwit/quickwit-config/src/metastore_config.rs @@ -65,7 +65,7 @@ impl MetastoreConfigs { for (left, right) in backends.iter().zip(backends.iter().skip(1)) { ensure!( left != right, - "{left:?} metastore config is defined multiple times." + "{left:?} metastore config is defined multiple times" ); } Ok(()) } diff --git a/quickwit/quickwit-config/src/node_config/mod.rs b/quickwit/quickwit-config/src/node_config/mod.rs index 3a92eff67f7..edd8fe33b87 100644 --- a/quickwit/quickwit-config/src/node_config/mod.rs +++ b/quickwit/quickwit-config/src/node_config/mod.rs @@ -281,7 +281,7 @@ impl NodeConfig { let config = load_node_config_with_env(config_format, config_content, &env_vars).await?; if !config.data_dir_path.try_exists()? { bail!( - "Data dir `{}` does not exist.", + "data dir `{}` does not exist", config.data_dir_path.display() ); } @@ -308,7 +308,7 @@ impl NodeConfig { } if !self.peer_seeds.is_empty() && peer_seed_addrs.is_empty() { bail!( - "Failed to resolve any of the peer seed addresses: `{}`", + "failed to resolve any of the peer seed addresses: `{}`", self.peer_seeds.join(", ") ) } diff --git a/quickwit/quickwit-config/src/node_config/serialize.rs b/quickwit/quickwit-config/src/node_config/serialize.rs index fa4036735ea..6a8d42d5beb 100644 --- a/quickwit/quickwit-config/src/node_config/serialize.rs +++ b/quickwit/quickwit-config/src/node_config/serialize.rs @@ -103,7 +103,7 @@ fn default_advertise_host(listen_ip: &IpAddr) -> anyhow::Result { info!(advertise_address=%private_ip, interface_name=%interface_name, "Using sniffed advertise address."); return Ok(Host::from(private_ip)); } - bail!("Listen address `{listen_ip}` is unspecified and advertise address is not set."); + bail!("listen address `{listen_ip}` is unspecified and advertise address is not set"); } info!(advertise_address=%listen_ip, "Using listen address as advertise address."); Ok(Host::from(*listen_ip)) } @@ -248,8 +248,8 @@ impl NodeConfigBuilder { .filepath() .with_context(|| { format!( - "Data dir must be located on the local file system. Current location: \ - `{data_dir_uri}`." 
+ "data dir must be located on the local file system. current location: \ + `{data_dir_uri}`" ) })? .to_path_buf(); @@ -851,7 +851,7 @@ mod tests { let error = NodeConfig::load(ConfigFormat::Yaml, file.as_bytes()) .await .unwrap_err(); - assert!(error.to_string().contains("Data dir")); + assert!(error.to_string().contains("data dir")); } #[tokio::test] @@ -929,7 +929,7 @@ mod tests { ) .await .unwrap_err(); - assert!(error.to_string().contains("Data dir must be located")); + assert!(error.to_string().contains("data dir must be located")); } } diff --git a/quickwit/quickwit-config/src/service.rs b/quickwit/quickwit-config/src/service.rs index b7862f40618..2144b351cbb 100644 --- a/quickwit/quickwit-config/src/service.rs +++ b/quickwit/quickwit-config/src/service.rs @@ -77,7 +77,7 @@ impl FromStr for QuickwitService { "metastore" => Ok(QuickwitService::Metastore), _ => { bail!( - "Failed to parse service `{service_str}`. Supported services are: `{}`.", + "failed to parse service `{service_str}`. supported services are: `{}`", QuickwitService::supported_services().iter().join("`, `") ) } diff --git a/quickwit/quickwit-config/src/source_config/mod.rs b/quickwit/quickwit-config/src/source_config/mod.rs index a0c42eae0b9..7a70921bbd8 100644 --- a/quickwit/quickwit-config/src/source_config/mod.rs +++ b/quickwit/quickwit-config/src/source_config/mod.rs @@ -213,7 +213,7 @@ impl FromStr for SourceInputFormat { match format_str { "json" => Ok(Self::Json), "plain" => Ok(Self::PlainText), - unknown => Err(format!("Unknown source input format: `{unknown}`.")), + unknown => Err(format!("unknown source input format: `{unknown}`")), } } } @@ -359,9 +359,7 @@ impl TryFrom for KinesisSourceParams { fn try_from(value: KinesisSourceParamsInner) -> Result { if value.region.is_some() && value.endpoint.is_some() { - return Err( - "Kinesis source parameters `region` and `endpoint` are mutually exclusive.", - ); + return Err("Kinesis source parameters `region` and `endpoint` are mutually exclusive"); } let region = value.region.map(RegionOrEndpoint::Region); let endpoint = value.endpoint.map(RegionOrEndpoint::Endpoint); @@ -429,8 +427,8 @@ where D: Deserializer<'de> { if uri.strip_prefix("pulsar://").is_none() { return Err(Error::custom(format!( - "Invalid Pulsar uri provided, must be in the format of `pulsar://host:port/path`. \ - Got: `{uri}`" + "invalid Pulsar uri provided, must be in the format of `pulsar://host:port/path`. \ + got: `{uri}`" ))); } @@ -494,7 +492,7 @@ impl TransformConfig { use anyhow::Context; let timezone = vrl::compiler::TimeZone::parse(&self.timezone).with_context(|| { format!( - "Failed to parse timezone: `{}`. Timezone must be a valid name \ + "failed to parse timezone: `{}`. 
timezone must be a valid name \ in the TZ database: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones", self.timezone, ) @@ -509,7 +507,7 @@ impl TransformConfig { Err(diagnostics) => { let mut formatter = vrl::diagnostic::Formatter::new(&vrl_script, diagnostics); formatter.enable_colors(!quickwit_common::no_color()); - anyhow::bail!("Failed to compile VRL script:\n {formatter}") + anyhow::bail!("failed to compile VRL script:\n {formatter}") } }; diff --git a/quickwit/quickwit-config/src/source_config/serialize.rs b/quickwit/quickwit-config/src/source_config/serialize.rs index 4d92b2e9117..1fa184d0d29 100644 --- a/quickwit/quickwit-config/src/source_config/serialize.rs +++ b/quickwit/quickwit-config/src/source_config/serialize.rs @@ -76,17 +76,17 @@ impl SourceConfigForSerialization { validate_identifier("Source ID", &self.source_id)?; } let desired_num_pipelines = NonZeroUsize::new(self.desired_num_pipelines) - .ok_or_else(|| anyhow::anyhow!("`desired_num_pipelines` must be strictly positive."))?; + .ok_or_else(|| anyhow::anyhow!("`desired_num_pipelines` must be strictly positive"))?; let max_num_pipelines_per_indexer = NonZeroUsize::new(self.max_num_pipelines_per_indexer) .ok_or_else(|| { - anyhow::anyhow!("`max_num_pipelines_per_indexer` must be strictly positive.") + anyhow::anyhow!("`max_num_pipelines_per_indexer` must be strictly positive") })?; match &self.source_params { // We want to forbid source_config with no filepath SourceParams::File(file_params) => { if file_params.filepath.is_none() { bail!( - "Source `{}` of type `file` must contain a filepath.", + "source `{}` of type `file` must contain a filepath", self.source_id ) } @@ -105,7 +105,7 @@ impl SourceConfigForSerialization { SourceParams::GcpPubSub(_) | SourceParams::Kafka(_) => {} _ => { if self.desired_num_pipelines > 1 || self.max_num_pipelines_per_indexer > 1 { - bail!("Quickwit currently supports multiple pipelines only for GCP PubSub or Kafka sources. Open an issue https://github.com/quickwit-oss/quickwit/issues if you need the feature for other source types."); + bail!("Quickwit currently supports multiple pipelines only for GCP PubSub or Kafka sources. 
open an issue https://github.com/quickwit-oss/quickwit/issues if you need the feature for other source types"); } } } diff --git a/quickwit/quickwit-config/src/storage_config.rs b/quickwit/quickwit-config/src/storage_config.rs index 6f594cb192b..f238ff9211d 100644 --- a/quickwit/quickwit-config/src/storage_config.rs +++ b/quickwit/quickwit-config/src/storage_config.rs @@ -99,7 +99,7 @@ impl StorageConfigs { for (left, right) in backends.iter().zip(backends.iter().skip(1)) { ensure!( left != right, - "{left:?} storage config is defined multiple times.", + "{left:?} storage config is defined multiple times", ); } Ok(()) } diff --git a/quickwit/quickwit-config/src/templating.rs b/quickwit/quickwit-config/src/templating.rs index 0905a82e996..f7c750f5cdd 100644 --- a/quickwit/quickwit-config/src/templating.rs +++ b/quickwit/quickwit-config/src/templating.rs @@ -36,7 +36,7 @@ static TEMPLATE_ENV_VAR_CAPTURE: Lazy<Regex> = Lazy::new(|| { pub fn render_config(config_content: &[u8]) -> Result<String> { let template_str = std::str::from_utf8(config_content) - .context("Config file contains invalid UTF-8 characters.")?; + .context("config file contains invalid UTF-8 characters")?; let mut values = HashMap::new(); @@ -77,8 +77,8 @@ pub fn render_config(config_content: &[u8]) -> Result<String> { default_value } else { bail!( - "Failed to render config file template: environment variable \ - `{env_var_key}` is not set and no default value is provided." + "failed to render config file template: environment variable \ - `{env_var_key}` is not set and no default value is provided" ); } }; @@ -88,7 +88,7 @@ pub fn render_config(config_content: &[u8]) -> Result<String> { let template = Template::new(template_str).with_regex(&TEMPLATE_ENV_VAR_CAPTURE); let rendered = template .render_string(&values) - .context("Failed to render config file template.")?; + .context("failed to render config file template")?; Ok(rendered) } diff --git a/quickwit/quickwit-control-plane/src/control_plane.rs b/quickwit/quickwit-control-plane/src/control_plane.rs index 11dba08a31c..f2412616a23 100644 --- a/quickwit/quickwit-control-plane/src/control_plane.rs +++ b/quickwit/quickwit-control-plane/src/control_plane.rs @@ -293,7 +293,7 @@ impl Handler<NotifyIndexChangeRequest> for ControlPlane { self.indexing_scheduler_mailbox .send_message(request) .await - .context("Error sending index change notification to index scheduler.")?; + .context("error sending index change notification to index scheduler")?; Ok(Ok(NotifyIndexChangeResponse {})) } } diff --git a/quickwit/quickwit-control-plane/src/ingest/ingest_controller.rs b/quickwit/quickwit-control-plane/src/ingest/ingest_controller.rs index 8a5ba0e86e0..1a4afaf580d 100644 --- a/quickwit/quickwit-control-plane/src/ingest/ingest_controller.rs +++ b/quickwit/quickwit-control-plane/src/ingest/ingest_controller.rs @@ -483,7 +483,7 @@ impl IngestController { .find_leader_and_follower(ctx, &mut unavailable_ingesters) .await .ok_or_else(|| { - ControlPlaneError::Unavailable("No available ingester".to_string()) + ControlPlaneError::Unavailable("no available ingester".to_string()) })?; let open_shards_subrequest = metastore::OpenShardsSubrequest { index_uid: index_uid.into(), @@ -586,7 +586,7 @@ impl Actor for IngestController { async fn initialize(&mut self, ctx: &ActorContext<Self>) -> Result<(), ActorExitStatus> { self.load_state(ctx) .await - .context("Failed to initialize ingest controller.")?; + .context("failed to initialize ingest controller")?; Ok(()) } } diff --git a/quickwit/quickwit-control-plane/src/lib.rs 
b/quickwit/quickwit-control-plane/src/lib.rs index b2b247595fa..650204be433 100644 --- a/quickwit/quickwit-control-plane/src/lib.rs +++ b/quickwit/quickwit-control-plane/src/lib.rs @@ -60,7 +60,7 @@ impl ControlPlaneEventSubscriber { .notify_index_change(NotifyIndexChangeRequest {}) .await { - error!(error=?error, event=event_name, "Failed to notify control plane of index change."); + error!(error=?error, event=event_name, "failed to notify control plane of index change"); } } } diff --git a/quickwit/quickwit-control-plane/src/scheduler.rs b/quickwit/quickwit-control-plane/src/scheduler.rs index b8aea3826e5..5c497831b48 100644 --- a/quickwit/quickwit-control-plane/src/scheduler.rs +++ b/quickwit/quickwit-control-plane/src/scheduler.rs @@ -314,7 +314,7 @@ impl Handler<NotifyIndexChangeRequest> for IndexingScheduler { debug!("Index change notification: schedule indexing plan."); self.schedule_indexing_plan_if_needed() .await - .context("Error when scheduling indexing plan")?; + .context("error when scheduling indexing plan")?; Ok(Ok(NotifyIndexChangeResponse {})) } } diff --git a/quickwit/quickwit-datetime/src/date_time_format.rs b/quickwit/quickwit-datetime/src/date_time_format.rs index 7b5a2afe15f..26ae7436c3a 100644 --- a/quickwit/quickwit-datetime/src/date_time_format.rs +++ b/quickwit/quickwit-datetime/src/date_time_format.rs @@ -52,7 +52,7 @@ impl FromStr for StrptimeParser { strptime_format_str.to_lowercase().contains("%z"), |strptime_format: &String| { parse_to_format_item(strptime_format).map_err(|err| { - format!("Invalid format specification `{strptime_format}`. Error: {err}.") + format!("invalid format specification `{strptime_format}`. error: {err}") }) }, ) @@ -183,8 +183,8 @@ impl FromStr for DateTimeInputFormat { _ => { if !is_strftime_formatting(date_time_format_str) { return Err(format!( - "Unknown input format: `{date_time_format_str}`. A custom date time \ - format must contain at least one `strftime` special characters." + "unknown input format: `{date_time_format_str}`. a custom date time \ - format must contain at least one `strftime` special character" )); } DateTimeInputFormat::Strptime(StrptimeParser::from_str(date_time_format_str)?) @@ -285,8 +285,8 @@ impl FromStr for DateTimeOutputFormat { _ => { if !is_strftime_formatting(date_time_format_str) { return Err(format!( - "Unknown output format: `{date_time_format_str}`. A custom date time \ - format must contain at least one `strftime` special characters." + "unknown output format: `{date_time_format_str}`. a custom date time \ - format must contain at least one `strftime` special character" )); } DateTimeOutputFormat::Strptime(StrptimeParser::from_str(date_time_format_str)?) 
@@ -417,7 +417,7 @@ mod tests { let error_str = DateTimeInputFormat::from_str(format) .unwrap_err() .to_string(); - assert!(error_str.contains(&format!("Unknown input format: `{format}`."))); + assert!(error_str.contains(&format!("unknown input format: `{format}`"))); } } @@ -428,7 +428,7 @@ mod tests { let error_str = DateTimeOutputFormat::from_str(format) .unwrap_err() .to_string(); - assert!(error_str.contains(&format!("Unknown output format: `{format}`."))); + assert!(error_str.contains(&format!("unknown output format: `{format}`"))); } } } diff --git a/quickwit/quickwit-datetime/src/date_time_parsing.rs b/quickwit/quickwit-datetime/src/date_time_parsing.rs index 464280a202b..af11c0f57bb 100644 --- a/quickwit/quickwit-datetime/src/date_time_parsing.rs +++ b/quickwit/quickwit-datetime/src/date_time_parsing.rs @@ -58,7 +58,7 @@ pub fn parse_date_time_str( } } Err(format!( - "Failed to parse datetime `{date_time_str}` using the following formats: `{}`.", + "failed to parse datetime `{date_time_str}` using the following formats: `{}`", date_time_formats .iter() .map(|date_time_format| date_time_format.as_str()) @@ -72,7 +72,7 @@ pub fn parse_timestamp_float( ) -> Result<TantivyDateTime, String> { if !date_time_formats.contains(&DateTimeInputFormat::Timestamp) { return Err(format!( - "Failed to parse datetime `{timestamp}` using the following formats: `{}`.", + "failed to parse datetime `{timestamp}` using the following formats: `{}`", date_time_formats .iter() .map(|date_time_format| date_time_format.as_str()) @@ -91,7 +91,7 @@ pub fn parse_timestamp_int( ) -> Result<TantivyDateTime, String> { if !date_time_formats.contains(&DateTimeInputFormat::Timestamp) { return Err(format!( - "Failed to parse datetime `{timestamp}` using the following formats: `{}`.", + "failed to parse datetime `{timestamp}` using the following formats: `{}`", date_time_formats .iter() .map(|date_time_format| date_time_format.as_str()) @@ -171,8 +171,8 @@ pub fn parse_timestamp(timestamp: i64) -> Result<TantivyDateTime, String> { Ok(TantivyDateTime::from_timestamp_nanos(timestamp)) } _ => Err(format!( - "Failed to parse unix timestamp `{timestamp}`. Quickwit only support timestamp values \ - ranging from `13 Apr 1972 23:59:55` to `16 Mar 2242 12:56:31`." + "failed to parse unix timestamp `{timestamp}`. Quickwit only supports timestamp values \ - ranging from `13 Apr 1972 23:59:55` to `16 Mar 2242 12:56:31`" )), } } @@ -302,7 +302,7 @@ mod tests { .unwrap_err(); assert_eq!( error, - "Failed to parse datetime `foo` using the following formats: `iso8601`, `rfc2822`." + "failed to parse datetime `foo` using the following formats: `iso8601`, `rfc2822`" ); } @@ -356,8 +356,8 @@ mod tests { .unwrap_err(); assert_eq!( error, - "Failed to parse datetime `1668730394917.01` using the following formats: \ - `iso8601`, `rfc2822`." + "failed to parse datetime `1668730394917.01` using the following formats: \ - `iso8601`, `rfc2822`" ); } } @@ -381,8 +381,8 @@ mod tests { .unwrap_err(); assert_eq!( error, - "Failed to parse datetime `1668730394917` using the following formats: `iso8601`, \ - `rfc2822`." 
+ "failed to parse datetime `1668730394917` using the following formats: `iso8601`, \ + `rfc2822`" ); } } @@ -485,26 +485,26 @@ mod tests { { let less_than_supported_date = MIN_TIMESTAMP_SECONDS - 1; let parse_err = parse_timestamp(less_than_supported_date).unwrap_err(); - assert!(parse_err.contains("Failed to parse unix timestamp")); + assert!(parse_err.contains("failed to parse unix timestamp")); } { let greater_than_supported_date = MAX_TIMESTAMP_SECONDS + 1; let parse_err = parse_timestamp(greater_than_supported_date).unwrap_err(); - assert!(parse_err.contains("Failed to parse unix timestamp")); + assert!(parse_err.contains("failed to parse unix timestamp")); } { let unix_epoch = 0; let parse_err = parse_timestamp(unix_epoch).unwrap_err(); - assert!(parse_err.contains("Failed to parse unix timestamp")); + assert!(parse_err.contains("failed to parse unix timestamp")); let parse_err = parse_timestamp(MIN_TIMESTAMP_SECONDS << 7).unwrap_err(); - assert!(parse_err.contains("Failed to parse unix timestamp")); + assert!(parse_err.contains("failed to parse unix timestamp")); let parse_err = parse_timestamp(MIN_TIMESTAMP_SECONDS << 17).unwrap_err(); - assert!(parse_err.contains("Failed to parse unix timestamp")); + assert!(parse_err.contains("failed to parse unix timestamp")); let parse_err = parse_timestamp(MIN_TIMESTAMP_SECONDS << 27).unwrap_err(); - assert!(parse_err.contains("Failed to parse unix timestamp")); + assert!(parse_err.contains("failed to parse unix timestamp")); } } diff --git a/quickwit/quickwit-directories/src/hot_directory.rs b/quickwit/quickwit-directories/src/hot_directory.rs index c7b3cd71f90..3c78590e575 100644 --- a/quickwit/quickwit-directories/src/hot_directory.rs +++ b/quickwit/quickwit-directories/src/hot_directory.rs @@ -60,11 +60,11 @@ impl VersionedComponent for HotDirectoryVersions { match self { Self::V1 => { if bytes.len() < 4 { - bail!("Data too short (len={}).", bytes.len()); + bail!("data too short (len={})", bytes.len()); } let len = bytes.read_u32() as usize; let hot_directory_meta = postcard::from_bytes(&bytes.as_slice()[..len]) - .context("Failed to deserialize Hot Directory Meta")?; + .context("failed to deserialize hot directory meta")?; bytes.advance(len); Ok(hot_directory_meta) } @@ -244,7 +244,7 @@ impl StaticSliceCache { let (body, idx) = body.split(body_len as usize); let idx_bytes = idx.as_slice(); let index: SliceCacheIndex = postcard::from_bytes(idx_bytes).map_err(|err| { - DataCorruption::comment_only(format!("Failed to deserialize the slice index: {err:?}")) + DataCorruption::comment_only(format!("failed to deserialize the slice index: {err:?}")) })?; Ok(StaticSliceCache { bytes: body, index }) } @@ -309,7 +309,7 @@ impl StaticSliceCacheBuilder { for segment in &self.slices[1..] 
{ if segment.range().start < last.range().end { return Err(tantivy::TantivyError::InvalidArgument(format!( - "Two segments are overlapping on byte {}", + "two segments are overlapping on byte {}", segment.range().start ))); } @@ -335,7 +335,7 @@ impl StaticSliceCacheBuilder { }; self.wrt.extend_from_slice( &postcard::to_allocvec(&slices_idx).map_err(|err| { - TantivyError::InternalError(format!("Could not serialize {err:?}")) + TantivyError::InternalError(format!("could not serialize {err:?}")) })?, ); self.wrt.extend_from_slice(&self.offset.to_le_bytes()[..]); diff --git a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/date_time_type.rs b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/date_time_type.rs index 52eb64bafd5..c418cc0721f 100644 --- a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/date_time_type.rs +++ b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/date_time_type.rs @@ -82,7 +82,7 @@ impl QuickwitDateTimeOptions { quickwit_datetime::parse_timestamp_float(timestamp_f64, &self.input_formats.0)? } else { return Err(format!( - "Failed to parse datetime `{timestamp:?}`: value is larger than i64::MAX.", + "failed to parse datetime `{timestamp:?}`: value is larger than i64::MAX", )); } } @@ -91,8 +91,8 @@ impl QuickwitDateTimeOptions { } _ => { return Err(format!( - "Failed to parse datetime: expected a float, integer, or string, got \ - `{json_value}`." + "failed to parse datetime: expected a float, integer, or string, got \ + `{json_value}`" )) } }; @@ -316,7 +316,7 @@ mod tests { let error = serde_json::from_str::(input_formats_json) .unwrap_err() .to_string(); - assert!(error.contains("Invalid format specification")); + assert!(error.contains("invalid format specification")); } } diff --git a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/default_mapper.rs b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/default_mapper.rs index e1687a31c0d..f0802ceb1cc 100644 --- a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/default_mapper.rs +++ b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/default_mapper.rs @@ -117,25 +117,25 @@ fn validate_timestamp_field( mapping_root_node: &MappingNode, ) -> anyhow::Result<()> { if timestamp_field_path.starts_with('.') || timestamp_field_path.starts_with("\\.") { - bail!("Timestamp field `{timestamp_field_path}` should not start with a `.`."); + bail!("timestamp field `{timestamp_field_path}` should not start with a `.`"); } if timestamp_field_path.ends_with('.') { - bail!("Timestamp field `{timestamp_field_path}` should not end with a `.`."); + bail!("timestamp field `{timestamp_field_path}` should not end with a `.`"); } let Some(timestamp_field_type) = mapping_root_node.find_field_mapping_type(timestamp_field_path) else { - bail!("Could not find timestamp field `{timestamp_field_path}` in field mappings."); + bail!("could not find timestamp field `{timestamp_field_path}` in field mappings"); }; if let FieldMappingType::DateTime(date_time_option, cardinality) = ×tamp_field_type { if cardinality != &Cardinality::SingleValue { - bail!("Timestamp field `{timestamp_field_path}` should be single-valued."); + bail!("timestamp field `{timestamp_field_path}` should be single-valued"); } if !date_time_option.fast { - bail!("Timestamp field `{timestamp_field_path}` should be a fast field."); + bail!("timestamp field `{timestamp_field_path}` should be a fast field"); } } else { - bail!("Timestamp field `{timestamp_field_path}` should be a datetime field."); + bail!("timestamp field `{timestamp_field_path}` should be a 
datetime field"); } Ok(()) } @@ -173,7 +173,7 @@ impl TryFrom for DefaultDocMapper { for tokenizer_config_entry in builder.tokenizers.iter() { if custom_tokenizer_names.contains(&tokenizer_config_entry.name) { bail!( - "Duplicated custom tokenizer: `{}`", + "duplicated custom tokenizer: `{}`", tokenizer_config_entry.name ); } @@ -182,8 +182,8 @@ impl TryFrom for DefaultDocMapper { .is_some() { bail!( - "Custom tokenizer name `{}` should be different from built-in tokenizer's \ - names.", + "custom tokenizer name `{}` should be different from built-in tokenizer's \ + names", tokenizer_config_entry.name ); } @@ -192,7 +192,7 @@ impl TryFrom for DefaultDocMapper { .text_analyzer() .map_err(|error| { anyhow::anyhow!( - "Failed to build tokenizer `{}`: {:?}", + "failed to build tokenizer `{}`: {:?}", tokenizer_config_entry.name, error ) @@ -207,7 +207,7 @@ impl TryFrom for DefaultDocMapper { for default_search_field_name in &builder.default_search_fields { if default_search_field_names.contains(default_search_field_name) { bail!( - "Duplicated default search field: `{}`", + "duplicated default search field: `{}`", default_search_field_name ) } @@ -218,7 +218,7 @@ impl TryFrom for DefaultDocMapper { format!("Unknown default search field: `{default_search_field_name}`") })?; if !schema.get_field_entry(default_search_field).is_indexed() { - bail!("Default search field `{default_search_field_name}` is not indexed.",); + bail!("default search field `{default_search_field_name}` is not indexed",); } default_search_field_names.push(default_search_field_name.clone()); } @@ -231,7 +231,7 @@ impl TryFrom for DefaultDocMapper { let partition_key_expr: &str = builder.partition_key.as_deref().unwrap_or(""); let partition_key = RoutingExpr::new(partition_key_expr).with_context(|| { - format!("Failed to interpret the partition key: `{partition_key_expr}`") + format!("failed to interpret the partition key: `{partition_key_expr}`") })?; // If valid, partition key fields should be considered as tags. @@ -269,14 +269,14 @@ impl TryFrom for DefaultDocMapper { /// - the field must be indexed. 
 fn validate_tag(tag_field_name: &str, schema: &Schema) -> Result<(), anyhow::Error> {
     if tag_field_name.starts_with('.') || tag_field_name.starts_with("\\.") {
-        bail!("Tag field `{tag_field_name}` should not start with a `.`.");
+        bail!("tag field `{tag_field_name}` should not start with a `.`");
     }
     if tag_field_name.ends_with('.') {
-        bail!("Tag field `{tag_field_name}` should not end with a `.`.");
+        bail!("tag field `{tag_field_name}` should not end with a `.`");
     }
     let field = schema
         .get_field(tag_field_name)
-        .with_context(|| format!("Unknown tag field: `{tag_field_name}`"))?;
+        .with_context(|| format!("unknown tag field: `{tag_field_name}`"))?;
     let field_type = schema.get_field_entry(field).field_type();
     match field_type {
         FieldType::Str(options) => {
@@ -284,7 +284,7 @@ fn validate_tag(tag_field_name: &str, schema: &Schema) -> Result<(), anyhow::Err
                 .get_indexing_options()
                 .map(|text_options: &tantivy::schema::TextFieldIndexing| text_options.tokenizer());
             if tokenizer_opt != Some(RAW_TOKENIZER_NAME) {
-                bail!("Tags collection is only allowed on text fields with the `raw` tokenizer.");
+                bail!("tags collection is only allowed on text fields with the `raw` tokenizer");
             }
         }
         FieldType::U64(_) | FieldType::I64(_) => {
@@ -299,14 +299,14 @@ fn validate_tag(tag_field_name: &str, schema: &Schema) -> Result<(), anyhow::Err
             // avoid a "ZRP because you searched you searched for 0.100 instead of 0.1",
             // or `myflag:1`, `myflag:True` instead of `myflag:true`.
             bail!(
-                "Tags collection is not allowed on `{}` fields.",
+                "tags collection is not allowed on `{}` fields",
                 field_type.value_type().name().to_lowercase()
             )
         }
     }
     if !field_type.is_indexed() {
         bail!(
-            "Tag fields are required to be indexed. (`{}` is not configured as indexed).",
+            "tag fields are required to be indexed. (`{}` is not configured as indexed)",
             tag_field_name
         )
     }
@@ -331,7 +331,7 @@ fn validate_fields_tokenizers(
         if let Some(tokenizer_name) = tokenizer_name_opt {
             if tokenizer_manager.get(tokenizer_name).is_none() {
                 bail!(
-                    "Unknown tokenizer `{}` for field `{}`.",
+                    "unknown tokenizer `{}` for field `{}`",
                     tokenizer_name,
                     field_entry.name()
                 );
@@ -392,13 +392,13 @@ fn extract_single_obj(
     };
     if values.len() > 1 {
         bail!(
-            "Invalid named document. There are more than 1 value associated to the `{key}` field."
+            "invalid named document. there is more than one value associated with the `{key}` field"
         );
     }
     match values.pop() {
         Some(TantivyValue::JsonObject(dynamic_json_obj)) => Ok(Some(dynamic_json_obj)),
         Some(_) => {
-            bail!("The `{key}` value has to be a json object.");
+            bail!("the `{key}` value has to be a json object");
         }
         None => Ok(None),
     }
 }
@@ -769,7 +769,7 @@ mod tests {
         error,
         DocParsingError::ValueError(
             "body".to_owned(),
-            "Expected JSON string, got `1`.".to_owned()
+            "expected JSON string, got `1`".to_owned()
         )
     );
     Ok(())
@@ -857,7 +857,7 @@ mod tests {
         )
         .unwrap_err()
         .to_string(),
-        "Timestamp field `.my.timestamp` should not start with a `.`.",
+        "timestamp field `.my.timestamp` should not start with a `.`",
     );
 
     assert_eq!(
@@ -875,7 +875,7 @@ mod tests {
         )
         .unwrap_err()
         .to_string(),
-        "Timestamp field `\\.my\\.timestamp` should not start with a `.`.",
+        "timestamp field `\\.my\\.timestamp` should not start with a `.`",
     )
 }
@@ -889,7 +889,7 @@ mod tests {
         )
         .unwrap_err()
         .to_string(),
-        "Timestamp field `my.timestamp.` should not end with a `.`.",
+        "timestamp field `my.timestamp.` should not end with a `.`",
     );
 
     assert_eq!(
@@ -900,7 +900,7 @@ mod tests {
         )
         .unwrap_err()
         .to_string(),
-        "Timestamp field `my\\.timestamp\\.` should not end with a `.`.",
+        "timestamp field `my\\.timestamp\\.` should not end with a `.`",
     )
 }
@@ -914,7 +914,7 @@ mod tests {
         )
         .unwrap_err()
         .to_string(),
-        "Tag field `.my.tag` should not start with a `.`.",
+        "tag field `.my.tag` should not start with a `.`",
     );
 
     assert_eq!(
@@ -925,7 +925,7 @@ mod tests {
         )
         .unwrap_err()
         .to_string(),
-        "Tag field `\\.my\\.tag` should not start with a `.`.",
+        "tag field `\\.my\\.tag` should not start with a `.`",
     )
 }
@@ -939,7 +939,7 @@ mod tests {
         )
         .unwrap_err()
         .to_string(),
-        "Tag field `my.tag.` should not end with a `.`.",
+        "tag field `my.tag.` should not end with a `.`",
     );
 
     assert_eq!(
@@ -950,7 +950,7 @@ mod tests {
         )
         .unwrap_err()
         .to_string(),
-        "Tag field `my\\.tag\\.` should not end with a `.`.",
+        "tag field `my\\.tag\\.` should not end with a `.`",
     )
 }
@@ -967,7 +967,7 @@ mod tests {
         ]
     }"#;
     let builder = serde_json::from_str::<DefaultDocMapperBuilder>(doc_mapper).unwrap();
-    let expected_msg = "Timestamp field `timestamp` should be a datetime field.";
+    let expected_msg = "timestamp field `timestamp` should be a datetime field";
     assert_eq!(&builder.try_build().unwrap_err().to_string(), &expected_msg);
 }
@@ -986,7 +986,7 @@ mod tests {
         ]
     }"#;
     let builder = serde_json::from_str::<DefaultDocMapperBuilder>(doc_mapper).unwrap();
-    let expected_msg = "Timestamp field `timestamp` should be a fast field.";
+    let expected_msg = "timestamp field `timestamp` should be a fast field";
     assert_eq!(&builder.try_build().unwrap_err().to_string(), &expected_msg);
 }
@@ -1000,7 +1000,7 @@ mod tests {
         ]
     }"#;
     let builder = serde_json::from_str::<DefaultDocMapperBuilder>(doc_mapper).unwrap();
-    let expected_msg = "Duplicated field definition `body`.";
+    let expected_msg = "duplicated field definition `body`";
     assert_eq!(&builder.try_build().unwrap_err().to_string(), expected_msg);
 }
@@ -1019,7 +1019,7 @@ mod tests {
         ]
     }"#;
     let builder = serde_json::from_str::<DefaultDocMapperBuilder>(doc_mapper).unwrap();
-    let expected_msg = "Duplicated field definition `username`.";
+    let expected_msg = "duplicated field definition `username`";
     assert_eq!(&builder.try_build().unwrap_err().to_string(), expected_msg);
     }
 }
@@ -1059,7 +1059,7 @@ mod tests {
     }"#;
 
     let builder = serde_json::from_str::<DefaultDocMapperBuilder>(doc_mapper)
        .unwrap();
-    let expected_msg = "Timestamp field `timestamp` should be single-valued.";
+    let expected_msg = "timestamp field `timestamp` should be single-valued";
     assert_eq!(&builder.try_build().unwrap_err().to_string(), expected_msg);
 }
@@ -1080,7 +1080,7 @@ mod tests {
         .unwrap();
     assert!(deser_err
         .to_string()
-        .contains("The following fields are reserved for Quickwit internal usage"));
+        .contains("the following fields are reserved for Quickwit internal usage"));
 }
 
 #[test]
@@ -1104,7 +1104,7 @@ mod tests {
             "image": "invalid base64 data"
         }"#,
     );
-    let expected_msg = "The field `image` could not be parsed: Expected base64 string, got \
+    let expected_msg = "the field `image` could not be parsed: expected base64 string, got \
                         `invalid base64 data`: Invalid byte 32, offset 7.";
     assert_eq!(result.unwrap_err().to_string(), expected_msg);
     Ok(())
@@ -1313,7 +1313,7 @@ mod tests {
             .try_build()
             .unwrap_err()
             .to_string(),
-        "Tags collection is only allowed on text fields with the `raw` tokenizer.".to_string(),
+        "tags collection is only allowed on text fields with the `raw` tokenizer".to_string(),
     );
 
     let doc_mapper_two = r#"{
@@ -1331,7 +1331,7 @@ mod tests {
             .try_build()
             .unwrap_err()
             .to_string(),
-        "Tags collection is not allowed on `bytes` fields.".to_string(),
+        "tags collection is not allowed on `bytes` fields".to_string(),
     );
     Ok(())
 }
@@ -1569,7 +1569,7 @@ mod tests {
             })
         );
     } else {
-        panic!("Expected json");
+        panic!("expected json");
     }
 }
@@ -1597,7 +1597,7 @@ mod tests {
         .unwrap();
     assert_eq!(
         default_doc_mapper_query_aux(&doc_mapper, "body.wrong_field:hello").unwrap_err(),
-        "Invalid query: Field does not exist: `body.wrong_field`"
+        "invalid query: field does not exist: `body.wrong_field`"
     );
 }
@@ -1821,7 +1821,7 @@ mod tests {
         .unwrap();
     let mapper = mapper_builder.try_build();
     let error_msg = mapper.unwrap_err().to_string();
-    assert!(error_msg.contains("Unknown tokenizer"));
+    assert!(error_msg.contains("unknown tokenizer"));
 }
 
 #[test]
@@ -1878,7 +1878,7 @@ mod tests {
     let mapper = mapper_builder.try_build();
     assert!(mapper.is_err());
     let error_mesg = mapper.unwrap_err().to_string();
-    assert!(error_mesg.contains("Invalid regex tokenizer"));
+    assert!(error_mesg.contains("invalid regex tokenizer"));
 }
 
 #[test]
diff --git a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/default_mapper_builder.rs b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/default_mapper_builder.rs
index e216c2c8365..8367b33ffb6 100644
--- a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/default_mapper_builder.rs
+++ b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/default_mapper_builder.rs
@@ -115,7 +115,7 @@ impl Mode {
             (ModeType::Dynamic, Some(dynamic_mapping)) => Mode::Dynamic(dynamic_mapping),
             (ModeType::Dynamic, None) => Mode::default(), // Dynamic with default options
             (_, Some(_)) => anyhow::bail!(
-                "`dynamic_mapping` is only allowed with mode=dynamic. (Here mode=`{:?}`)",
+                "`dynamic_mapping` is only allowed with mode=dynamic. (here mode=`{:?}`)",
                 mode
             ),
         })
diff --git a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/field_mapping_entry.rs b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/field_mapping_entry.rs
index 16bc953c926..602178996f8 100644
--- a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/field_mapping_entry.rs
+++ b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/field_mapping_entry.rs
@@ -198,7 +198,7 @@ impl BinaryFormat {
             byte_str
         } else {
             return Err(format!(
-                "Expected {} string, got `{json_val}`.",
+                "expected {} string, got `{json_val}`",
                 self.as_str()
             ));
         };
@@ -206,10 +206,10 @@ impl BinaryFormat {
             Self::Base64 => BASE64_STANDARD
                 .decode(&byte_str)
                 .map_err(|base64_decode_err| {
-                    format!("Expected base64 string, got `{byte_str}`: {base64_decode_err}")
+                    format!("expected base64 string, got `{byte_str}`: {base64_decode_err}")
                 })?,
             Self::Hex => hex::decode(&byte_str).map_err(|hex_decode_err| {
-                format!("Expected hex string, got `{byte_str}`: {hex_decode_err}")
+                format!("expected hex string, got `{byte_str}`: {hex_decode_err}")
             })?,
         };
         Ok(TantivyValue::Bytes(payload))
@@ -314,7 +314,7 @@ impl TextIndexingOptions {
             if tokenizer.is_some() || record.is_some() || fieldnorms {
                 bail!(
                     "`record`, `tokenizer`, and `fieldnorms` parameters are allowed only if \
-                     indexed is true."
+                     indexed is true"
                 )
             }
             Ok(None)
@@ -334,7 +334,7 @@ impl TextIndexingOptions {
             }))
         } else {
             if tokenizer.is_some() || record.is_some() {
-                bail!("`record` and `tokenizer` parameters are allowed only if indexed is true.")
+                bail!("`record` and `tokenizer` parameters are allowed only if indexed is true")
             }
             Ok(None)
         }
@@ -632,7 +632,7 @@ fn deserialize_mapping_type(
         QuickwitFieldType::Object => {
             let object_options: QuickwitObjectOptions = serde_json::from_value(json)?;
             if object_options.field_mappings.is_empty() {
-                anyhow::bail!("object type must have at least one field mapping.");
+                anyhow::bail!("object type must have at least one field mapping");
             }
             return Ok(FieldMappingType::Object(object_options));
         }
@@ -670,7 +670,7 @@ fn deserialize_mapping_type(
         Type::Bytes => {
             let numeric_options: QuickwitBytesOptions = serde_json::from_value(json)?;
             if numeric_options.fast && cardinality == Cardinality::MultiValues {
-                bail!("fast field is not allowed for array.");
+                bail!("fast field is not allowed for array");
             }
             Ok(FieldMappingType::Bytes(numeric_options, cardinality))
         }
@@ -689,7 +689,7 @@ impl TryFrom for FieldMappingEntry {
         let quickwit_field_type =
             QuickwitFieldType::parse_type_id(&value.type_id).ok_or_else(|| {
                 format!(
-                    "Field `{}` has an unknown type: `{}`.",
+                    "field `{}` has an unknown type: `{}`",
                     &value.name, &value.type_id
                 )
             })?;
@@ -697,7 +697,7 @@ impl TryFrom for FieldMappingEntry {
             quickwit_field_type,
             JsonValue::Object(value.field_mapping_json),
         )
-        .map_err(|err| format!("Error while parsing field `{}`: {}", value.name, err))?;
+        .map_err(|err| format!("error while parsing field `{}`: {}", value.name, err))?;
         Ok(FieldMappingEntry {
             name: value.name,
             mapping_type,
@@ -836,8 +836,8 @@ mod tests {
     let error = result.unwrap_err();
     assert_eq!(
         error.to_string(),
-        "Error while parsing field `data_binary`: `record`, `tokenizer`, and `fieldnorms` \
-         parameters are allowed only if indexed is true."
+ "error while parsing field `data_binary`: `record`, `tokenizer`, and `fieldnorms` \ + parameters are allowed only if indexed is true" ); } @@ -879,8 +879,8 @@ mod tests { let error = result.unwrap_err(); assert_eq!( error.to_string(), - "Error while parsing field `data_binary`: `record` and `tokenizer` parameters are \ - allowed only if indexed is true." + "error while parsing field `data_binary`: `record` and `tokenizer` parameters are \ + allowed only if indexed is true" ); } @@ -899,7 +899,7 @@ mod tests { assert!(mapping_entry.is_err()); assert_eq!( mapping_entry.unwrap_err().to_string(), - "Error while parsing field `my_field_name`: unknown variant `notexist`, expected one \ + "error while parsing field `my_field_name`: unknown variant `notexist`, expected one \ of `basic`, `freq`, `position`" .to_string() ); @@ -921,7 +921,7 @@ mod tests { assert!(mapping_entry .unwrap_err() .to_string() - .contains("Error while parsing field `my_field_name`: unknown field `blub`")); + .contains("error while parsing field `my_field_name`: unknown field `blub`")); Ok(()) } @@ -994,8 +994,8 @@ mod tests { let error = result.unwrap_err(); assert_eq!( error.to_string(), - "Error while parsing field `my_field_name`: `record`, `tokenizer`, and `fieldnorms` \ - parameters are allowed only if indexed is true." + "error while parsing field `my_field_name`: `record`, `tokenizer`, and `fieldnorms` \ + parameters are allowed only if indexed is true" ); } @@ -1059,8 +1059,8 @@ mod tests { let error = result.unwrap_err(); assert_eq!( error.to_string(), - "Error while parsing field `my_field_name`: object type must have at least one field \ - mapping." + "error while parsing field `my_field_name`: object type must have at least one field \ + mapping" ); } @@ -1078,7 +1078,7 @@ mod tests { let error = result.unwrap_err(); assert_eq!( error.to_string(), - "Field `my_field_name` has an unknown type: `my custom type`." 
+ "field `my_field_name` has an unknown type: `my custom type`" ); } @@ -1112,7 +1112,7 @@ mod tests { assert_eq!( error.to_string(), - "Error while parsing field `my_field_name`: unknown field `tokenizer`, expected one \ + "error while parsing field `my_field_name`: unknown field `tokenizer`, expected one \ of `description`, `stored`, `indexed`, `fast`, `coerce`, `output_format`" ); } @@ -1204,7 +1204,7 @@ mod tests { ) .unwrap_err() .to_string(), - "Error while parsing field `my_field_name`: unknown field `tokenizer`, expected one \ + "error while parsing field `my_field_name`: unknown field `tokenizer`, expected one \ of `description`, `stored`, `indexed`, `fast`, `coerce`, `output_format`" ); } @@ -1560,8 +1560,7 @@ mod tests { .unwrap(); assert_eq!( err.to_string(), - "Error while parsing field `my_field_name`: fast field is not allowed for \ - array.", + "error while parsing field `my_field_name`: fast field is not allowed for array", ); } diff --git a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/mapping_tree.rs b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/mapping_tree.rs index 4db71471f9c..8eaa22cc9d9 100644 --- a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/mapping_tree.rs +++ b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/mapping_tree.rs @@ -61,7 +61,7 @@ impl LeafType { if let JsonValue::String(text) = json_val { Ok(TantivyValue::Str(text)) } else { - Err(format!("Expected JSON string, got `{json_val}`.")) + Err(format!("expected JSON string, got `{json_val}`")) } } LeafType::I64(numeric_options) => i64::from_json(json_val, numeric_options.coerce), @@ -71,17 +71,17 @@ impl LeafType { if let JsonValue::Bool(val) = json_val { Ok(TantivyValue::Bool(val)) } else { - Err(format!("Expected bool value, got `{json_val}`.")) + Err(format!("expected bool value, got `{json_val}`")) } } LeafType::IpAddr(_) => { if let JsonValue::String(ip_address) = json_val { let ipv6_value = IpAddr::from_str(ip_address.as_str()) - .map_err(|err| format!("Failed to parse IP address `{ip_address}`: {err}"))? + .map_err(|err| format!("failed to parse IP address `{ip_address}`: {err}"))? .into_ipv6_addr(); Ok(TantivyValue::IpAddr(ipv6_value)) } else { - Err(format!("Expected string value, got `{json_val}`.")) + Err(format!("expected string value, got `{json_val}`")) } } LeafType::DateTime(date_time_options) => date_time_options.parse_json(json_val), @@ -90,7 +90,7 @@ impl LeafType { if let JsonValue::Object(json_obj) = json_val { Ok(TantivyValue::JsonObject(json_obj)) } else { - Err(format!("Expected JSON object got `{json_val}`.")) + Err(format!("expected JSON object got `{json_val}`")) } } } @@ -247,7 +247,7 @@ trait NumVal: Sized + FromStr + ToString + Into { .map(Self::into) .ok_or_else(|| { format!( - "Expected {}, got inconvertible JSON number `{}`.", + "expected {}, got inconvertible JSON number `{}`", type_name::(), num_val ) @@ -256,23 +256,23 @@ trait NumVal: Sized + FromStr + ToString + Into { if coerce { str_val.parse::().map(Self::into).map_err(|_| { format!( - "Failed to coerce JSON string `\"{str_val}\"` to {}.", + "failed to coerce JSON string `\"{str_val}\"` to {}", type_name::() ) }) } else { Err(format!( - "Expected JSON number, got string `\"{str_val}\"`. Enable coercion to {} \ - with the `coerce` parameter in the field mapping.", + "expected JSON number, got string `\"{str_val}\"`. 
enable coercion to {} \ + with the `coerce` parameter in the field mapping", type_name::() )) } } _ => { let message = if coerce { - format!("Expected JSON number or string, got `{json_val}`.") + format!("expected JSON number or string, got `{json_val}`") } else { - format!("Expected JSON number, got `{json_val}`.") + format!("expected JSON number, got `{json_val}`") }; Err(message) } @@ -502,7 +502,7 @@ impl MappingTree { } else { Err(DocParsingError::ValueError( path.join("."), - format!("Expected an JSON Object, got {json_value}"), + format!("expected an JSON object, got {json_value}"), )) } } @@ -543,7 +543,7 @@ fn build_mapping_tree_from_entries<'a>( for entry in entries { field_path.push(&entry.name); if mapping_node.branches.contains_key(&entry.name) { - bail!("Duplicated field definition `{}`.", entry.name); + bail!("duplicated field definition `{}`", entry.name); } let child_tree = build_mapping_from_field_type(&entry.mapping_type, field_path, schema)?; field_path.pop(); @@ -901,7 +901,7 @@ mod tests { ); assert_eq!( leaf.value_from_json(json!("foo")).unwrap_err(), - "Failed to coerce JSON string `\"foo\"` to u64." + "failed to coerce JSON string `\"foo\"` to u64" ); let numeric_options = QuickwitNumericOptions { @@ -911,8 +911,8 @@ mod tests { let leaf = LeafType::U64(numeric_options); assert_eq!( leaf.value_from_json(json!("20")).unwrap_err(), - "Expected JSON number, got string `\"20\"`. Enable coercion to u64 with the `coerce` \ - parameter in the field mapping." + "expected JSON number, got string `\"20\"`. enable coercion to u64 with the `coerce` \ + parameter in the field mapping" ); } @@ -921,7 +921,7 @@ mod tests { let leaf = LeafType::U64(QuickwitNumericOptions::default()); assert_eq!( leaf.value_from_json(json!(-20i64)).unwrap_err(), - "Expected u64, got inconvertible JSON number `-20`." + "expected u64, got inconvertible JSON number `-20`" ); } @@ -939,7 +939,7 @@ mod tests { let leaf = LeafType::I64(QuickwitNumericOptions::default()); assert_eq!( leaf.value_from_json(json!(20.2f64)).unwrap_err(), - "Expected i64, got inconvertible JSON number `20.2`." + "expected i64, got inconvertible JSON number `20.2`" ); } @@ -949,7 +949,7 @@ mod tests { let err = leaf.value_from_json(json!(u64::max_value())).err().unwrap(); assert_eq!( err, - "Expected i64, got inconvertible JSON number `18446744073709551615`." + "expected i64, got inconvertible JSON number `18446744073709551615`" ); } @@ -1016,10 +1016,10 @@ mod tests { fn test_parse_ip_addr_should_error() { let typ = LeafType::IpAddr(QuickwitIpAddrOptions::default()); let err = typ.value_from_json(json!("foo")).err().unwrap(); - assert!(err.contains("Failed to parse IP address `foo`")); + assert!(err.contains("failed to parse IP address `foo`")); let err = typ.value_from_json(json!(1200)).err().unwrap(); - assert!(err.contains("Expected string value, got `1200`")); + assert!(err.contains("expected string value, got `1200`")); } #[test] @@ -1099,8 +1099,8 @@ mod tests { .unwrap_err(); assert_eq!( parse_err.to_string(), - "The field `root.my_field` could not be parsed: Expected JSON number or string, got \ - `[1,2]`." 
+ "the field `root.my_field` could not be parsed: expected JSON number or string, got \ + `[1,2]`" ); } @@ -1118,7 +1118,7 @@ mod tests { fn test_parse_text_number_should_error() { let typ = LeafType::Text(QuickwitTextOptions::default()); let err = typ.value_from_json(json!(2u64)).err().unwrap(); - assert_eq!(err, "Expected JSON string, got `2`."); + assert_eq!(err, "expected JSON string, got `2`"); } #[test] @@ -1162,8 +1162,8 @@ mod tests { let err = typ.value_from_json(json!("foo-datetime")).unwrap_err(); assert_eq!( err, - "Failed to parse datetime `foo-datetime` using the following formats: `rfc3339`, \ - `unix_timestamp`." + "failed to parse datetime `foo-datetime` using the following formats: `rfc3339`, \ + `unix_timestamp`" ); } @@ -1173,8 +1173,8 @@ mod tests { let err = typ.value_from_json(json!(["foo", "bar"])).err().unwrap(); assert_eq!( err, - "Failed to parse datetime: expected a float, integer, or string, got \ - `[\"foo\",\"bar\"]`." + "failed to parse datetime: expected a float, integer, or string, got \ + `[\"foo\",\"bar\"]`" ); } @@ -1208,7 +1208,7 @@ mod tests { fn test_parse_bytes_number_should_err() { let typ = LeafType::Bytes(QuickwitBytesOptions::default()); let error = typ.value_from_json(json!(2u64)).err().unwrap(); - assert_eq!(error, "Expected base64 string, got `2`."); + assert_eq!(error, "expected base64 string, got `2`"); } #[test] @@ -1217,7 +1217,7 @@ mod tests { let error = typ.value_from_json(json!("dEwerwer#!%")).err().unwrap(); assert_eq!( error, - "Expected base64 string, got `dEwerwer#!%`: Invalid byte 35, offset 8." + "expected base64 string, got `dEwerwer#!%`: Invalid byte 35, offset 8." ); } diff --git a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/mod.rs b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/mod.rs index 23b96effbb1..42beb6a6912 100644 --- a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/mod.rs +++ b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/mod.rs @@ -64,8 +64,8 @@ pub fn validate_field_mapping_name(field_mapping_name: &str) -> anyhow::Result<( if QW_RESERVED_FIELD_NAMES.contains(&field_mapping_name) { bail!( - "Field name `{field_mapping_name}` is reserved. The following fields are reserved for \ - Quickwit internal usage: {}.", + "field name `{field_mapping_name}` is reserved. the following fields are reserved for \ + Quickwit internal usage: {}", QW_RESERVED_FIELD_NAMES.join(", "), ); } @@ -73,31 +73,31 @@ pub fn validate_field_mapping_name(field_mapping_name: &str) -> anyhow::Result<( return Ok(()); } if field_mapping_name.is_empty() { - bail!("Field name is empty."); + bail!("field name is empty"); } if field_mapping_name.starts_with('.') { bail!( - "Field name `{}` must not start with a dot `.`", + "field name `{}` must not start with a dot `.`", field_mapping_name ); } if field_mapping_name.len() > 255 { bail!( - "Field name `{}` is too long. Field names must not be longer than 255 characters.", + "field name `{}` is too long. field names must not be longer than 255 characters", field_mapping_name ) } let first_char = field_mapping_name.chars().next().unwrap(); if !first_char.is_ascii_alphabetic() { bail!( - "Field name `{}` is invalid. Field names must start with an uppercase or lowercase \ - ASCII letter, or an underscore `_`.", + "field name `{}` is invalid. field names must start with an uppercase or lowercase \ + ASCII letter, or an underscore `_`", field_mapping_name ) } bail!( - "Field name `{}` contains illegal characters. 
Field names must only contain uppercase and \ - lowercase ASCII letters, digits, hyphens `-`, periods `.`, and underscores `_`.", + "field name `{}` contains illegal characters. field names must only contain uppercase and \ + lowercase ASCII letters, digits, hyphens `-`, periods `.`, and underscores `_`", field_mapping_name ); } diff --git a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/tokenizer_entry.rs b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/tokenizer_entry.rs index bc2519bbd23..0a067381f4f 100644 --- a/quickwit/quickwit-doc-mapper/src/default_doc_mapper/tokenizer_entry.rs +++ b/quickwit/quickwit-doc-mapper/src/default_doc_mapper/tokenizer_entry.rs @@ -59,12 +59,12 @@ impl TokenizerConfig { TokenizerType::Ngram(options) => { let tokenizer = NgramTokenizer::new(options.min_gram, options.max_gram, options.prefix_only) - .with_context(|| "Invalid ngram tokenizer".to_string())?; + .with_context(|| "invalid ngram tokenizer".to_string())?; TextAnalyzer::builder(tokenizer).dynamic() } TokenizerType::Regex(options) => { let tokenizer = RegexTokenizer::new(&options.pattern) - .with_context(|| "Invalid regex tokenizer".to_string())?; + .with_context(|| "invalid regex tokenizer".to_string())?; TextAnalyzer::builder(tokenizer).dynamic() } }; diff --git a/quickwit/quickwit-doc-mapper/src/doc_mapper.rs b/quickwit/quickwit-doc-mapper/src/doc_mapper.rs index a2174cffc72..197ad7cad1b 100644 --- a/quickwit/quickwit-doc-mapper/src/doc_mapper.rs +++ b/quickwit/quickwit-doc-mapper/src/doc_mapper.rs @@ -62,7 +62,7 @@ pub trait DocMapper: Send + Sync + Debug + DynClone + 'static { let json_obj: JsonObject = serde_json::from_slice(json_doc).map_err(|_| { let json_doc_sample: String = std::str::from_utf8(json_doc) .map(|doc_str| doc_str.chars().take(20).chain("...".chars()).collect()) - .unwrap_or_else(|_| "Document contains some invalid UTF-8 characters.".to_string()); + .unwrap_or_else(|_| "document contains some invalid UTF-8 characters".to_string()); DocParsingError::NotJsonObject(json_doc_sample) })?; self.doc_from_json_obj(json_obj) @@ -132,7 +132,7 @@ pub trait DocMapper: Send + Sync + Debug + DynClone + 'static { .map(|field_name| { index_schema .get_field(field_name) - .context(format!("Field `{field_name}` must exist in the schema.")) + .context(format!("field `{field_name}` must exist in the schema")) .map(|field| NamedField { name: field_name.clone(), field, diff --git a/quickwit/quickwit-doc-mapper/src/error.rs b/quickwit/quickwit-doc-mapper/src/error.rs index 0920fcf660c..1488b4a2c7a 100644 --- a/quickwit/quickwit-doc-mapper/src/error.rs +++ b/quickwit/quickwit-doc-mapper/src/error.rs @@ -25,11 +25,11 @@ use thiserror::Error; #[derive(Error, Debug)] #[allow(missing_docs)] pub enum QueryParserError { - #[error("Invalid json: {0}")] + #[error("invalid json: {0}")] InvalidJson(#[from] serde_json::Error), - #[error("Invalid query: {0}")] + #[error("invalid query: {0}")] InvalidQuery(#[from] InvalidQuery), - #[error("Invalid default search field: `{field_name}` {cause}")] + #[error("invalid default search field: `{field_name}` {cause}")] InvalidDefaultField { cause: &'static str, field_name: String, @@ -43,19 +43,19 @@ pub enum QueryParserError { #[derive(Debug, Error, Eq, PartialEq)] pub enum DocParsingError { /// The provided string is not a syntactically valid JSON object. 
- #[error("The provided string is not a syntactically valid JSON object: {0}")] + #[error("the provided string is not a syntactically valid JSON object: {0}")] NotJsonObject(String), /// One of the value could not be parsed. - #[error("The field `{0}` could not be parsed: {1}")] + #[error("the field `{0}` could not be parsed: {1}")] ValueError(String, String), /// The json-document contains a field that is not declared in the schema. - #[error("The document contains a field that is not declared in the schema: {0:?}")] + #[error("the document contains a field that is not declared in the schema: {0:?}")] NoSuchFieldInSchema(String), /// The document contains a array of values but a single value is expected. - #[error("The document contains an array of values but a single value is expected: {0:?}")] + #[error("the document contains an array of values but a single value is expected: {0:?}")] MultiValuesNotSupported(String), /// The document does not contain a field that is required. - #[error("The document must contain field {0:?}.")] + #[error("the document must contain field {0:?}")] RequiredField(String), } diff --git a/quickwit/quickwit-doc-mapper/src/query_builder.rs b/quickwit/quickwit-doc-mapper/src/query_builder.rs index 78d7d05bdd7..b0532ec0d9e 100644 --- a/quickwit/quickwit-doc-mapper/src/query_builder.rs +++ b/quickwit/quickwit-doc-mapper/src/query_builder.rs @@ -337,13 +337,13 @@ mod test { (Err(query_err_msg), TestExpectation::Err(sub_str)) => { assert!( query_err_msg.contains(sub_str), - "Query error received is {query_err_msg}. It should contain {sub_str}" + "query error received is {query_err_msg}. it should contain {sub_str}" ); } (Ok(query_str), TestExpectation::Ok(sub_str)) => { assert!( query_str.contains(sub_str), - "Error query parsing {query_str} should contain {sub_str}" + "error query parsing {query_str} should contain {sub_str}" ); } (Err(error_msg), TestExpectation::Ok(expectation)) => { @@ -374,14 +374,14 @@ mod test { "title:[a TO b]", Vec::new(), TestExpectation::Err( - "Range queries are only supported for fast fields. (`title` is not a fast field)", + "range queries are only supported for fast fields. (`title` is not a fast field)", ), ); check_build_query_dynamic_mode( "title:{a TO b} desc:foo", Vec::new(), TestExpectation::Err( - "Range queries are only supported for fast fields. (`title` is not a fast field)", + "range queries are only supported for fast fields. (`title` is not a fast field)", ), ); } @@ -392,7 +392,7 @@ mod test { check_build_query_static_mode( "foo:bar", Vec::new(), - TestExpectation::Err("Invalid query: Field does not exist: `foo`"), + TestExpectation::Err("invalid query: field does not exist: `foo`"), ); check_build_query_static_mode( "title:bar", @@ -402,27 +402,27 @@ mod test { check_build_query_static_mode( "bar", vec!["fieldnotinschema".to_string()], - TestExpectation::Err("Invalid query: Field does not exist: `fieldnotinschema`"), + TestExpectation::Err("invalid query: field does not exist: `fieldnotinschema`"), ); check_build_query_static_mode( "title:[a TO b]", Vec::new(), TestExpectation::Err( - "Range queries are only supported for fast fields. (`title` is not a fast field)", + "range queries are only supported for fast fields. (`title` is not a fast field)", ), ); check_build_query_static_mode( "title:{a TO b} desc:foo", Vec::new(), TestExpectation::Err( - "Range queries are only supported for fast fields. (`title` is not a fast field)", + "range queries are only supported for fast fields. 
(`title` is not a fast field)", ), ); check_build_query_static_mode( "title:>foo", Vec::new(), TestExpectation::Err( - "Range queries are only supported for fast fields. (`title` is not a fast field)", + "range queries are only supported for fast fields. (`title` is not a fast field)", ), ); check_build_query_static_mode( @@ -443,17 +443,17 @@ mod test { check_build_query_static_mode( "foo", Vec::new(), - TestExpectation::Err("Query requires a default search field and none was supplied."), + TestExpectation::Err("query requires a default search field and none was supplied"), ); check_build_query_static_mode( "bar", Vec::new(), - TestExpectation::Err("Query requires a default search field and none was supplied"), + TestExpectation::Err("query requires a default search field and none was supplied"), ); check_build_query_static_mode( "title:hello AND (Jane OR desc:world)", Vec::new(), - TestExpectation::Err("Query requires a default search field and none was supplied"), + TestExpectation::Err("query requires a default search field and none was supplied"), ); check_build_query_static_mode( "server.running:true", @@ -468,7 +468,7 @@ mod test { check_build_query_static_mode( "IN [hello]", Vec::new(), - TestExpectation::Err("Set query need to target a specific field."), + TestExpectation::Err("set query need to target a specific field"), ); } @@ -580,7 +580,7 @@ mod test { check_build_query_static_mode( "server.running:notabool", Vec::new(), - TestExpectation::Err("Expected a `bool` search value for field `server.running`"), + TestExpectation::Err("expected a `bool` search value for field `server.running`"), ); } diff --git a/quickwit/quickwit-doc-mapper/src/routing_expression/mod.rs b/quickwit/quickwit-doc-mapper/src/routing_expression/mod.rs index d78cd36795e..106074831f1 100644 --- a/quickwit/quickwit-doc-mapper/src/routing_expression/mod.rs +++ b/quickwit/quickwit-doc-mapper/src/routing_expression/mod.rs @@ -243,17 +243,17 @@ fn convert_ast(ast: Vec) -> anyhow::Result { if args.len() != 2 { anyhow::bail!( - "Invalid arguments for `hash_mod`: expected 2 arguments, found {}", + "invalid arguments for `hash_mod`: expected 2 arguments, found {}", args.len() ); } let Argument::Expression(fields) = args.remove(0) else { - anyhow::bail!("Invalid 1st argument for `hash_mod`: expected expression"); + anyhow::bail!("invalid 1st argument for `hash_mod`: expected expression"); }; let Argument::Number(modulo) = args.remove(0) else { - anyhow::bail!("Invalid 2nd argument for `hash_mod`: expected number"); + anyhow::bail!("invalid 2nd argument for `hash_mod`: expected number"); }; Ok(InnerRoutingExpr::Modulo( @@ -261,7 +261,7 @@ fn convert_ast(ast: Vec) -> anyhow::Result anyhow::bail!("Unknown function `{}`", name), + _ => anyhow::bail!("unknown function `{}`", name), }, }) .collect::, _>>()?; diff --git a/quickwit/quickwit-index-management/src/garbage_collection.rs b/quickwit/quickwit-index-management/src/garbage_collection.rs index 6fdd53036fa..3e50a0d4ead 100644 --- a/quickwit/quickwit-index-management/src/garbage_collection.rs +++ b/quickwit/quickwit-index-management/src/garbage_collection.rs @@ -38,7 +38,7 @@ const DELETE_SPLITS_BATCH_SIZE: usize = 1000; /// [`DeleteSplitsError`] describes the errors that occurred during the deletion of splits from /// storage and metastore. 
 #[derive(Error, Debug)]
-#[error("Failed to delete splits from storage and/or metastore.")]
+#[error("failed to delete splits from storage and/or metastore")]
 pub struct DeleteSplitsError {
     successes: Vec,
     storage_error: Option,
@@ -294,7 +294,7 @@ pub async fn delete_splits_from_storage_and_metastore(
         error!(
             error=?metastore_error,
             index_id=index_uid.index_id(),
-            "Failed to delete split(s) {:?} from metastore.",
+            "failed to delete split(s) {:?} from metastore",
             PrettySample::new(&split_ids, 5),
         );
         let delete_splits_error = DeleteSplitsError {
diff --git a/quickwit/quickwit-index-management/src/index.rs b/quickwit/quickwit-index-management/src/index.rs
index 76b6b2e54bc..c5403cf90ff 100644
--- a/quickwit/quickwit-index-management/src/index.rs
+++ b/quickwit/quickwit-index-management/src/index.rs
@@ -40,19 +40,19 @@ use crate::garbage_collection::{
 #[derive(Error, Debug)]
 pub enum IndexServiceError {
-    #[error("Failed to resolve the storage `{0}`.")]
+    #[error("failed to resolve the storage `{0}`")]
     Storage(#[from] StorageResolverError),
-    #[error("Metastore error `{0}`.")]
+    #[error("metastore error `{0}`")]
     Metastore(#[from] MetastoreError),
-    #[error("Split deletion error `{0}`.")]
+    #[error("split deletion error `{0}`")]
     SplitDeletion(#[from] DeleteSplitsError),
-    #[error("Invalid config: {0:#}.")]
+    #[error("invalid config: {0:#}")]
     InvalidConfig(anyhow::Error),
-    #[error("Invalid identifier: {0}.")]
+    #[error("invalid identifier: {0}")]
     InvalidIdentifier(String),
-    #[error("Operation not allowed: {0}.")]
+    #[error("operation not allowed: {0}")]
     OperationNotAllowed(String),
-    #[error("Internal error: {0}.")]
+    #[error("internal error: {0}")]
     Internal(String),
 }
@@ -260,7 +260,7 @@ impl IndexService {
         )
         .await
         {
-            error!(metastore_uri=%self.metastore.uri(), index_id=%index_id, error=?err, "Failed to delete all the split files during garbage collection.");
+            error!(metastore_uri=%self.metastore.uri(), index_id=%index_id, error=?err, "failed to delete all the split files during garbage collection");
         }
         for source_id in index_metadata.sources.keys() {
             self.metastore
@@ -281,7 +281,7 @@ impl IndexService {
         // that the identifier is valid. However it authorizes the special
         // private names internal to quickwit, so we do an extra check.
         validate_identifier("Source ID", &source_id).map_err(|_| {
-            IndexServiceError::InvalidIdentifier(format!("Invalid source ID: `{source_id}`"))
+            IndexServiceError::InvalidIdentifier(format!("invalid source ID: `{source_id}`"))
         })?;
         check_source_connectivity(&source_config)
             .await
@@ -290,7 +290,7 @@ impl IndexService {
             .add_source(index_uid.clone(), source_config)
             .await?;
         info!(
-            "Source `{}` successfully created for index `{}`.",
+            "source `{}` successfully created for index `{}`",
             source_id,
             index_uid.index_id()
         );
@@ -302,8 +302,7 @@ impl IndexService {
             .get(&source_id)
             .ok_or_else(|| {
                 IndexServiceError::Internal(
-                    "Created source is not in index metadata, this should never happen."
-                        .to_string(),
+                    "created source is not in index metadata, this should never happen".to_string(),
                 )
             })?
             .clone();
@@ -339,7 +338,7 @@ impl IndexService {
 /// persisted.
 pub async fn clear_cache_directory(data_dir_path: &Path) -> anyhow::Result<()> {
     let cache_directory_path = get_cache_directory_path(data_dir_path);
-    info!(path = %cache_directory_path.display(), "Clearing cache directory.");
+    info!(path = %cache_directory_path.display(), "clearing cache directory");
     empty_dir(&cache_directory_path).await?;
     Ok(())
 }
@@ -384,7 +383,7 @@ mod tests {
         .await
         .unwrap_err();
     let IndexServiceError::Metastore(inner_error) = error else {
-        panic!("Expected `MetastoreError` variant, got {:?}", error)
+        panic!("expected `MetastoreError` variant, got {:?}", error)
     };
     assert!(
         matches!(inner_error, MetastoreError::AlreadyExists(EntityKind::Index { index_id }) if index_id == index_metadata_0.index_id())
diff --git a/quickwit/quickwit-indexing/src/actors/doc_processor.rs b/quickwit/quickwit-indexing/src/actors/doc_processor.rs
index b5dd045a8ee..569a7da537f 100644
--- a/quickwit/quickwit-indexing/src/actors/doc_processor.rs
+++ b/quickwit/quickwit-indexing/src/actors/doc_processor.rs
@@ -255,7 +255,7 @@ impl DocProcessor {
     ) -> anyhow::Result {
         let timestamp_field_opt = extract_timestamp_field(doc_mapper.as_ref())?;
         if cfg!(not(feature = "vrl")) && transform_config_opt.is_some() {
-            anyhow::bail!("VRL is not enabled. Please recompile with the `vrl` feature.")
+            anyhow::bail!("VRL is not enabled. please recompile with the `vrl` feature")
         }
         let doc_processor = Self {
             doc_mapper,
@@ -344,7 +344,7 @@ fn extract_timestamp_field(doc_mapper: &dyn DocMapper) -> anyhow::Result = ingest_api_service
         .ask_for_res(ListQueuesRequest {})
         .await
-        .context("Failed to list queues.")?
+        .context("failed to list queues")?
         .queues
         .into_iter()
         .collect();
@@ -603,7 +603,7 @@ impl IndexingService {
         .metastore
         .list_indexes_metadatas(ListIndexesQuery::All)
         .await
-        .context("Failed to list queues")?
+        .context("failed to list indexes")?
         .into_iter()
         .map(|index_metadata| index_metadata.index_id().to_string())
         .collect();
diff --git a/quickwit/quickwit-indexing/src/actors/merge_executor.rs b/quickwit/quickwit-indexing/src/actors/merge_executor.rs
index 97918c50f89..525027219aa 100644
--- a/quickwit/quickwit-indexing/src/actors/merge_executor.rs
+++ b/quickwit/quickwit-indexing/src/actors/merge_executor.rs
@@ -145,7 +145,7 @@ impl Handler for MergeExecutor {
 }
 
 fn combine_index_meta(mut index_metas: Vec<IndexMeta>) -> anyhow::Result<IndexMeta> {
-    let mut union_index_meta = index_metas.pop().with_context(|| "Only one IndexMeta")?;
+    let mut union_index_meta = index_metas.pop().with_context(|| "expected at least one IndexMeta")?;
     for index_meta in index_metas {
         union_index_meta.segments.extend(index_meta.segments);
     }
@@ -470,17 +470,17 @@ impl MergeExecutor {
     let num_delete_tasks = delete_tasks.len();
     if num_delete_tasks > 0 {
         let doc_mapper = doc_mapper_opt
-            .ok_or_else(|| anyhow!("Doc mapper must be present if there are delete tasks."))?;
+            .ok_or_else(|| anyhow!("doc mapper must be present if there are delete tasks"))?;
         for delete_task in delete_tasks {
             let delete_query = delete_task
                 .delete_query
                 .expect("A delete task must have a delete query.");
             let query_ast: QueryAst = serde_json::from_str(&delete_query.query_ast)
-                .context("Invalid query_ast json")?;
+                .context("invalid query_ast json")?;
             // We ignore the docmapper default fields when we consider delete query.
             // We reparse the query here defensivley, but actually, it should already have been
             // done in the delete task rest handler.
-            let parsed_query_ast = query_ast.parse_user_query(&[]).context("Invalid query")?;
+            let parsed_query_ast = query_ast.parse_user_query(&[]).context("invalid query")?;
             debug!(
                 "Delete all documents matched by query `{:?}`",
                 parsed_query_ast
diff --git a/quickwit/quickwit-indexing/src/actors/merge_split_downloader.rs b/quickwit/quickwit-indexing/src/actors/merge_split_downloader.rs
index 933180d6908..1557e6c166f 100644
--- a/quickwit/quickwit-indexing/src/actors/merge_split_downloader.rs
+++ b/quickwit/quickwit-indexing/src/actors/merge_split_downloader.rs
@@ -123,7 +123,7 @@ impl MergeSplitDownloader {
             .await
             .map_err(|error| {
                 let split_id = split.split_id();
-                anyhow::anyhow!(error).context(format!("Failed to download split `{split_id}`"))
+                anyhow::anyhow!(error).context(format!("failed to download split `{split_id}`"))
             })?;
         tantivy_dirs.push(tantivy_dir);
     }
diff --git a/quickwit/quickwit-indexing/src/actors/packager.rs b/quickwit/quickwit-indexing/src/actors/packager.rs
index 1190a139dfc..09c36e9935c 100644
--- a/quickwit/quickwit-indexing/src/actors/packager.rs
+++ b/quickwit/quickwit-indexing/src/actors/packager.rs
@@ -236,7 +236,7 @@ fn try_extract_terms(
         .sum::<usize>();
     if num_terms > max_terms {
         bail!(
-            "Number of unique terms for tag field {} > {}.",
+            "number of unique terms for tag field {} > {}",
             named_field.name,
             max_terms
         );
@@ -256,11 +256,11 @@ fn try_extract_terms(
         FieldType::Bool(_) => match u64_from_term_data(term_data)? {
             0 => false,
             1 => true,
-            _ => bail!("Invalid boolean value"),
+            _ => bail!("invalid boolean value"),
         }
         .to_string(),
         FieldType::Bytes(_) => {
-            bail!("Tags collection is not allowed on `bytes` fields.")
+            bail!("tags collection is not allowed on `bytes` fields")
         }
         _ => std::str::from_utf8(term_data)?.to_string(),
     };
@@ -327,7 +327,7 @@ fn create_packaged_split(
 fn u64_from_term_data(data: &[u8]) -> anyhow::Result<u64> {
     let u64_bytes: [u8; 8] = data[0..8]
         .try_into()
-        .context("Could not interpret term bytes as u64")?;
+        .context("could not interpret term bytes as u64")?;
     Ok(u64::from_be_bytes(u64_bytes))
 }
diff --git a/quickwit/quickwit-indexing/src/actors/publisher.rs b/quickwit/quickwit-indexing/src/actors/publisher.rs
index b78bd09de93..697b41631cd 100644
--- a/quickwit/quickwit-indexing/src/actors/publisher.rs
+++ b/quickwit/quickwit-indexing/src/actors/publisher.rs
@@ -135,7 +135,7 @@ impl Handler for Publisher {
                 publish_token_opt,
             ))
             .await
-            .context("Failed to publish splits.")?;
+            .context("failed to publish splits")?;
         } else {
             // TODO: Remove the junk right away?
             info!(
diff --git a/quickwit/quickwit-indexing/src/actors/sequencer.rs b/quickwit/quickwit-indexing/src/actors/sequencer.rs
index bc27122f9f9..0a0f937f583 100644
--- a/quickwit/quickwit-indexing/src/actors/sequencer.rs
+++ b/quickwit/quickwit-indexing/src/actors/sequencer.rs
@@ -81,11 +81,11 @@ where
         let command = ctx
             .protect_future(message)
             .await
-            .context("Failed to receive command from uploader.")?;
+            .context("failed to receive command from uploader")?;
         if let SequencerCommand::Proceed(msg) = command {
             ctx.send_message(&self.mailbox, msg)
                 .await
-                .context("Failed to send message to publisher.")?;
+                .context("failed to send message to publisher")?;
         }
         Ok(())
     }
diff --git a/quickwit/quickwit-indexing/src/actors/uploader.rs b/quickwit/quickwit-indexing/src/actors/uploader.rs
index 4b3a76b8918..670443a4d70 100644
--- a/quickwit/quickwit-indexing/src/actors/uploader.rs
+++ b/quickwit/quickwit-indexing/src/actors/uploader.rs
@@ -127,9 +127,7 @@ impl SplitsUpdateSender {
     fn discard(self) -> anyhow::Result<()> {
         if let SplitsUpdateSender::Sequencer(split_uploader_tx) = self {
             if split_uploader_tx.send(SequencerCommand::Discard).is_err() {
-                bail!(
-                    "Failed to send cancel command to sequencer. The sequencer is probably dead."
-                );
+                bail!("failed to send cancel command to sequencer. the sequencer is probably dead");
             }
         }
         Ok(())
@@ -146,7 +144,7 @@ impl SplitsUpdateSender {
                 split_uploaded_tx.send(SequencerCommand::Proceed(split_update))
             {
                 bail!(
-                    "Failed to send upload split `{:?}`. The publisher is probably dead.",
+                    "failed to send upload split `{:?}`. the publisher is probably dead",
                     &publisher_message
                 );
             }
@@ -221,7 +219,7 @@ impl Uploader {
         concurrent_upload_permits
             .acquire()
             .await
-            .context("The uploader semaphore is closed. (This should never happen.)")
+            .context("the uploader semaphore is closed. (this should never happen)")
     }
 }
@@ -341,7 +339,7 @@ impl Handler for Uploader {
                     if let Err(cause) = upload_result {
                         warn!(cause=?cause, split_id=packaged_split.split_id(), "Failed to upload split. Killing!");
                         kill_switch.kill();
-                        bail!("Failed to upload split `{}`. Killing!", packaged_split.split_id());
+                        bail!("failed to upload split `{}`. killing the actor context", packaged_split.split_id());
                     }
                     packaged_splits_and_metadata.push((packaged_split, metadata));
diff --git a/quickwit/quickwit-indexing/src/source/file_source.rs b/quickwit/quickwit-indexing/src/source/file_source.rs
index d07f6e669d5..08a631b4421 100644
--- a/quickwit/quickwit-indexing/src/source/file_source.rs
+++ b/quickwit/quickwit-indexing/src/source/file_source.rs
@@ -91,7 +91,7 @@ impl Source for FileSource {
         if let Some(filepath) = &self.params.filepath {
             let filepath_str = filepath
                 .to_str()
-                .context("Path is invalid utf-8")?
+                .context("path is invalid utf-8")?
                 .to_string();
             let partition_id = PartitionId::from(filepath_str);
             doc_batch
@@ -140,7 +140,7 @@ impl TypedSourceFactory for FileSourceFactory {
         let reader: Box = if let Some(filepath) = &params.filepath {
             let mut file = File::open(&filepath).await.with_context(|| {
-                format!("Failed to open source file `{}`.", filepath.display())
+                format!("failed to open source file `{}`", filepath.display())
             })?;
             let partition_id = PartitionId::from(filepath.to_string_lossy().to_string());
             if let Some(Position::Offset(offset_str)) =
diff --git a/quickwit/quickwit-indexing/src/source/gcp_pubsub_source.rs b/quickwit/quickwit-indexing/src/source/gcp_pubsub_source.rs
index 27ce24eb5b4..63e2c198027 100644
--- a/quickwit/quickwit-indexing/src/source/gcp_pubsub_source.rs
+++ b/quickwit/quickwit-indexing/src/source/gcp_pubsub_source.rs
@@ -113,14 +113,14 @@ impl GcpPubSubSource {
                 .await
                 .with_context(|| {
                     format!(
-                        "Failed to load GCP PubSub credentials file from `{credentials_file}`."
+                        "failed to load GCP PubSub credentials file from `{credentials_file}`"
                     )
                 })?;
             ClientConfig::default().with_credentials(credentials).await
         }
         _ => ClientConfig::default().with_auth().await,
     }
-    .context("Failed to create GCP PubSub client config.")?;
+    .context("failed to create GCP PubSub client config")?;
 
     if params.project_id.is_some() {
         client_config.project_id = params.project_id
@@ -128,7 +128,7 @@ impl GcpPubSubSource {
 
     let client = Client::new(client_config)
         .await
-        .context("Failed to create GCP PubSub client.")?;
+        .context("failed to create GCP PubSub client")?;
     let subscription = client.subscription(&subscription_name);
     // TODO: replace with "///"
     let partition_id = append_random_suffix(&format!("gpc-pubsub-{subscription_name}"));
@@ -142,7 +142,7 @@ impl GcpPubSubSource {
         "Starting GCP PubSub source."
     );
     if !subscription.exists(Some(RetrySetting::default())).await? {
-        anyhow::bail!("GCP PubSub subscription `{subscription_name}` does not exist.");
+        anyhow::bail!("GCP PubSub subscription `{subscription_name}` does not exist");
     }
     Ok(Self {
         ctx,
@@ -249,7 +249,7 @@ impl GcpPubSubSource {
         .subscription
         .pull(self.max_messages_per_pull, None)
         .await
-        .context("Failed to pull messages from subscription.")?;
+        .context("failed to pull messages from subscription")?;
 
     let Some(last_message) = messages.last() else {
         return Ok(());
@@ -282,7 +282,7 @@ impl GcpPubSubSource {
     batch
         .checkpoint_delta
         .record_partition_delta(self.partition_id.clone(), from_position, to_position)
-        .context("Failed to record partition delta.")?;
+        .context("failed to record partition delta")?;
     Ok(())
 }
diff --git a/quickwit/quickwit-indexing/src/source/kafka_source.rs b/quickwit/quickwit-indexing/src/source/kafka_source.rs
index ef10b823f2e..91151b6df55 100644
--- a/quickwit/quickwit-indexing/src/source/kafka_source.rs
+++ b/quickwit/quickwit-indexing/src/source/kafka_source.rs
@@ -319,8 +319,8 @@ impl KafkaSource {
         .get(&partition)
         .ok_or_else(|| {
             anyhow::anyhow!(
-                "Received message from unassigned partition `{}`. Assigned partitions: \
-                 `{{{}}}`.",
+                "received message from unassigned partition `{}`. assigned partitions: \
+                 `{{{}}}`",
                 partition,
                 self.state.assigned_partitions.keys().join(", "),
             )
@@ -335,7 +335,7 @@ impl KafkaSource {
     batch
         .checkpoint_delta
         .record_partition_delta(partition_id, previous_position, current_position)
-        .context("Failed to record partition delta.")?;
+        .context("failed to record partition delta")?;
     Ok(())
 }
@@ -354,7 +354,7 @@ impl KafkaSource {
         .await
         .with_context(|| {
             format!(
-                "Failed to fetch index metadata for index `{}`.",
+                "failed to fetch index metadata for index `{}`",
                 self.ctx.index_uid.index_id()
             )
         })?;
@@ -400,7 +400,7 @@ impl KafkaSource {
     );
     assignment_tx
         .send(next_offsets)
-        .map_err(|_| anyhow!("Consumer context was dropped."))?;
+        .map_err(|_| anyhow!("consumer context was dropped"))?;
     Ok(())
 }
@@ -414,7 +414,7 @@ impl KafkaSource {
     ctx.protect_future(self.publish_lock.kill()).await;
     ack_tx
         .send(())
-        .map_err(|_| anyhow!("Consumer context was dropped."))?;
+        .map_err(|_| anyhow!("consumer context was dropped"))?;
     batch.clear();
     self.publish_lock = PublishLock::default();
@@ -472,7 +472,7 @@ impl Source for KafkaSource {
     loop {
         tokio::select! {
             event_opt = self.events_rx.recv() => {
-                let event = event_opt.ok_or_else(|| ActorExitStatus::from(anyhow!("Consumer was dropped.")))?;
+                let event = event_opt.ok_or_else(|| ActorExitStatus::from(anyhow!("consumer was dropped")))?;
                 match event {
                     KafkaEvent::Message(message) => self.process_message(message, &mut batch).await?,
                     KafkaEvent::AssignPartitions { partitions, assignment_tx} => self.process_assign_partitions(ctx, &partitions, assignment_tx).await?,
@@ -616,18 +616,18 @@ pub(super) async fn check_connectivity(params: KafkaSourceParams) -> anyhow::Res
     let cluster_metadata = spawn_blocking(move || {
         consumer
             .fetch_metadata(Some(&topic), timeout)
-            .with_context(|| format!("Failed to fetch metadata for topic `{topic}`."))
+            .with_context(|| format!("failed to fetch metadata for topic `{topic}`"))
     })
     .await??;
 
     if cluster_metadata.topics().is_empty() {
-        bail!("Topic `{}` does not exist.", params.topic);
+        bail!("topic `{}` does not exist", params.topic);
     }
     let topic_metadata = &cluster_metadata.topics()[0];
     assert_eq!(topic_metadata.name(), params.topic); // Belt and suspenders.
 
     if topic_metadata.partitions().is_empty() {
-        bail!("Topic `{}` has no partitions.", params.topic);
+        bail!("topic `{}` has no partitions", params.topic);
     }
     Ok(())
 }
@@ -661,7 +661,7 @@ fn create_consumer(
         topic: params.topic,
         events_tx,
     })
-    .context("Failed to create Kafka consumer.")?;
+    .context("failed to create Kafka consumer")?;
 
     Ok((client_config, consumer))
 }
@@ -679,7 +679,7 @@ fn parse_client_log_level(client_log_level: Option) -> anyhow::Result
         Some("alert") => RDKafkaLogLevel::Alert,
         Some("emerg") => RDKafkaLogLevel::Emerg,
         Some(level) => bail!(
-            "Failed to parse Kafka client log level. Value `{}` is not supported.",
+            "failed to parse Kafka client log level. value `{}` is not supported",
            level
        ),
    };
@@ -690,7 +690,7 @@ fn parse_client_params(client_params: JsonValue) -> anyhow::Result
    let params = if let JsonValue::Object(params) = client_params {
        params
    } else {
-        bail!("Failed to parse Kafka client parameters. `client_params` must be a JSON object.");
+        bail!("failed to parse Kafka client parameters. `client_params` must be a JSON object");
    };
    let mut client_config = ClientConfig::new();
    for (key, value_json) in params {
@@ -700,8 +700,8 @@ fn parse_client_params(client_params: JsonValue) -> anyhow::Result
            JsonValue::String(value_string) => value_string,
            JsonValue::Null => continue,
            JsonValue::Array(_) | JsonValue::Object(_) => bail!(
-                "Failed to parse Kafka client parameters. `client_params.{}` must be a boolean, \
-                 number, or string.",
+                "failed to parse Kafka client parameters. `client_params.{}` must be a boolean, \
+                 number, or string",
                key
            ),
        };
@@ -782,7 +782,7 @@ mod kafka_broker_tests {
        .collect::<Result<Vec<_>, _>>()
        .map_err(|(topic, err_code)| {
            anyhow::anyhow!(
-                "Failed to create topic `{}`. Error code: `{}`",
+                "failed to create topic `{}`. error code: `{}`",
                topic,
                err_code
            )
diff --git a/quickwit/quickwit-indexing/src/source/kinesis/api.rs b/quickwit/quickwit-indexing/src/source/kinesis/api.rs
index e80867bbbdc..74b6a40086d 100644
--- a/quickwit/quickwit-indexing/src/source/kinesis/api.rs
+++ b/quickwit/quickwit-indexing/src/source/kinesis/api.rs
@@ -145,7 +145,7 @@ pub(crate) mod tests {
            .await
        })
        .await
-        .with_context(|| format!("Failed to create Kinesis data stream `{stream_name}`."))?;
+        .with_context(|| format!("failed to create Kinesis data stream `{stream_name}`"))?;
    Ok(())
}
@@ -163,7 +163,7 @@ pub(crate) mod tests {
            .await
        })
        .await
-        .with_context(|| format!("Failed to delete Kinesis data stream `{stream_name}`."))?;
+        .with_context(|| format!("failed to delete Kinesis data stream `{stream_name}`"))?;
    Ok(())
}
@@ -184,7 +184,7 @@ pub(crate) mod tests {
    response
        .stream_description
-        .ok_or_else(|| anyhow!("No stream summary was returned from AWS"))
+        .ok_or_else(|| anyhow!("no stream summary was returned from AWS"))
}
/// Lists the Kinesis data streams.
/// https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ListStreams.html
diff --git a/quickwit/quickwit-indexing/src/source/kinesis/helpers.rs b/quickwit/quickwit-indexing/src/source/kinesis/helpers.rs
index dc58172e48d..75c5f177e9e 100644
--- a/quickwit/quickwit-indexing/src/source/kinesis/helpers.rs
+++ b/quickwit/quickwit-indexing/src/source/kinesis/helpers.rs
@@ -134,7 +134,7 @@ pub(crate) mod tests {
                .or_insert_with(Vec::new)
                .push(sequence_number);
        } else {
-            bail!("Sequence number is missing from record.");
+            bail!("sequence number is missing from record");
        }
    }
    Ok(sequence_numbers)
diff --git a/quickwit/quickwit-indexing/src/source/kinesis/kinesis_source.rs b/quickwit/quickwit-indexing/src/source/kinesis/kinesis_source.rs
index e2e4699b958..1d3d2fda614 100644
--- a/quickwit/quickwit-indexing/src/source/kinesis/kinesis_source.rs
+++ b/quickwit/quickwit-indexing/src/source/kinesis/kinesis_source.rs
@@ -237,7 +237,7 @@ impl Source for KinesisSource {
                    // This should in theory never be `None` but is an `Option` nontheless
                    // so it is probably best to error rather than skip here in case this changes.
let record_sequence_number = record.sequence_number - .context("Received Kinesis record without sequence number.")?; + .context("received Kinesis record without sequence number")?; if record_data.is_empty() { warn!( @@ -262,7 +262,7 @@ impl Source for KinesisSource { .get_mut(&shard_id) .ok_or_else(|| { anyhow::anyhow!( - "Received record from unassigned shard `{}`.", shard_id, + "received record from unassigned shard `{}`", shard_id, ) })?; shard_consumer_state.lag_millis = lag_millis; @@ -275,7 +275,7 @@ impl Source for KinesisSource { partition_id, previous_position, current_position, - ).context("Failed to record partition delta.")?; + ).context("failed to record partition delta")?; } } if batch_num_bytes >= TARGET_BATCH_NUM_BYTES { @@ -364,7 +364,7 @@ pub(super) async fn get_region( if let Some(endpoint) = sdk_config.endpoint_url() { return Ok(RegionOrEndpoint::Endpoint(endpoint.to_string())); } - bail!("Unable to sniff region from envioronment") + bail!("unable to sniff region from environment") } #[cfg(all(test, feature = "kinesis-localstack-tests"))] diff --git a/quickwit/quickwit-indexing/src/source/mod.rs b/quickwit/quickwit-indexing/src/source/mod.rs index 40bec01ccec..d8b68513033 100644 --- a/quickwit/quickwit-indexing/src/source/mod.rs +++ b/quickwit/quickwit-indexing/src/source/mod.rs @@ -313,7 +313,7 @@ pub async fn check_source_connectivity(source_config: &SourceConfig) -> anyhow:: SourceParams::File(params) => { if let Some(filepath) = &params.filepath { if !Path::new(filepath).try_exists()? { - bail!("File `{}` does not exist.", filepath.display()) + bail!("file `{}` does not exist", filepath.display()) } } Ok(()) } @@ -321,7 +321,7 @@ pub async fn check_source_connectivity(source_config: &SourceConfig) -> anyhow:: #[allow(unused_variables)] SourceParams::Kafka(params) => { #[cfg(not(feature = "kafka"))] - bail!("Quickwit binary was not compiled with the `kafka` feature."); + bail!("Quickwit binary was not compiled with the `kafka` feature"); #[cfg(feature = "kafka")] { @@ -332,7 +332,7 @@ pub async fn check_source_connectivity(source_config: &SourceConfig) -> anyhow:: #[allow(unused_variables)] SourceParams::Kinesis(params) => { #[cfg(not(feature = "kinesis"))] - bail!("Quickwit binary was not compiled with the `kinesis` feature."); + bail!("Quickwit binary was not compiled with the `kinesis` feature"); #[cfg(feature = "kinesis")] { @@ -343,7 +343,7 @@ pub async fn check_source_connectivity(source_config: &SourceConfig) -> anyhow:: #[allow(unused_variables)] SourceParams::Pulsar(params) => { #[cfg(not(feature = "pulsar"))] - bail!("Quickwit binary was not compiled with the `pulsar` feature."); + bail!("Quickwit binary was not compiled with the `pulsar` feature"); #[cfg(feature = "pulsar")] { diff --git a/quickwit/quickwit-indexing/src/source/pulsar_source.rs b/quickwit/quickwit-indexing/src/source/pulsar_source.rs index 114b35113f5..ae6c993ee3d 100644 --- a/quickwit/quickwit-indexing/src/source/pulsar_source.rs +++ b/quickwit/quickwit-indexing/src/source/pulsar_source.rs @@ -194,7 +194,7 @@ impl PulsarSource { batch .checkpoint_delta .record_partition_delta(partition, current_position, msg_position) - .context("Failed to record partition delta.")?; + .context("failed to record partition delta")?; batch.push(doc); self.state.num_bytes_processed += num_bytes; @@ -235,8 +235,8 @@ impl Source for PulsarSource { // that we can use the consumer within this Sync context. 
message = self.pulsar_consumer.next() => { let message = message - .ok_or_else(|| ActorExitStatus::from(anyhow!("Consumer was dropped.")))? - .map_err(|e| ActorExitStatus::from(anyhow!("Failed to get message from consumer: {:?}", e)))?; + .ok_or_else(|| ActorExitStatus::from(anyhow!("consumer was dropped")))? + .map_err(|e| ActorExitStatus::from(anyhow!("failed to get message from consumer: {:?}", e)))?; self.process_message(message, &mut batch).map_err(ActorExitStatus::from)?; diff --git a/quickwit/quickwit-indexing/src/source/source_factory.rs b/quickwit/quickwit-indexing/src/source/source_factory.rs index 2de312a6662..05e4e0e6d63 100644 --- a/quickwit/quickwit-indexing/src/source/source_factory.rs +++ b/quickwit/quickwit-indexing/src/source/source_factory.rs @@ -69,14 +69,14 @@ pub struct SourceLoader { #[derive(Error, Debug)] pub enum SourceLoaderError { #[error( - "Unknown source type `{requested_source_type}` (available source types are \ - {available_source_types})." + "unknown source type `{requested_source_type}` (available source types are \ + {available_source_types})" )] UnknownSourceType { requested_source_type: String, available_source_types: String, //< a comma separated list with the available source_type. }, - #[error("Failed to create source `{source_id}` of type `{source_type}`. Cause: {error:?}")] + #[error("failed to create source `{source_id}` of type `{source_type}`. cause: {error:?}")] FailedToCreateSource { source_id: String, source_type: String, diff --git a/quickwit/quickwit-indexing/src/split_store/indexing_split_store.rs b/quickwit/quickwit-indexing/src/split_store/indexing_split_store.rs index aed43f73b0b..f58238c79d4 100644 --- a/quickwit/quickwit-indexing/src/split_store/indexing_split_store.rs +++ b/quickwit/quickwit-indexing/src/split_store/indexing_split_store.rs @@ -134,7 +134,7 @@ impl IndexingSplitStore { .await .with_context(|| { format!( - "Failed uploading key {} in bucket {}", + "failed uploading key {} in bucket {}", key.display(), self.inner.remote_storage.uri() ) diff --git a/quickwit/quickwit-indexing/src/split_store/local_split_store.rs b/quickwit/quickwit-indexing/src/split_store/local_split_store.rs index 7918bd9dc90..913564720d9 100644 --- a/quickwit/quickwit-indexing/src/split_store/local_split_store.rs +++ b/quickwit/quickwit-indexing/src/split_store/local_split_store.rs @@ -78,7 +78,7 @@ pub fn get_tantivy_directory_from_split_bundle( let mmap_directory = MmapDirectory::open(split_file.parent().ok_or_else(|| { io::Error::new( io::ErrorKind::NotFound, - format!("Couldn't find parent for {:?}", &split_file), + format!("couldn't find parent for {:?}", &split_file), ) })?)?; let split_fileslice = mmap_directory.open_read(Path::new(&split_file))?; @@ -118,7 +118,7 @@ impl SplitFolder { /// There are no specific constraint on `path`. pub async fn create(split_id: &str, path: &Path) -> io::Result { let split_id = Ulid::from_str(split_id).map_err(|_err| { - let error_msg = format!("Split Id should be an `Ulid`. Got `{split_id:?}`."); + let error_msg = format!("split ID should be a `Ulid`. 
got `{split_id:?}`"); io::Error::new(io::ErrorKind::InvalidInput, error_msg) })?; let num_bytes = num_bytes_in_folder(path).await?; @@ -297,7 +297,7 @@ impl LocalSplitStore { ) -> anyhow::Result { tokio::fs::create_dir_all(&split_store_folder) .await - .context("Failed to create the split cache directory.")?; + .context("failed to create the split cache directory")?; let mut split_folders: Vec = Vec::new(); @@ -316,8 +316,8 @@ impl LocalSplitStore { let split_id = split_id_from_split_folder(&dir_path).ok_or_else(|| { let error_msg = format!( - "Split folder name should match the format `.split`. Got \ - `{dir_path:?}`." + "split folder name should match the format `.split`. got \ + `{dir_path:?}`" ); io::Error::new(io::ErrorKind::InvalidInput, error_msg) })?; diff --git a/quickwit/quickwit-ingest/src/errors.rs b/quickwit/quickwit-ingest/src/errors.rs index 5e8eedb1145..0ecfc7eb02d 100644 --- a/quickwit/quickwit-ingest/src/errors.rs +++ b/quickwit/quickwit-ingest/src/errors.rs @@ -27,21 +27,21 @@ use serde::Serialize; #[derive(Debug, Clone, thiserror::Error, Serialize)] pub enum IngestServiceError { - #[error("Data corruption: {0}.")] + #[error("data corruption: {0}")] Corruption(String), - #[error("Index `{index_id}` already exists.")] + #[error("index `{index_id}` already exists")] IndexAlreadyExists { index_id: String }, - #[error("Index `{index_id}` not found.")] + #[error("index `{index_id}` not found")] IndexNotFound { index_id: String }, - #[error("An internal error occurred: {0}.")] + #[error("an internal error occurred: {0}")] Internal(String), - #[error("Invalid position: {0}.")] + #[error("invalid position: {0}")] InvalidPosition(String), - #[error("Io Error {0}")] + #[error("io error {0}")] IoError(String), - #[error("Rate limited")] + #[error("rate limited")] RateLimited, - #[error("The ingest service is unavailable.")] + #[error("the ingest service is unavailable")] Unavailable, } @@ -106,12 +106,12 @@ impl ServiceError for IngestServiceError { } #[derive(Debug, thiserror::Error)] -#[error("Key should contain 16 bytes, got {0}.")] +#[error("key should contain 16 bytes, got {0}")] pub struct CorruptedKey(pub usize); impl From for IngestServiceError { fn from(error: CorruptedKey) -> Self { - IngestServiceError::Corruption(format!("Corrupted key: {error:?}")) + IngestServiceError::Corruption(format!("corrupted key: {error:?}")) } } @@ -137,7 +137,7 @@ impl From for IngestServiceError { match error { ReadRecordError::IoError(io_error) => io_error.into(), ReadRecordError::Corruption => { - IngestServiceError::Corruption("Failed to read record".to_string()) + IngestServiceError::Corruption("failed to read record".to_string()) } } } @@ -150,7 +150,7 @@ impl From for IngestServiceError { AppendError::MissingQueue(index_id) => IngestServiceError::IndexNotFound { index_id }, // these errors can't be reached right now AppendError::Past => IngestServiceError::InvalidPosition( - "Attempted to append a record in the past".to_string(), + "attempted to append a record in the past".to_string(), ), } } diff --git a/quickwit/quickwit-ingest/src/ingest_api_service.rs b/quickwit/quickwit-ingest/src/ingest_api_service.rs index b1eae60f9c1..f69f265a14b 100644 --- a/quickwit/quickwit-ingest/src/ingest_api_service.rs +++ b/quickwit/quickwit-ingest/src/ingest_api_service.rs @@ -81,7 +81,7 @@ async fn get_or_initialize_partition_id(dir_path: &Path) -> crate::Result IngestV2Result { if persist_request.leader_id != self.self_node_id { return Err(IngestV2Error::Internal(format!( - "Routing error: request 
was sent to ingester node `{}` instead of `{}`.", + "routing error: request was sent to ingester node `{}` instead of `{}`", self.self_node_id, persist_request.leader_id, ))); } @@ -366,7 +366,7 @@ impl IngesterService for Ingester { let open_replication_stream_request = syn_replication_stream .next() .await - .ok_or_else(|| IngestV2Error::Internal("Syn replication stream aborted.".to_string()))? + .ok_or_else(|| IngestV2Error::Internal("syn replication stream aborted".to_string()))? .into_open_request() .expect("The first message should be an open replication stream request."); @@ -380,7 +380,7 @@ impl IngesterService for Ingester { let Entry::Vacant(entry) = state_guard.replication_tasks.entry(leader_id.clone()) else { return Err(IngestV2Error::Internal(format!( - "A replication stream betwen {leader_id} and {follower_id} is already opened." + "a replication stream between {leader_id} and {follower_id} is already open" ))); }; let (ack_replication_stream_tx, ack_replication_stream) = ServiceStream::new_bounded(5); @@ -414,7 +414,7 @@ impl IngesterService for Ingester { .read() .await .find_shard_status_rx(&queue_id) - .ok_or_else(|| IngestV2Error::Internal("Shard not found.".to_string()))? + .ok_or_else(|| IngestV2Error::Internal("shard not found".to_string()))?; let service_stream = FetchTask::spawn( open_fetch_stream_request, mrecordlog, @@ -450,7 +450,7 @@ impl IngesterService for Ingester { ) -> IngestV2Result { if truncate_request.leader_id != self.self_node_id { return Err(IngestV2Error::Internal(format!( - "Routing error: expected ingester `{}`, got `{}`.", + "routing error: expected ingester `{}`, got `{}`", truncate_request.leader_id, self.self_node_id ))); } @@ -467,7 +467,7 @@ impl IngesterService for Ingester { .truncate(&queue_id, subrequest.to_position_inclusive) .await .map_err(|error| { - IngestV2Error::Internal(format!("Failed to truncate: {error:?}")) + IngestV2Error::Internal(format!("failed to truncate: {error:?}")) })?; } if let Some(replica_shard) = state_guard.replica_shards.get(&queue_id) { diff --git a/quickwit/quickwit-ingest/src/ingest_v2/replication.rs b/quickwit/quickwit-ingest/src/ingest_v2/replication.rs index 8f315b16c69..5181e5ebb36 100644 --- a/quickwit/quickwit-ingest/src/ingest_v2/replication.rs +++ b/quickwit/quickwit-ingest/src/ingest_v2/replication.rs @@ -225,13 +225,13 @@ impl ReplicationTask { ) -> IngestV2Result { if replicate_request.leader_id != self.leader_id { return Err(IngestV2Error::Internal(format!( - "Invalid argument: expected leader ID `{}`, got `{}`.", + "invalid argument: expected leader ID `{}`, got `{}`", self.leader_id, replicate_request.leader_id ))); } if replicate_request.follower_id != self.follower_id { return Err(IngestV2Error::Internal(format!( - "Invalid argument: expected follower ID `{}`, got `{}`.", + "invalid argument: expected follower ID `{}`, got `{}`", self.follower_id, replicate_request.follower_id ))); } @@ -327,8 +327,8 @@ impl ReplicationTask { if replica_position_inclusive != to_position_inclusive { return Err(IngestV2Error::Internal(format!( - "Bad replica position: expected {to_position_inclusive:?}, got \ - {replica_position_inclusive:?}." 
+ "bad replica position: expected {to_position_inclusive:?}, got \ + {replica_position_inclusive:?}" ))); } replica_shard.set_replica_position_inclusive(replica_position_inclusive); diff --git a/quickwit/quickwit-ingest/src/lib.rs b/quickwit/quickwit-ingest/src/lib.rs index bc7d98459e6..263243f27d8 100644 --- a/quickwit/quickwit-ingest/src/lib.rs +++ b/quickwit/quickwit-ingest/src/lib.rs @@ -78,7 +78,7 @@ pub async fn init_ingest_api( .await .with_context(|| { format!( - "Failed to open the ingest API record log located at `{}`.", + "failed to open the ingest API record log located at `{}`", queues_dir_path.display() ) })?; @@ -99,7 +99,7 @@ pub async fn get_ingest_api_service( return Ok(mailbox.clone()); } bail!( - "Ingest API service with queues directory located at `{}` is not initialized.", + "ingest API service with queues directory located at `{}` is not initialized", queues_dir_path.display() ) } diff --git a/quickwit/quickwit-ingest/src/memory_capacity.rs b/quickwit/quickwit-ingest/src/memory_capacity.rs index 39f1b9c662f..509f0c533ad 100644 --- a/quickwit/quickwit-ingest/src/memory_capacity.rs +++ b/quickwit/quickwit-ingest/src/memory_capacity.rs @@ -22,7 +22,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; #[derive(Debug, Clone, Copy, thiserror::Error)] -#[error("Failed to reserve requested memory capacity. Current capacity: {0}")] +#[error("failed to reserve requested memory capacity. current capacity: {0}")] pub struct ReserveCapacityError(usize); #[derive(Clone)] diff --git a/quickwit/quickwit-integration-tests/src/tests/index_tests.rs b/quickwit/quickwit-integration-tests/src/tests/index_tests.rs index bbad4a3cec1..e887cb82281 100644 --- a/quickwit/quickwit-integration-tests/src/tests/index_tests.rs +++ b/quickwit/quickwit-integration-tests/src/tests/index_tests.rs @@ -454,8 +454,8 @@ async fn test_very_large_index_name() { .unwrap_err(); assert!(error.to_string().ends_with( - "is invalid. Identifiers must match the following regular expression: \ - `^[a-zA-Z][a-zA-Z0-9-_\\.]{2,254}$`..)" + "is invalid. 
identifiers must match the following regular expression: \ + `^[a-zA-Z][a-zA-Z0-9-_\\.]{2,254}$`)" )); // Clean up diff --git a/quickwit/quickwit-janitor/src/actors/garbage_collector.rs b/quickwit/quickwit-janitor/src/actors/garbage_collector.rs index 1edfba84325..39bfcfb1816 100644 --- a/quickwit/quickwit-janitor/src/actors/garbage_collector.rs +++ b/quickwit/quickwit-janitor/src/actors/garbage_collector.rs @@ -480,7 +480,7 @@ mod tests { .times(4) .returning(move |_list_indexes_query: ListIndexesQuery| { Err(MetastoreError::Db { - message: "Fail to list indexes.".to_string(), + message: "failed to list indexes".to_string(), }) }); diff --git a/quickwit/quickwit-janitor/src/error.rs b/quickwit/quickwit-janitor/src/error.rs index 4b6f73cf638..c9581be10e5 100644 --- a/quickwit/quickwit-janitor/src/error.rs +++ b/quickwit/quickwit-janitor/src/error.rs @@ -26,11 +26,11 @@ use thiserror::Error; #[allow(missing_docs)] #[derive(Error, Debug, Serialize, Deserialize)] pub enum JanitorError { - #[error("Invalid delete query: `{0}`.")] + #[error("invalid delete query: `{0}`")] InvalidDeleteQuery(String), - #[error("Internal error: `{0}`")] + #[error("internal error: `{0}`")] Internal(String), - #[error("Metastore error: `{0}`.")] + #[error("metastore error: `{0}`")] Metastore(#[from] MetastoreError), } diff --git a/quickwit/quickwit-macros/src/lib.rs b/quickwit/quickwit-macros/src/lib.rs index 8870aacbfed..e43e489fba9 100644 --- a/quickwit/quickwit-macros/src/lib.rs +++ b/quickwit/quickwit-macros/src/lib.rs @@ -47,7 +47,7 @@ fn serde_multikey_inner(_attr: TokenStream, item: TokenStream) -> Result(item) else { return Err(Error::new( Span::call_site(), - "The attribute can only be applied to struct.", + "the attribute can only be applied to structs", )); }; @@ -71,7 +71,7 @@ fn generate_main_struct(mut input: ItemStruct) -> Result { if !deserialize && !serialize { return Err(Error::new( Span::call_site(), - "`serde_multikey` was applied to a non Serialize/Deserialize struct.", + "`serde_multikey` was applied to a non-Serialize/Deserialize struct", )); } @@ -167,7 +167,7 @@ fn generate_proxy_struct(mut input: ItemStruct) -> Result { let Fields::Named(FieldsNamed { brace_token, named }) = input.fields else { return Err(Error::new( Span::call_site(), - "`serde_multikey` was applied to a tuple-struct or an empty struct.", + "`serde_multikey` was applied to a tuple-struct or an empty struct", )); }; for pair in named.into_pairs() { @@ -192,13 +192,13 @@ fn generate_proxy_struct(mut input: ItemStruct) -> Result { (true, None) => { return Err(Error::new( field_name.span(), - "Structure implement Serialize but no serializer defined", + "structure implements Serialize but no serializer defined", )); } (false, Some(_)) => { return Err(Error::new( field_name.span(), - "Structure doesn't implement Serialize but a serializer is defined", + "structure doesn't implement Serialize but a serializer is defined", )); } } @@ -210,13 +210,13 @@ fn generate_proxy_struct(mut input: ItemStruct) -> Result { (true, None) => { return Err(Error::new( field_name.span(), - "Structure implement Deserialize but no deserializer defined", + "structure implements Deserialize but no deserializer defined", )); } (false, Some(_)) => { return Err(Error::new( field_name.span(), - "Structure doesn't implement Deserialize but a deserializer is defined", + "structure doesn't implement Deserialize but a deserializer is defined", )); } } @@ -473,14 +473,14 @@ fn parse_attributes( let last = multikey_attributes.last().unwrap(); return Err(Error::new( 
last.pound_token.spans[0], - "`serde_multikey` was applied multiple time to the same field.", + "`serde_multikey` was applied multiple times to the same field", )); } let options = if let Some(multikey_attribute) = multikey_attributes.pop() { let Meta::List(meta_list) = multikey_attribute.meta else { return Err(Error::new( multikey_attribute.pound_token.spans[0], - "`serde_multikey` require list-style arguments.", + "`serde_multikey` requires list-style arguments", )); }; let mut options: MultiKeyOptions = syn::parse2(meta_list.tokens)?; diff --git a/quickwit/quickwit-metastore/src/backward_compatibility_tests/mod.rs b/quickwit/quickwit-metastore/src/backward_compatibility_tests/mod.rs index edaf21021af..6cecc18a77c 100644 --- a/quickwit/quickwit-metastore/src/backward_compatibility_tests/mod.rs +++ b/quickwit/quickwit-metastore/src/backward_compatibility_tests/mod.rs @@ -50,11 +50,11 @@ const GLOBAL_QUICKWIT_RESOURCE_VERSION: &str = "0.6"; /// This test makes sure that the resource is using the current `GLOBAL_QUICKWIT_RESOURCE_VERSION`. fn test_global_version(serializable: &T) -> anyhow::Result<()> { let json = serde_json::to_value(serializable).unwrap(); - let version_value = json.get("version").context("No version tag")?; + let version_value = json.get("version").context("no version tag")?; let version_str = version_value.as_str().context("version should be a str")?; if version_str != GLOBAL_QUICKWIT_RESOURCE_VERSION { bail!( - "Version `{version_str}` is not the global quickwit resource version \ + "version `{version_str}` is not the global quickwit resource version \ ({GLOBAL_QUICKWIT_RESOURCE_VERSION})" ); } @@ -83,7 +83,7 @@ where T: TestableForRegression + std::fmt::Debug { fn test_backward_compatibility(test_dir: &Path) -> anyhow::Result<()> where T: TestableForRegression + std::fmt::Debug { for entry in - fs::read_dir(test_dir).with_context(|| format!("Failed to read {}", test_dir.display()))? + fs::read_dir(test_dir).with_context(|| format!("failed to read {}", test_dir.display()))? { let entry = entry?; let path = entry.path(); @@ -197,7 +197,7 @@ pub(crate) fn test_json_backward_compatibility_helper(test_name: &str) -> any where T: TestableForRegression + std::fmt::Debug { let sample_instance: T = T::sample_for_regression(); let test_dir = Path::new("test-data").join(test_name); - test_global_version(&sample_instance).context("Version is not the global version.")?; + test_global_version(&sample_instance).context("version is not the global version")?; test_backward_compatibility::(&test_dir).context("backward-compatibility")?; test_and_update_expected_files::(&test_dir).context("test-and-update")?; test_and_create_new_test::(&test_dir, sample_instance) diff --git a/quickwit/quickwit-metastore/src/checkpoint.rs b/quickwit/quickwit-metastore/src/checkpoint.rs index 3526c925dc8..a4131ebc5ae 100644 --- a/quickwit/quickwit-metastore/src/checkpoint.rs +++ b/quickwit/quickwit-metastore/src/checkpoint.rs @@ -295,7 +295,7 @@ impl<'de> Deserialize<'de> for SourceCheckpoint { /// the checkpoint. 
#[derive(Clone, Debug, Error, Eq, PartialEq, Serialize, Deserialize)] #[error( - "Incompatible checkpoint delta at partition `{partition_id}`: cur_pos:{partition_position:?} \ + "incompatible checkpoint delta at partition `{partition_id}`: cur_pos:{partition_position:?} \ delta_pos:{delta_from_position:?}" )] pub struct IncompatibleCheckpointDelta { @@ -312,7 +312,7 @@ pub enum PartitionDeltaError { #[error(transparent)] IncompatibleCheckpointDelta(#[from] IncompatibleCheckpointDelta), #[error( - "Empty or negative delta at partition `{partition_id}`: {from_position:?} >= \ + "empty or negative delta at partition `{partition_id}`: {from_position:?} >= \ {to_position:?}" )] EmptyOrNegativeDelta { diff --git a/quickwit/quickwit-metastore/src/error.rs b/quickwit/quickwit-metastore/src/error.rs index 2e0fdc2b1b2..f07bfe325e9 100644 --- a/quickwit/quickwit-metastore/src/error.rs +++ b/quickwit/quickwit-metastore/src/error.rs @@ -23,20 +23,20 @@ use quickwit_proto::metastore::MetastoreError; #[derive(Debug, thiserror::Error)] pub enum MetastoreResolverError { /// The metastore config is invalid. - #[error("Invalid metastore config: `{0}`")] + #[error("invalid metastore config: `{0}`")] InvalidConfig(String), /// The URI does not contain sufficient information to connect to the metastore. - #[error("Invalid metastore URI: `{0}`")] + #[error("invalid metastore URI: `{0}`")] InvalidUri(String), /// The requested backend is unsupported or unavailable. - #[error("Unsupported metastore backend: `{0}`")] + #[error("unsupported metastore backend: `{0}`")] UnsupportedBackend(String), /// The config and URI are valid, and are meant to be handled by this resolver, but the /// resolver failed to actually connect to the backend. e.g. connection error, credentials /// error, incompatible version, internal error in a third party, etc. - #[error("Failed to connect to metastore: `{0}`")] + #[error("failed to connect to metastore: `{0}`")] Initialization(#[from] MetastoreError), } diff --git a/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/file_backed_metastore_factory.rs b/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/file_backed_metastore_factory.rs index accdf832d24..10a34275791 100644 --- a/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/file_backed_metastore_factory.rs +++ b/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/file_backed_metastore_factory.rs @@ -134,8 +134,8 @@ impl MetastoreFactory for FileBackedMetastoreFactory { } StorageResolverError::FailedToOpenStorage { kind, message } => { MetastoreResolverError::Initialization(MetastoreError::Internal { - message: format!("Failed to open metastore file `{uri}`."), - cause: format!("StorageError {kind:?}: {message}."), + message: format!("failed to open metastore file `{uri}`"), + cause: format!("StorageError {kind:?}: {message}"), }) } })?; diff --git a/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/mod.rs b/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/mod.rs index 417990fb9ac..39edca64945 100644 --- a/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/mod.rs +++ b/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/mod.rs @@ -342,10 +342,10 @@ impl Metastore for FileBackedMetastore { } } else if index_exists(&*self.storage, &index_id).await? 
{ return Err(MetastoreError::Internal { - message: format!("Index {index_id} cannot be created."), + message: format!("index {index_id} cannot be created"), cause: format!( - "Index {index_id} is not present in the `indexes_states.json` file but its \ - file `{index_id}/metastore.json` is on the storage." + "index {index_id} is not present in the `indexes_states.json` file but its \ + file `{index_id}/metastore.json` is on the storage" ), }); } @@ -602,7 +602,7 @@ impl Metastore for FileBackedMetastore { ListIndexesQuery::All => build_regex_set_from_patterns(vec!["*".to_string()]), }; let index_matcher = index_matcher_result.map_err(|error| MetastoreError::Internal { - message: "Failed to build RegexSet from index patterns`".to_string(), + message: "failed to build RegexSet from index patterns".to_string(), cause: error.to_string(), })?; @@ -785,15 +785,15 @@ async fn get_index_mutex( match index_state { IndexState::Alive(lazy_index) => lazy_index.get().await, IndexState::Creating => Err(MetastoreError::Internal { - message: format!("Index `{index_id}` cannot be retrieved."), - cause: "Index `{index_id}` is in transitioning state `Creating` and this should not \ - happened. Either recreate or delete it." + message: format!("index `{index_id}` cannot be retrieved"), + cause: "index `{index_id}` is in transitioning state `Creating` and this should not \ + happen. either recreate or delete it" .to_string(), }), IndexState::Deleting => Err(MetastoreError::Internal { - message: format!("Index `{index_id}` cannot be retrieved."), - cause: "Index `{index_id}` is in transitioning state `Deleting` and this should not \ - happened. Try to delete it again." + message: format!("index `{index_id}` cannot be retrieved"), + cause: "index `{index_id}` is in transitioning state `Deleting` and this should not \ + happen. try to delete it again" .to_string(), }), } @@ -806,7 +806,7 @@ async fn get_index_mutex( fn build_regex_set_from_patterns(patterns: Vec) -> anyhow::Result { // If there is a match all pattern, no need to go further. if patterns.iter().any(|pattern| pattern == "*") { - return Ok(RegexSet::new([".*".to_string()]).expect("Regex compilation shouldn't fail")); + return Ok(RegexSet::new([".*".to_string()]).expect("regex compilation shouldn't fail")); } let regexes: Vec = patterns .iter() @@ -965,8 +965,9 @@ mod tests { .times(1) .returning(move |path| block_on(ram_storage.get_all(path))); mock_storage.expect_put().times(1).returning(|_uri, _| { - Err(StorageErrorKind::Io - .with_error(anyhow::anyhow!("Oops. Some network problem maybe?"))) + Err(StorageErrorKind::Io.with_error(anyhow::anyhow!( + "oops. perhaps there are some network problems" + ))) }); let metastore = FileBackedMetastore::for_test(Arc::new(mock_storage)); @@ -1244,8 +1245,9 @@ mod tests { .times(1) .returning(move |path, _| { assert!(path == Path::new("indexes_states.json")); - Err(StorageErrorKind::Io - .with_error(anyhow::anyhow!("Oops. Some network problem maybe?"))) + Err(StorageErrorKind::Io.with_error(anyhow::anyhow!( + "oops. perhaps there are some network problems" + ))) }); mock_storage .expect_get_all() @@ -1290,8 +1292,9 @@ mod tests { if path == Path::new("indexes_states.json") { return block_on(ram_storage_clone.put(path, put_payload)); } - Err(StorageErrorKind::Io - .with_error(anyhow::anyhow!("Oops. Some network problem maybe?"))) + Err(StorageErrorKind::Io.with_error(anyhow::anyhow!( + "oops. 
perhaps there are some network problems" + ))) }); mock_storage .expect_get_all() @@ -1357,8 +1360,9 @@ mod tests { ); if path == Path::new("indexes_states.json") { if indexes_json_valid_put == 0 { - return Err(StorageErrorKind::Io - .with_error(anyhow::anyhow!("Oops. Some network problem maybe?"))); + return Err(StorageErrorKind::Io.with_error(anyhow::anyhow!( + "oops. perhaps there are some network problems" + ))); } indexes_json_valid_put -= 1; } @@ -1402,8 +1406,9 @@ mod tests { mock_storage // remove this if we end up changing the semantics of create. .expect_delete() .returning(|_| { - Err(StorageErrorKind::Io - .with_error(anyhow::anyhow!("Oops. Some network problem maybe?"))) + Err(StorageErrorKind::Io.with_error(anyhow::anyhow!( + "oops. perhaps there are some network problems" + ))) }); mock_storage .expect_put() @@ -1453,8 +1458,9 @@ mod tests { assert!(path == Path::new("indexes_states.json")); if path == Path::new("indexes_states.json") { if indexes_json_valid_put == 0 { - return Err(StorageErrorKind::Io - .with_error(anyhow::anyhow!("Oops. Some network problem maybe?"))); + return Err(StorageErrorKind::Io.with_error(anyhow::anyhow!( + "oops. perhaps there are some network problems" + ))); } indexes_json_valid_put -= 1; } @@ -1638,8 +1644,8 @@ mod tests { &build_regex_exprs_from_pattern("index-**-1") .unwrap_err() .to_string(), - "Index ID pattern `index-**-1` is invalid. Patterns must not contain multiple \ - consecutive `*`.", + "index ID pattern `index-**-1` is invalid. patterns must not contain multiple \ + consecutive `*`", ); assert!(build_regex_exprs_from_pattern("-index-1").is_err()); } diff --git a/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/store_operations.rs b/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/store_operations.rs index ac73ada2ceb..b0d5d2bce9c 100644 --- a/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/store_operations.rs +++ b/quickwit/quickwit-metastore/src/metastore/file_backed_metastore/store_operations.rs @@ -64,10 +64,10 @@ fn convert_error(index_id: &str, storage_err: StorageError) -> MetastoreError { index_id: index_id.to_string(), }), StorageErrorKind::Unauthorized => MetastoreError::Forbidden { - message: "The request credentials do not allow for this operation.".to_string(), + message: "the request credentials do not allow for this operation".to_string(), }, _ => MetastoreError::Internal { - message: "Failed to get index files.".to_string(), + message: "failed to get index files".to_string(), cause: storage_err.to_string(), }, } @@ -100,7 +100,7 @@ pub(crate) async fn fetch_or_init_indexes_states( .get_all(indexes_list_path) .await .map_err(|storage_err| MetastoreError::Internal { - message: format!("Failed to get `{INDEXES_STATES_FILENAME}` file."), + message: format!("failed to get `{INDEXES_STATES_FILENAME}` file"), cause: storage_err.to_string(), })?; let indexes_states_deserialized: HashMap = @@ -140,7 +140,7 @@ pub(crate) async fn put_indexes_states( let content: Vec = serde_json::to_vec_pretty(&indexes_states_serializable).map_err(|serde_err| { MetastoreError::Internal { - message: "Failed to serialize indexes map".to_string(), + message: "failed to serialize indexes map".to_string(), cause: serde_err.to_string(), } })?; @@ -148,7 +148,7 @@ pub(crate) async fn put_indexes_states( .put(indexes_list_path, Box::new(content)) .await .map_err(|storage_err| MetastoreError::Internal { - message: format!("Failed to put `{INDEXES_STATES_FILENAME}` file."), + message: format!("failed to put 
`{INDEXES_STATES_FILENAME}` file"), cause: storage_err.to_string(), })?; Ok(()) @@ -173,9 +173,9 @@ pub(crate) async fn fetch_index( if index.index_id() != index_id { return Err(MetastoreError::Internal { - message: "Inconsistent manifest: index_id mismatch.".to_string(), + message: "inconsistent manifest: index_id mismatch".to_string(), cause: format!( - "Expected index_id `{}`, but found `{}`", + "expected index_id `{}`, but found `{}`", index_id, index.index_id() ), @@ -205,7 +205,7 @@ pub(crate) async fn put_index_given_index_id( // Serialize Index. let content: Vec = serde_json::to_vec_pretty(&index).map_err(|serde_err| MetastoreError::Internal { - message: "Failed to serialize Metadata set".to_string(), + message: "failed to serialize metadata set".to_string(), cause: serde_err.to_string(), })?; @@ -246,11 +246,11 @@ pub(crate) async fn delete_index(storage: &dyn Storage, index_id: &str) -> Metas .await .map_err(|storage_err| match storage_err.kind() { StorageErrorKind::Unauthorized => MetastoreError::Forbidden { - message: "The request credentials do not allow for this operation.".to_string(), + message: "the request credentials do not allow for this operation".to_string(), }, _ => MetastoreError::Internal { message: format!( - "Failed to write metastore file to `{}`.", + "failed to write metastore file to `{}`", metadata_path.display() ), cause: storage_err.to_string(), diff --git a/quickwit/quickwit-metastore/src/metastore/grpc_metastore/mod.rs b/quickwit/quickwit-metastore/src/metastore/grpc_metastore/mod.rs index 45554f5cecf..8d32d4a8fb4 100644 --- a/quickwit/quickwit-metastore/src/metastore/grpc_metastore/mod.rs +++ b/quickwit/quickwit-metastore/src/metastore/grpc_metastore/mod.rs @@ -109,7 +109,7 @@ impl MetastoreGrpcClient { let client = client.take(); async move { client.ok_or_else(|| { - std::io::Error::new(std::io::ErrorKind::Other, "Client already taken") + std::io::Error::new(std::io::ErrorKind::Other, "client already taken") }) } })) @@ -132,7 +132,7 @@ impl MetastoreGrpcClient { impl Metastore for MetastoreGrpcClient { async fn check_connectivity(&self) -> anyhow::Result<()> { if self.balance_channel.num_connections() == 0 { - bail!("The metastore service is unavailable."); + bail!("the metastore service is unavailable"); } Ok(()) } @@ -564,8 +564,8 @@ pub fn parse_grpc_error(grpc_error: &Status) -> MetastoreError { if elapsed_error_opt.is_some() { return MetastoreError::Connection { - message: "gRPC request timeout triggered by the channel timeout. This can happens \ - when tonic channel has no registered endpoints." + message: "gRPC request timeout triggered by the channel timeout. 
this can happen \ + when the tonic channel has no registered endpoints" .to_string(), }; } diff --git a/quickwit/quickwit-metastore/src/metastore/index_metadata/serialize.rs b/quickwit/quickwit-metastore/src/metastore/index_metadata/serialize.rs index 75a0a02846e..1e4acd49769 100644 --- a/quickwit/quickwit-metastore/src/metastore/index_metadata/serialize.rs +++ b/quickwit/quickwit-metastore/src/metastore/index_metadata/serialize.rs @@ -91,7 +91,7 @@ impl TryFrom for IndexMetadata { let mut sources: HashMap = Default::default(); for source in v0_6.sources { if sources.contains_key(&source.source_id) { - anyhow::bail!("Source `{}` is defined more than once", source.source_id); + anyhow::bail!("source `{}` is defined more than once", source.source_id); } sources.insert(source.source_id.clone(), source); } diff --git a/quickwit/quickwit-metastore/src/metastore/mod.rs b/quickwit/quickwit-metastore/src/metastore/mod.rs index 84f6807c4df..5cc338ca31c 100644 --- a/quickwit/quickwit-metastore/src/metastore/mod.rs +++ b/quickwit/quickwit-metastore/src/metastore/mod.rs @@ -425,7 +425,7 @@ impl ListSplitsQuery { pub fn try_from_index_uids(index_uids: Vec) -> MetastoreResult { if index_uids.is_empty() { return Err(MetastoreError::Internal { - message: "ListSplitQuery should define at least one index uid.".to_string(), + message: "ListSplitsQuery should define at least one index uid".to_string(), cause: "".to_string(), }); } diff --git a/quickwit/quickwit-metastore/src/metastore/postgresql_metastore.rs b/quickwit/quickwit-metastore/src/metastore/postgresql_metastore.rs index dcdff57b947..df3ab2d163c 100644 --- a/quickwit/quickwit-metastore/src/metastore/postgresql_metastore.rs +++ b/quickwit/quickwit-metastore/src/metastore/postgresql_metastore.rs @@ -82,7 +82,7 @@ async fn establish_connection( .connect_with(pg_connect_options) .await .map_err(|error| { - error!(connection_uri=%connection_uri, error=?error, "Failed to establish connection to database."); + error!(connection_uri=%connection_uri, error=?error, "failed to establish connection to database"); MetastoreError::Connection { message: error.to_string(), } @@ -99,7 +99,7 @@ async fn run_postgres_migrations(pool: &Pool) -> MetastoreResult<()> { tx.rollback().await?; error!(err=?migration_err, "Database migrations failed"); return Err(MetastoreError::Internal { - message: "Failed to run migration on Postgresql database.".to_string(), + message: "failed to run migration on PostgreSQL database".to_string(), cause: migration_err.to_string(), }); } @@ -372,7 +372,7 @@ fn convert_sqlx_err(index_id: &str, sqlx_err: sqlx::Error) -> MetastoreError { (pg_error_code::UNIQUE_VIOLATION, _) => { error!(pg_db_err=?boxed_db_err, "postgresql-error"); MetastoreError::Internal { - message: "Unique key violation.".to_string(), + message: "unique key violation".to_string(), cause: format!("DB error {boxed_db_err:?}"), } } @@ -479,7 +479,7 @@ impl Metastore for PostgresqlMetastore { ListIndexesQuery::IndexIdPatterns(index_id_patterns) => { build_index_id_patterns_sql_query(index_id_patterns).map_err(|error| { MetastoreError::Internal { - message: "Failed to build `list_indexes_metadatas` SQL query".to_string(), + message: "failed to build `list_indexes_metadatas` SQL query".to_string(), cause: error.to_string(), } })? 
@@ -1294,7 +1294,7 @@ fn build_index_id_patterns_sql_query(index_id_patterns: Vec) -> anyhow:: let mut where_like_query = String::new(); for (index_id_pattern_idx, index_id_pattern) in index_id_patterns.iter().enumerate() { validate_index_id_pattern(index_id_pattern).map_err(|error| MetastoreError::Internal { - message: "Failed to build list indexes query".to_string(), + message: "failed to build list indexes query".to_string(), cause: error.to_string(), })?; if index_id_pattern.contains('*') { @@ -1360,7 +1360,7 @@ impl MetastoreFactory for PostgresqlMetastoreFactory { debug!("metastore not found in cache"); let postgresql_metastore_config = metastore_config.as_postgres().ok_or_else(|| { let message = format!( - "Expected PostgreSQL metastore config, got `{:?}`.", + "expected PostgreSQL metastore config, got `{:?}`", metastore_config.backend() ); MetastoreResolverError::InvalidConfig(message) @@ -1677,9 +1677,9 @@ mod tests { build_index_id_patterns_sql_query(vec!["*-index-*-&-last**".to_string()]) .unwrap_err() .to_string(), - "Internal error: Failed to build list indexes query Cause: `Index ID pattern \ - `*-index-*-&-last**` is invalid. Patterns must match the following regular \ - expression: `^[a-zA-Z\\*][a-zA-Z0-9-_\\.\\*]{0,254}$`.`." + "internal error: failed to build list indexes query; cause: `index ID pattern \ + `*-index-*-&-last**` is invalid. patterns must match the following regular \ + expression: `^[a-zA-Z\\*][a-zA-Z0-9-_\\.\\*]{0,254}$`.`" ); } } diff --git a/quickwit/quickwit-metastore/src/metastore_resolver.rs b/quickwit/quickwit-metastore/src/metastore_resolver.rs index 780aee86095..964a8d15431 100644 --- a/quickwit/quickwit-metastore/src/metastore_resolver.rs +++ b/quickwit/quickwit-metastore/src/metastore_resolver.rs @@ -64,7 +64,7 @@ impl MetastoreResolver { Protocol::PostgreSQL => MetastoreBackend::PostgreSQL, _ => { return Err(MetastoreResolverError::UnsupportedBackend( - "no implementation exists for this backend.".to_string(), + "no implementation exists for this backend".to_string(), )) } }; @@ -72,7 +72,7 @@ impl MetastoreResolver { .per_backend_factories .get(&backend) .ok_or(MetastoreResolverError::UnsupportedBackend( - "no metastore factory is registered for this backend.".to_string(), + "no metastore factory is registered for this backend".to_string(), ))?; let metastore = metastore_factory.resolve(metastore_config, uri).await?; Ok(metastore) @@ -158,7 +158,7 @@ impl MetastoreResolverBuilder { for (metastore_factory, metastore_config) in self.per_protocol_factories.values() { ensure!( metastore_factory.backend() == metastore_config.backend(), - "Metastore factory and config backends do not match: {:?} vs. {:?}.", + "Metastore factory and config backends do not match: {:?} vs. 
{:?}", metastore_factory.backend(), metastore_config.backend(), ); diff --git a/quickwit/quickwit-metastore/src/split_metadata.rs b/quickwit/quickwit-metastore/src/split_metadata.rs index d471324b644..6fd9b3d4361 100644 --- a/quickwit/quickwit-metastore/src/split_metadata.rs +++ b/quickwit/quickwit-metastore/src/split_metadata.rs @@ -283,7 +283,7 @@ impl FromStr for SplitState { "MarkedForDeletion" => SplitState::MarkedForDeletion, "ScheduledForDeletion" => SplitState::MarkedForDeletion, // Deprecated "New" => SplitState::Staged, // Deprecated - _ => return Err(format!("Unknown split state `{input}`.")), + _ => return Err(format!("unknown split state `{input}`")), }; Ok(split_state) } diff --git a/quickwit/quickwit-opentelemetry/src/otlp/logs.rs b/quickwit/quickwit-opentelemetry/src/otlp/logs.rs index 57489df3a8a..9cbd91efb87 100644 --- a/quickwit/quickwit-opentelemetry/src/otlp/logs.rs +++ b/quickwit/quickwit-opentelemetry/src/otlp/logs.rs @@ -245,7 +245,7 @@ impl OtlpGrpcLogsService { .await .map_err(|join_error| { error!("Failed to parse log records: {join_error:?}"); - Status::internal("Failed to parse log records.") + Status::internal("failed to parse log records") })??; if num_log_records == num_parse_errors { return Err(tonic::Status::internal(error_message)); diff --git a/quickwit/quickwit-opentelemetry/src/otlp/span_id.rs b/quickwit/quickwit-opentelemetry/src/otlp/span_id.rs index 50d7a30a522..55f2fb2c21f 100644 --- a/quickwit/quickwit-opentelemetry/src/otlp/span_id.rs +++ b/quickwit/quickwit-opentelemetry/src/otlp/span_id.rs @@ -54,7 +54,7 @@ impl<'de> Deserialize<'de> for SpanId { if b64span_id.len() != SpanId::BASE64_LENGTH { let message = format!( - "Base64 span ID must be {} bytes long, got {}.", + "base64 span ID must be {} bytes long, got {}", SpanId::BASE64_LENGTH, b64span_id.len() ); @@ -66,7 +66,7 @@ impl<'de> Deserialize<'de> for SpanId { // estimate and fails. .decode_slice_unchecked(b64span_id.as_bytes(), &mut span_id) .map_err(|error| { - let message = format!("Failed to decode Base64 span ID: {:?}.", error); + let message = format!("failed to decode base64 span ID: {:?}", error); de::Error::custom(message) })?; Ok(SpanId(span_id)) @@ -74,7 +74,7 @@ impl<'de> Deserialize<'de> for SpanId { } #[derive(Debug, thiserror::Error)] -#[error("Span ID must be 8 bytes long, got {0}.")] +#[error("span ID must be 8 bytes long, got {0}")] pub struct TryFromSpanIdError(usize); impl TryFrom<&[u8]> for SpanId { diff --git a/quickwit/quickwit-opentelemetry/src/otlp/trace_id.rs b/quickwit/quickwit-opentelemetry/src/otlp/trace_id.rs index 1007dd56caa..781e717859e 100644 --- a/quickwit/quickwit-opentelemetry/src/otlp/trace_id.rs +++ b/quickwit/quickwit-opentelemetry/src/otlp/trace_id.rs @@ -60,7 +60,7 @@ impl<'de> Deserialize<'de> for TraceId { if b64trace_id.len() != TraceId::BASE64_LENGTH { let message = format!( - "Base64 trace ID must be {} bytes long, got {}.", + "base64 trace ID must be {} bytes long, got {}", TraceId::BASE64_LENGTH, b64trace_id.len() ); @@ -72,7 +72,7 @@ impl<'de> Deserialize<'de> for TraceId { // estimate and fails. 
.decode_slice_unchecked(b64trace_id.as_bytes(), &mut trace_id) .map_err(|error| { - let message = format!("Failed to decode Base64 trace ID: {:?}.", error); + let message = format!("failed to decode base64 trace ID: {:?}", error); de::Error::custom(message) })?; Ok(TraceId(trace_id)) @@ -80,7 +80,7 @@ impl<'de> Deserialize<'de> for TraceId { } #[derive(Debug, thiserror::Error)] -#[error("Trace ID must be 16 bytes long, got {0}.")] +#[error("trace ID must be 16 bytes long, got {0}")] pub struct TryFromTraceIdError(usize); impl TryFrom<&[u8]> for TraceId { diff --git a/quickwit/quickwit-proto/src/control_plane/mod.rs b/quickwit/quickwit-proto/src/control_plane/mod.rs index 644b9c7bc31..e69c9ccfb0b 100644 --- a/quickwit/quickwit-proto/src/control_plane/mod.rs +++ b/quickwit/quickwit-proto/src/control_plane/mod.rs @@ -29,11 +29,11 @@ pub type ControlPlaneResult = std::result::Result; #[derive(Debug, thiserror::Error)] pub enum ControlPlaneError { - #[error("An internal error occurred: {0}.")] + #[error("an internal error occurred: {0}")] Internal(String), - #[error("An internal error occurred: {0}.")] + #[error("an internal error occurred: {0}")] Metastore(#[from] MetastoreError), - #[error("Control plane is unavailable: {0}.")] + #[error("control plane is unavailable: {0}")] Unavailable(String), } @@ -41,7 +41,7 @@ impl From for MetastoreError { fn from(error: ControlPlaneError) -> Self { match error { ControlPlaneError::Internal(message) => MetastoreError::Internal { - message: "TODO".to_string(), + message: "todo".to_string(), cause: message, }, ControlPlaneError::Metastore(error) => error, @@ -76,10 +76,10 @@ impl From> for ControlPlaneError { match error { AskError::ErrorReply(error) => error, AskError::MessageNotDelivered => { - ControlPlaneError::Unavailable("Request not delivered".to_string()) + ControlPlaneError::Unavailable("request not delivered".to_string()) } AskError::ProcessMessageError => ControlPlaneError::Internal( - "An error occurred while processing the request".to_string(), + "an error occurred while processing the request".to_string(), ), } } diff --git a/quickwit/quickwit-proto/src/indexing/mod.rs b/quickwit/quickwit-proto/src/indexing/mod.rs index 6908bf6e376..0d466140e28 100644 --- a/quickwit/quickwit-proto/src/indexing/mod.rs +++ b/quickwit/quickwit-proto/src/indexing/mod.rs @@ -31,31 +31,31 @@ pub type IndexingResult = std::result::Result; #[derive(Debug, thiserror::Error)] pub enum IndexingError { - #[error("Indexing pipeline `{index_id}` for source `{source_id}` does not exist.")] + #[error("indexing pipeline `{index_id}` for source `{source_id}` does not exist")] MissingPipeline { index_id: String, source_id: String }, #[error( - "Pipeline #{pipeline_ord} for index `{index_id}` and source `{source_id}` already exists." 
+ "pipeline #{pipeline_ord} for index `{index_id}` and source `{source_id}` already exists" )] PipelineAlreadyExists { index_id: String, source_id: String, pipeline_ord: usize, }, - #[error("I/O Error `{0}`.")] + #[error("I/O error `{0}`")] Io(io::Error), - #[error("Invalid params `{0}`.")] + #[error("invalid params `{0}`")] InvalidParams(anyhow::Error), - #[error("Spanw pipelines errors `{pipeline_ids:?}`.")] + #[error("Spanw pipelines errors `{pipeline_ids:?}`")] SpawnPipelinesError { pipeline_ids: Vec, }, - #[error("A metastore error occurred: {0}.")] + #[error("a metastore error occurred: {0}")] MetastoreError(String), - #[error("A storage resolver error occurred: {0}.")] + #[error("a storage resolver error occurred: {0}")] StorageResolverError(String), - #[error("An internal error occurred: {0}.")] + #[error("an internal error occurred: {0}")] Internal(String), - #[error("The ingest service is unavailable.")] + #[error("the ingest service is unavailable")] Unavailable, } @@ -65,26 +65,26 @@ impl From for tonic::Status { IndexingError::MissingPipeline { index_id, source_id, - } => tonic::Status::not_found(format!("Missing pipeline {index_id}/{source_id}")), + } => tonic::Status::not_found(format!("missing pipeline {index_id}/{source_id}")), IndexingError::PipelineAlreadyExists { index_id, source_id, pipeline_ord, } => tonic::Status::already_exists(format!( - "Pipeline {index_id}/{source_id} {pipeline_ord} already exists " + "pipeline {index_id}/{source_id} {pipeline_ord} already exists " )), IndexingError::Io(error) => tonic::Status::internal(error.to_string()), IndexingError::InvalidParams(error) => { tonic::Status::invalid_argument(error.to_string()) } IndexingError::SpawnPipelinesError { pipeline_ids } => { - tonic::Status::internal(format!("Error spawning pipelines {:?}", pipeline_ids)) + tonic::Status::internal(format!("error spawning pipelines {:?}", pipeline_ids)) } IndexingError::Internal(string) => tonic::Status::internal(string), IndexingError::MetastoreError(string) => tonic::Status::internal(string), IndexingError::StorageResolverError(string) => tonic::Status::internal(string), IndexingError::Unavailable => { - tonic::Status::unavailable("Indexing service is unavailable.") + tonic::Status::unavailable("indexing service is unavailable") } } } @@ -133,7 +133,7 @@ impl From> for IndexingError { AskError::ErrorReply(error) => error, AskError::MessageNotDelivered => IndexingError::Unavailable, AskError::ProcessMessageError => IndexingError::Internal( - "An error occurred while processing the request".to_string(), + "an error occurred while processing the request".to_string(), ), } } @@ -160,13 +160,13 @@ impl TryFrom<&str> for IndexingTask { let mut iter = index_task_str.rsplit(':'); let source_id = iter.next().ok_or_else(|| { anyhow!( - "Invalid index task format, cannot find source_id in `{}`", + "invalid index task format, cannot find source_id in `{}`", index_task_str ) })?; let part1 = iter.next().ok_or_else(|| { anyhow!( - "Invalid index task format, cannot find index_id in `{}`", + "invalid index task format, cannot find index_id in `{}`", index_task_str ) })?; @@ -218,11 +218,11 @@ mod tests { #[test] fn test_indexing_task_serialization_errors() { assert_eq!( - "Invalid index task format, cannot find index_id in ``", + "invalid index task format, cannot find index_id in ``", IndexingTask::try_from("").unwrap_err().to_string() ); assert_eq!( - "Invalid index task format, cannot find index_id in `foo`", + "invalid index task format, cannot find index_id in `foo`", 
IndexingTask::try_from("foo").unwrap_err().to_string() ); } diff --git a/quickwit/quickwit-proto/src/ingest/mod.rs b/quickwit/quickwit-proto/src/ingest/mod.rs index 4a1dca64c40..6d12f05b99d 100644 --- a/quickwit/quickwit-proto/src/ingest/mod.rs +++ b/quickwit/quickwit-proto/src/ingest/mod.rs @@ -32,13 +32,13 @@ pub type IngestV2Result = std::result::Result; #[derive(Debug, Clone, thiserror::Error)] pub enum IngestV2Error { - #[error("An internal error occurred: {0}.")] + #[error("an internal error occurred: {0}")] Internal(String), - #[error("Failed to connect to ingester `{ingester_id}`.")] + #[error("failed to connect to ingester `{ingester_id}`")] IngesterUnavailable { ingester_id: NodeId }, #[error( - "Ingest service is currently unavailable with {num_ingesters} in the cluster and a \ - replication factor of {replication_factor}." + "ingest service is currently unavailable with {num_ingesters} in the cluster and a \ + replication factor of {replication_factor}" )] ServiceUnavailable { num_ingesters: usize, @@ -50,7 +50,7 @@ pub enum IngestV2Error { // source_id: SourceId, // shard_id: ShardId, // }, - #[error("Failed to open or write to shard.")] + #[error("failed to open or write to shard")] ShardUnavailable { leader_id: NodeId, index_uid: IndexUid, diff --git a/quickwit/quickwit-proto/src/metastore/mod.rs b/quickwit/quickwit-proto/src/metastore/mod.rs index 421ea1d9b53..2e18e822811 100644 --- a/quickwit/quickwit-proto/src/metastore/mod.rs +++ b/quickwit/quickwit-proto/src/metastore/mod.rs @@ -99,46 +99,46 @@ impl fmt::Display for EntityKind { #[derive(Debug, Clone, thiserror::Error, Eq, PartialEq, Serialize, Deserialize)] pub enum MetastoreError { - #[error("{0} already exist(s).")] + #[error("{0} already exist(s)")] AlreadyExists(EntityKind), - #[error("Connection error: {message}.")] + #[error("connection error: {message}")] Connection { message: String }, - #[error("Database error: {message}.")] + #[error("database error: {message}")] Db { message: String }, - #[error("Precondition failed for {entity}: {message}")] + #[error("precondition failed for {entity}: {message}")] FailedPrecondition { entity: EntityKind, message: String }, - #[error("Access forbidden: {message}.")] + #[error("access forbidden: {message}")] Forbidden { message: String }, - #[error("Internal error: {message} Cause: `{cause}`.")] + #[error("internal error: {message}; cause: `{cause}`")] Internal { message: String, cause: String }, - #[error("Invalid argument: {message}.")] + #[error("invalid argument: {message}")] InvalidArgument { message: String }, - #[error("IO error: {message}.")] + #[error("IO error: {message}")] Io { message: String }, - #[error("Failed to deserialize `{struct_name}` from JSON: {message}.")] + #[error("failed to deserialize `{struct_name}` from JSON: {message}")] JsonDeserializeError { struct_name: String, message: String, }, - #[error("Failed to serialize `{struct_name}` to JSON: {message}.")] + #[error("failed to serialize `{struct_name}` to JSON: {message}")] JsonSerializeError { struct_name: String, message: String, }, - #[error("{0} do(es) not exist.")] + #[error("{0} do(es) not exist")] NotFound(EntityKind), - #[error("Metastore unavailable: {0}.")] + #[error("metastore unavailable: {0}")] Unavailable(String), } @@ -155,7 +155,7 @@ impl From for tonic::Status { fn from(metastore_error: MetastoreError) -> Self { let grpc_code = metastore_error.status_code().to_grpc_status_code(); let error_msg = serde_json::to_string(&metastore_error) - .unwrap_or_else(|_| format!("Raw metastore 
error: {metastore_error}")); + .unwrap_or_else(|_| format!("raw metastore error: {metastore_error}")); tonic::Status::new(grpc_code, error_msg) } } diff --git a/quickwit/quickwit-query/src/elastic_query_dsl/multi_match.rs b/quickwit/quickwit-query/src/elastic_query_dsl/multi_match.rs index e8d16b7b661..0a6685590ad 100644 --- a/quickwit/quickwit-query/src/elastic_query_dsl/multi_match.rs +++ b/quickwit/quickwit-query/src/elastic_query_dsl/multi_match.rs @@ -92,13 +92,13 @@ fn deserialize_match_query_for_one_field( fn validate_field_name(field_name: &str) -> Result<(), String> { if field_name.contains('^') { return Err(format!( - "Quickwit does not support field boosting in the multi match query fields (Got `{}`)", + "Quickwit does not support field boosting in the multi match query fields (got `{}`)", field_name )); } if field_name.contains('*') { return Err(format!( - "Quickwit does not support wildcards in the multi match query fields (Got `{}`)", + "Quickwit does not support wildcards in the multi match query fields (got `{}`)", field_name )); } @@ -111,8 +111,8 @@ impl TryFrom for MultiMatchQuery { fn try_from(multi_match_query: MultiMatchQueryForDeserialization) -> Result { if multi_match_query.fields.is_empty() { return Err(serde::de::Error::custom( - "Quickwit does not support multi match query with 0 fields. MultiMatchQuery must \ - have at least one field.", + "Quickwit does not support multi match query with 0 fields. MultiMatchQueries \ + must have at least one field.", )); } for field in &multi_match_query.fields { diff --git a/quickwit/quickwit-query/src/elastic_query_dsl/one_field_map.rs b/quickwit/quickwit-query/src/elastic_query_dsl/one_field_map.rs index 5880a11ed29..5b2d114802b 100644 --- a/quickwit/quickwit-query/src/elastic_query_dsl/one_field_map.rs +++ b/quickwit/quickwit-query/src/elastic_query_dsl/one_field_map.rs @@ -56,18 +56,18 @@ impl<'de, V: Deserialize<'de>> Visitor<'de> for OneFieldMapVisitor { if let Some(num_keys) = map.size_hint() { if num_keys != 1 { return Err(serde::de::Error::custom(format!( - "Expected a single field. Got {num_keys}." + "expected a single field. got {num_keys}" ))); } } let Some((key, val)) = map.next_entry()? else { return Err(serde::de::Error::custom( - "Expected a single field. Got none.", + "expected a single field. got none", )); }; if let Some(second_key) = map.next_key::()? { return Err(serde::de::Error::custom(format!( - "Expected a single field. Got several ({key}, {second_key}, ...)." + "expected a single field. got several ({key}, {second_key}, ...)" ))); } Ok(OneFieldMap { @@ -118,7 +118,7 @@ mod tests { "my-field2": {"count": 2} })); let deser_err = deser.unwrap_err(); - assert_eq!(deser_err.to_string(), "Expected a single field. Got 2."); + assert_eq!(deser_err.to_string(), "expected a single field. got 2"); } #[test] @@ -126,6 +126,6 @@ mod tests { let deser: serde_json::Result> = serde_json::from_value(serde_json::json!({})); let deser_err = deser.unwrap_err(); - assert_eq!(deser_err.to_string(), "Expected a single field. Got 0."); + assert_eq!(deser_err.to_string(), "expected a single field. 
got 0"); } } diff --git a/quickwit/quickwit-query/src/elastic_query_dsl/query_string_query.rs b/quickwit/quickwit-query/src/elastic_query_dsl/query_string_query.rs index fc920a26d17..8c3688fe2ce 100644 --- a/quickwit/quickwit-query/src/elastic_query_dsl/query_string_query.rs +++ b/quickwit/quickwit-query/src/elastic_query_dsl/query_string_query.rs @@ -50,7 +50,7 @@ pub(crate) struct QueryStringQuery { impl ConvertableToQueryAst for QueryStringQuery { fn convert_to_query_ast(self) -> anyhow::Result { if self.default_field.is_some() && self.fields.is_some() { - anyhow::bail!("Fields and default_field cannot be both set in `query_string` queries."); + anyhow::bail!("fields and default_field cannot be both set in `query_string` queries"); } let default_fields: Option> = self .default_field diff --git a/quickwit/quickwit-query/src/elastic_query_dsl/range_query.rs b/quickwit/quickwit-query/src/elastic_query_dsl/range_query.rs index 1724dc90c8f..fcda98ec702 100644 --- a/quickwit/quickwit-query/src/elastic_query_dsl/range_query.rs +++ b/quickwit/quickwit-query/src/elastic_query_dsl/range_query.rs @@ -58,7 +58,7 @@ impl ConvertableToQueryAst for RangeQuery { field, lower_bound: match (gt, gte) { (Some(_gt), Some(_gte)) => { - anyhow::bail!("Both gt and gte are set") + anyhow::bail!("both gt and gte are set") } (Some(gt), None) => Bound::Excluded(gt), (None, Some(gte)) => Bound::Included(gte), @@ -66,7 +66,7 @@ impl ConvertableToQueryAst for RangeQuery { }, upper_bound: match (lt, lte) { (Some(_lt), Some(_lte)) => { - anyhow::bail!("Both lt and lte are set") + anyhow::bail!("both lt and lte are set") } (Some(lt), None) => Bound::Excluded(lt), (None, Some(lte)) => Bound::Included(lte), diff --git a/quickwit/quickwit-query/src/error.rs b/quickwit/quickwit-query/src/error.rs index 44ac0bb489e..464a8d87d32 100644 --- a/quickwit/quickwit-query/src/error.rs +++ b/quickwit/quickwit-query/src/error.rs @@ -21,31 +21,31 @@ use thiserror::Error; #[derive(Error, Debug)] pub enum InvalidQuery { - #[error("Query is incompatible with schema. {0}).")] + #[error("query is incompatible with schema. {0})")] SchemaError(String), - #[error("Expected `{expected_value_type}` boundary for field `{field_name}`")] + #[error("expected `{expected_value_type}` boundary for field `{field_name}`")] InvalidBoundary { expected_value_type: &'static str, field_name: String, }, #[error( - "Expected a `{expected_value_type}` search value for field `{field_name}`. Got `{value}`." 
+ "expected a `{expected_value_type}` search value for field `{field_name}`, got `{value}`" )] InvalidSearchTerm { expected_value_type: &'static str, field_name: String, value: String, }, - #[error("Range query on `{value_type}` field (`{field_name}`) forbidden")] + #[error("range query on `{value_type}` field (`{field_name}`) forbidden")] RangeQueryNotSupportedForField { value_type: &'static str, field_name: String, }, - #[error("Field does not exist: `{full_path}`")] + #[error("field does not exist: `{full_path}`")] FieldDoesNotExist { full_path: String }, #[error("Json field root is not a valid search field: `{full_path}`")] JsonFieldRootNotSearchable { full_path: String }, - #[error("User query should have been parsed")] + #[error("user query should have been parsed")] UserQueryNotParsed, #[error("{0}")] Other(#[from] anyhow::Error), diff --git a/quickwit/quickwit-query/src/not_nan_f32.rs b/quickwit/quickwit-query/src/not_nan_f32.rs index cc36f46f3d6..62f33f18130 100644 --- a/quickwit/quickwit-query/src/not_nan_f32.rs +++ b/quickwit/quickwit-query/src/not_nan_f32.rs @@ -39,7 +39,7 @@ impl TryFrom for NotNaNf32 { fn try_from(possibly_nan: f32) -> Result { if possibly_nan.is_nan() { - return Err("NaN is not supported as a boost value."); + return Err("NaN is not supported as a boost value"); } Ok(NotNaNf32(possibly_nan)) } diff --git a/quickwit/quickwit-query/src/query_ast/field_presence.rs b/quickwit/quickwit-query/src/query_ast/field_presence.rs index ac3fb3e0082..153f5be2794 100644 --- a/quickwit/quickwit-query/src/query_ast/field_presence.rs +++ b/quickwit/quickwit-query/src/query_ast/field_presence.rs @@ -78,7 +78,7 @@ impl BuildTantivyAst for FieldPresenceQuery { _with_validation: bool, ) -> Result { let field_presence_field = schema.get_field(FIELD_PRESENCE_FIELD_NAME).map_err(|_| { - InvalidQuery::SchemaError("Field presence is not available for this split.".to_string()) + InvalidQuery::SchemaError("field presence is not available for this split".to_string()) })?; let (field, field_entry, path) = find_field_or_hit_dynamic(&self.field, schema)?; if field_entry.is_fast() { diff --git a/quickwit/quickwit-query/src/query_ast/full_text_query.rs b/quickwit/quickwit-query/src/query_ast/full_text_query.rs index 567ed3a2fe8..be2e74dba43 100644 --- a/quickwit/quickwit-query/src/query_ast/full_text_query.rs +++ b/quickwit/quickwit-query/src/query_ast/full_text_query.rs @@ -60,7 +60,7 @@ impl FullTextParams { .unwrap_or(text_field_indexing.tokenizer()); tokenizer_manager .get(tokenizer_name) - .with_context(|| format!("No tokenizer named `{}` is registered.", tokenizer_name)) + .with_context(|| format!("no tokenizer named `{}` is registered", tokenizer_name)) } pub(crate) fn tokenize_text_into_terms_json( diff --git a/quickwit/quickwit-query/src/query_ast/phrase_prefix_query.rs b/quickwit/quickwit-query/src/query_ast/phrase_prefix_query.rs index 8279563acc0..ace1f2b1ad8 100644 --- a/quickwit/quickwit-query/src/query_ast/phrase_prefix_query.rs +++ b/quickwit/quickwit-query/src/query_ast/phrase_prefix_query.rs @@ -53,14 +53,14 @@ impl PhrasePrefixQuery { FieldType::Str(ref text_options) => { let text_field_indexing = text_options.get_indexing_options().ok_or_else(|| { InvalidQuery::SchemaError(format!( - "Field {} is not full-text searchable", + "field {} is not full-text searchable", field_entry.name() )) })?; if !text_field_indexing.index_option().has_positions() { return Err(InvalidQuery::SchemaError( - "Trying to run a PhrasePrefix query on a field which does not have \ - positions 
indexed." + "trying to run a phrase prefix query on a field which does not have \ + positions indexed" .to_string(), )); } @@ -77,14 +77,14 @@ impl PhrasePrefixQuery { let text_field_indexing = json_options.get_text_indexing_options().ok_or_else(|| { InvalidQuery::SchemaError(format!( - "Field {} is not full-text searchable", + "field {} is not full-text searchable", field_entry.name() )) })?; if !text_field_indexing.index_option().has_positions() { return Err(InvalidQuery::SchemaError( - "Trying to run a PhrasePrefix query on a field which does not have \ - positions indexed." + "trying to run a PhrasePrefix query on a field which does not have \ + positions indexed" .to_string(), )); } @@ -98,7 +98,7 @@ impl PhrasePrefixQuery { Ok((field, terms)) } _ => Err(InvalidQuery::SchemaError( - "Trying to run a PhrasePrefix query on a non-text field.".to_string(), + "trying to run a PhrasePrefix query on a non-text field".to_string(), )), } } diff --git a/quickwit/quickwit-query/src/query_ast/range_query.rs b/quickwit/quickwit-query/src/query_ast/range_query.rs index 71e29eff18c..a053675821a 100644 --- a/quickwit/quickwit-query/src/query_ast/range_query.rs +++ b/quickwit/quickwit-query/src/query_ast/range_query.rs @@ -231,7 +231,7 @@ impl BuildTantivyAst for RangeQuery { super::utils::find_field_or_hit_dynamic(&self.field, schema)?; if !field_entry.is_fast() { return Err(InvalidQuery::SchemaError(format!( - "Range queries are only supported for fast fields. (`{}` is not a fast field)", + "range queries are only supported for fast fields. (`{}` is not a fast field)", field_entry.name() ))); } diff --git a/quickwit/quickwit-query/src/query_ast/user_input_query.rs b/quickwit/quickwit-query/src/query_ast/user_input_query.rs index 1ea12eb0c71..450a9485065 100644 --- a/quickwit/quickwit-query/src/query_ast/user_input_query.rs +++ b/quickwit/quickwit-query/src/query_ast/user_input_query.rs @@ -66,7 +66,7 @@ impl UserInputQuery { .map(|search_fields| &search_fields[..]) .unwrap_or(default_search_fields); let user_input_ast = tantivy::query_grammar::parse_query(&self.user_text) - .map_err(|_| anyhow::anyhow!("Failed to parse query: `{}`.", &self.user_text))?; + .map_err(|_| anyhow::anyhow!("failed to parse query: `{}`", &self.user_text))?; let default_occur = match self.default_operator { BooleanOperand::And => Occur::Must, BooleanOperand::Or => Occur::Should, @@ -127,7 +127,7 @@ fn convert_user_input_ast_to_query_ast( lower, upper, } => { - let field: String = field.context("Range query without field is not supported.")?; + let field: String = field.context("range query without field is not supported")?; let convert_bound = |user_input_bound: UserInputBound| match user_input_bound { UserInputBound::Inclusive(user_text) => { Bound::Included(JsonLiteral::String(user_text)) @@ -151,7 +151,7 @@ fn convert_user_input_ast_to_query_ast( default_search_fields.to_vec() }; if field_names.is_empty() { - anyhow::bail!("Set query need to target a specific field."); + anyhow::bail!("set query need to target a specific field"); } let mut terms_per_field: HashMap> = Default::default(); let terms: BTreeSet = elements.into_iter().collect(); @@ -199,7 +199,7 @@ fn convert_user_input_literal( .collect() }; if field_names.is_empty() { - anyhow::bail!("Query requires a default search field and none was supplied."); + anyhow::bail!("query requires a default search field and none was supplied"); } let mode = match delimiter { Delimiter::None => FullTextMode::PhraseFallbackToIntersection, @@ -297,7 +297,7 @@ mod tests { 
.unwrap_err(); assert_eq!( &invalid_err.to_string(), - "Query requires a default search field and none was supplied." + "query requires a default search field and none was supplied" ); } { @@ -310,7 +310,7 @@ mod tests { .unwrap_err(); assert_eq!( &invalid_err.to_string(), - "Query requires a default search field and none was supplied." + "query requires a default search field and none was supplied" ); } } diff --git a/quickwit/quickwit-query/src/query_ast/utils.rs b/quickwit/quickwit-query/src/query_ast/utils.rs index 7ac12a4c2b0..8033424c158 100644 --- a/quickwit/quickwit-query/src/query_ast/utils.rs +++ b/quickwit/quickwit-query/src/query_ast/utils.rs @@ -140,7 +140,7 @@ fn compute_query_with_field( FieldType::Str(text_options) => { let text_field_indexing = text_options.get_indexing_options().ok_or_else(|| { InvalidQuery::SchemaError(format!( - "Field {} is not full-text searchable", + "field {} is not full-text searchable", field_entry.name() )) })?; @@ -166,7 +166,7 @@ fn compute_query_with_field( tokenizer_manager, ), FieldType::Facet(_) => Err(InvalidQuery::SchemaError( - "Facets are not supported in Quickwit.".to_string(), + "facets are not supported in Quickwit".to_string(), )), FieldType::Bytes(_) => { let buffer: Vec = parse_value_from_user_text(value, field_entry.name())?; diff --git a/quickwit/quickwit-rest-client/src/error.rs b/quickwit/quickwit-rest-client/src/error.rs index ac911d8fb76..6f2949c18fc 100644 --- a/quickwit/quickwit-rest-client/src/error.rs +++ b/quickwit/quickwit-rest-client/src/error.rs @@ -27,7 +27,7 @@ pub static DEFAULT_CONTENT_TYPE: &str = "application/json"; #[derive(Error, Debug)] pub enum Error { // Error returned by Quickwit server. - #[error("Api error: {0}")] + #[error("API error: {0}")] Api(#[from] ApiError), // Error returned by reqwest lib. #[error(transparent)] @@ -36,13 +36,13 @@ pub enum Error { #[error("IO error: {0}")] Io(#[from] tokio::io::Error), // Internal error returned by quickwit client lib. - #[error("Internal Quickwit client error: {0}")] + #[error("internal Quickwit client error: {0}")] Internal(String), // Json serialization/deserialization error. #[error("Serde JSON error: {0}")] Json(#[from] serde_json::error::Error), // Error returned by url lib when parsing a string. - #[error("Url parsing error: {0}")] + #[error("URL parsing error: {0}")] UrlParse(String), } diff --git a/quickwit/quickwit-search/src/cluster_client.rs b/quickwit/quickwit-search/src/cluster_client.rs index 14d30cb1fd4..cf5500d6b45 100644 --- a/quickwit/quickwit-search/src/cluster_client.rs +++ b/quickwit/quickwit-search/src/cluster_client.rs @@ -650,7 +650,7 @@ mod tests { Err(SearchError::Internal("retry error".to_string())), ) .unwrap_err(); - assert_eq!(merge_error.to_string(), "Internal error: `error`."); + assert_eq!(merge_error.to_string(), "internal error: `error`"); Ok(()) } diff --git a/quickwit/quickwit-search/src/collector.rs b/quickwit/quickwit-search/src/collector.rs index a6fff46a44e..bb11c57937e 100644 --- a/quickwit/quickwit-search/src/collector.rs +++ b/quickwit/quickwit-search/src/collector.rs @@ -544,7 +544,7 @@ impl Collector for QuickwitCollector { } fn map_error(err: postcard::Error) -> TantivyError { - TantivyError::InternalError(format!("Merge Result Postcard Error: {err}")) + TantivyError::InternalError(format!("merge result Postcard error: {err}")) } /// Merges a set of Leaf Results. 
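The `#[error(...)]` attributes rewritten throughout this diff are `thiserror` derive attributes: they generate each error type's `Display` implementation, so the string inside the attribute is exactly what `to_string()`, log lines, and the test assertions above compare against. Below is a minimal sketch of that mechanism, assuming only the `thiserror` crate as a dependency; the `DemoError` enum and its messages are illustrative stand-ins, not part of the Quickwit codebase.

```rust
use thiserror::Error;

// Illustrative stand-in enum following the new message style:
// lowercase start, no trailing punctuation.
#[derive(Debug, Error)]
enum DemoError {
    // `{0}` interpolates the first tuple field.
    #[error("failed to parse query: `{0}`")]
    Parse(String),
    // Named fields interpolate by name.
    #[error("internal error: {message}; cause: `{cause}`")]
    Internal { message: String, cause: String },
}

fn main() {
    // `#[error(...)]` derives `Display`, so `to_string()` yields the message
    // that tests assert against verbatim.
    let err = DemoError::Parse("city:".to_string());
    assert_eq!(err.to_string(), "failed to parse query: `city:`");

    let err = DemoError::Internal {
        message: "split not found".to_string(),
        cause: "timeout".to_string(),
    };
    assert_eq!(
        err.to_string(),
        "internal error: split not found; cause: `timeout`"
    );
}
```

Because `Display` is the single source of the user-facing text, editing only the attribute string, as this diff does, is enough to update every log line, gRPC status message, and `to_string()`-based assertion at once.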
diff --git a/quickwit/quickwit-search/src/error.rs b/quickwit/quickwit-search/src/error.rs index 6ffae9465ac..4865eb4ca75 100644 --- a/quickwit/quickwit-search/src/error.rs +++ b/quickwit/quickwit-search/src/error.rs @@ -30,17 +30,17 @@ use tokio::task::JoinError; #[allow(missing_docs)] #[derive(Error, Debug, Serialize, Deserialize, Clone)] pub enum SearchError { - #[error("Could not find indexes matching the IDs or patterns `{index_id_patterns:?}`.")] + #[error("could not find indexes matching the IDs or patterns `{index_id_patterns:?}`")] IndexesNotFound { index_id_patterns: Vec<String> }, - #[error("Internal error: `{0}`.")] + #[error("internal error: `{0}`")] Internal(String), - #[error("Invalid aggregation request: {0}")] + #[error("invalid aggregation request: {0}")] InvalidAggregationRequest(String), #[error("Invalid argument: {0}")] InvalidArgument(String), #[error("{0}")] InvalidQuery(String), - #[error("Storage not found: `{0}`)")] + #[error("storage not found: `{0}`")] StorageResolver(#[from] StorageResolverError), } @@ -71,7 +71,7 @@ pub fn parse_grpc_error(grpc_error: &tonic::Status) -> SearchError { impl From<TantivyError> for SearchError { fn from(tantivy_error: TantivyError) -> Self { - SearchError::Internal(format!("tantivy error: {tantivy_error}")) + SearchError::Internal(format!("Tantivy error: {tantivy_error}")) } } @@ -119,7 +119,7 @@ impl From for SearchError { impl From<JoinError> for SearchError { fn from(join_error: JoinError) -> SearchError { - SearchError::Internal(format!("Spawned task in root join failed: {join_error}")) + SearchError::Internal(format!("spawned task in root join failed: {join_error}")) } } diff --git a/quickwit/quickwit-search/src/fetch_docs.rs b/quickwit/quickwit-search/src/fetch_docs.rs index 0a72104994c..13cae284d72 100644 --- a/quickwit/quickwit-search/src/fetch_docs.rs +++ b/quickwit/quickwit-search/src/fetch_docs.rs @@ -67,7 +67,7 @@ async fn fetch_docs_to_map( global_doc_addrs.into_iter().cloned().collect(); let split_and_offset = split_offsets_map .get(split_id) - .ok_or_else(|| anyhow::anyhow!("Failed to find offset for split {}", split_id))?; + .ok_or_else(|| anyhow::anyhow!("failed to find offset for split {}", split_id))?; split_fetch_docs_futures.push(fetch_docs_in_split( searcher_context.clone(), global_doc_addrs, @@ -89,7 +89,7 @@ async fn fetch_docs_to_map( .collect_vec(); error!(split_ids = ?split_ids, error = ?error, "Error when fetching docs in splits."); anyhow::anyhow!( - "Error when fetching docs for splits {:?}: {:?}.", + "error when fetching docs for splits {:?}: {:?}", split_ids, error ) @@ -301,7 +301,7 @@ async fn create_fields_snippet_generator( ) -> anyhow::Result { let schema = searcher.schema(); let query_ast_resolved = serde_json::from_str(&snippet_request.query_ast_resolved) - .context("Failed to deserialize QueryAst.")? + .context("failed to deserialize QueryAst")?; let (query, _) = doc_mapper.query(schema.clone(), &query_ast_resolved, false)?; let mut snippet_generators = HashMap::new(); for field_name in &snippet_request.snippet_fields { diff --git a/quickwit/quickwit-search/src/find_trace_ids_collector.rs b/quickwit/quickwit-search/src/find_trace_ids_collector.rs index 57263165ff9..d88da060664 100644 --- a/quickwit/quickwit-search/src/find_trace_ids_collector.rs +++ b/quickwit/quickwit-search/src/find_trace_ids_collector.rs @@ -157,7 +157,7 @@ impl Collector for FindTraceIdsCollector { .bytes(&self.trace_id_field_name)?
.ok_or_else(|| { let err_msg = format!( - "Failed to find column for trace_id field `{}`", + "failed to find column for trace_id field `{}`", self.trace_id_field_name ); tantivy::TantivyError::InternalError(err_msg) diff --git a/quickwit/quickwit-search/src/leaf.rs b/quickwit/quickwit-search/src/leaf.rs index 8b6d9620d2d..909194d9bc0 100644 --- a/quickwit/quickwit-search/src/leaf.rs +++ b/quickwit/quickwit-search/src/leaf.rs @@ -70,7 +70,7 @@ async fn get_split_footer_from_cache_or_fetch( .await .with_context(|| { format!( - "Failed to fetch hotcache and footer from {} for split `{}`", + "failed to fetch hotcache and footer from {} for split `{}`", index_storage.uri(), split_and_footer_offsets.split_id ) @@ -186,8 +186,8 @@ async fn warm_up_term_dict_fields( .get_field(term_dict_field_name) .with_context(|| { format!( - "Couldn't get field named `{term_dict_field_name}` from schema to warm up \ - term dicts." + "couldn't get field named `{term_dict_field_name}` from schema to warm up \ + term dicts" ) })?; @@ -215,7 +215,7 @@ async fn warm_up_postings( let mut fields = Vec::new(); for field_name in field_names.iter() { let field = searcher.schema().get_field(field_name).with_context(|| { - format!("Couldn't get field named `{field_name}` from schema to warm up postings.") + format!("couldn't get field named `{field_name}` from schema to warm up postings") })?; fields.push(field); @@ -380,7 +380,7 @@ async fn leaf_search_single_split( }) .await .map_err(|_| { - crate::SearchError::Internal(format!("Leaf search panicked. split={split_id}")) + crate::SearchError::Internal(format!("leaf search panicked. split={split_id}")) })??; searcher_context @@ -508,7 +508,7 @@ pub async fn leaf_search( merge_collector.merge_fruits(split_search_responses) }) .await - .context("Failed to merge split search responses.")??; + .context("failed to merge split search responses")??; merged_search_response .failed_splits @@ -540,7 +540,7 @@ async fn leaf_list_terms_single_split( .get_field(&search_request.field) .with_context(|| { format!( - "Couldn't get field named {:?} from schema to list terms.", + "couldn't get field named {:?} from schema to list terms", search_request.field ) })?; @@ -576,7 +576,7 @@ async fn leaf_list_terms_single_split( ) .read_bytes_async() .await - .with_context(|| "Failed to load sstable range")?; + .with_context(|| "failed to load sstable range")?; let mut range = dict.range(); if let Some(limit) = search_request.max_hits { @@ -590,7 +590,7 @@ async fn leaf_list_terms_single_split( } let mut stream = range .into_stream() - .with_context(|| "Failed to create stream over sstable")?; + .with_context(|| "failed to create stream over sstable")?; let mut segment_result: Vec> = Vec::with_capacity(search_request.max_hits.unwrap_or(0) as usize); while stream.advance() { diff --git a/quickwit/quickwit-search/src/root.rs b/quickwit/quickwit-search/src/root.rs index bbaa70109a2..7d630e7bb9d 100644 --- a/quickwit/quickwit-search/src/root.rs +++ b/quickwit/quickwit-search/src/root.rs @@ -166,7 +166,7 @@ fn validate_request_and_build_metadatas( &index_metadata.index_config.search_settings, ) .map_err(|err| { - SearchError::Internal(format!("Failed to build doc mapper. Cause: {err}")) + SearchError::Internal(format!("failed to build doc mapper. 
cause: {err}")) })?; let query_ast_resolved_for_index = query_ast .clone() @@ -178,8 +178,8 @@ fn validate_request_and_build_metadatas( if let Some(query_ast_resolved) = &query_ast_resolved_opt { if query_ast_resolved != &query_ast_resolved_for_index { return Err(SearchError::InvalidQuery( - "Resolved query ASTs must be the same across indexes. Resolving queries with \ - different default fields are different between indexes is not supported." + "resolved query ASTs must be the same across indexes. resolving queries with \ + different default fields are different between indexes is not supported" .to_string(), )); } @@ -192,7 +192,7 @@ fn validate_request_and_build_metadatas( match timestamp_field_opt { Some(timestamp_field) if timestamp_field != timestamp_field_for_index => { return Err(SearchError::InvalidQuery( - "The timestamp field (if present) must be the same for all indexes." + "the timestamp field (if present) must be the same for all indexes" .to_string(), )); } @@ -210,7 +210,7 @@ fn validate_request_and_build_metadatas( let index_metadata_for_leaf_search = IndexMetasForLeafSearch { index_uri: index_metadata.index_uri().clone(), doc_mapper_str: serde_json::to_string(&doc_mapper).map_err(|err| { - SearchError::Internal(format!("Failed to serialize doc mapper. Cause: {err}")) + SearchError::Internal(format!("failed to serialize doc mapper. cause: {err}")) })?, }; metadatas_for_leaf.insert( @@ -221,7 +221,7 @@ fn validate_request_and_build_metadatas( let query_ast_resolved = query_ast_resolved_opt.ok_or_else(|| { SearchError::Internal( - "Resolved query AST must be present. This should never happen.".to_string(), + "resolved query AST must be present. this should never happen".to_string(), ) })?; @@ -240,14 +240,14 @@ fn validate_requested_snippet_fields( FieldType::Str(text_options) => { if !text_options.is_stored() { return Err(anyhow::anyhow!( - "The snippet field `{}` must be stored.", + "the snippet field `{}` must be stored", field_name )); } } other => { return Err(anyhow::anyhow!( - "The snippet field `{}` must be of type `Str`, got `{}`.", + "the snippet field `{}` must be of type `Str`, got `{}`", field_name, other.value_type().name() )) @@ -263,7 +263,7 @@ fn validate_sort_by_fields(sort_fields: &[SortField], schema: &Schema) -> crate: } if sort_fields.len() > 2 { return Err(SearchError::InvalidArgument(format!( - "Sort by field must be up to 2 fields {:?}.", + "sort by field must be up to 2 fields {:?}", sort_fields ))); } @@ -302,18 +302,18 @@ fn validate_sort_by_field(field_name: &str, schema: &Schema) -> crate::Result<() let (sort_by_field, _json_path) = schema .find_field_with_default(field_name, dynamic_field_opt) .ok_or_else(|| { - SearchError::InvalidArgument(format!("Unknown field used in `sort by`: {field_name}")) + SearchError::InvalidArgument(format!("unknown field used in `sort by`: {field_name}")) })?; let sort_by_field_entry = schema.get_field_entry(sort_by_field); if matches!(sort_by_field_entry.field_type(), FieldType::Str(_)) { return Err(SearchError::InvalidArgument(format!( - "Sort by field on type text is currently not supported `{field_name}`." 
+ "sort by field on type text is currently not supported `{field_name}`" ))); } if !sort_by_field_entry.is_fast() { return Err(SearchError::InvalidArgument(format!( - "Sort by field must be a fast field, please add the fast property to your field \ - `{field_name}`.", + "sort by field must be a fast field, please add the fast property to your field \ + `{field_name}`", ))); } Ok(()) @@ -361,7 +361,7 @@ fn get_scroll_ttl_duration(search_request: &SearchRequest) -> crate::Result MAX_SCROLL_TTL { return Err(SearchError::InvalidArgument(format!( - "Quickwit only supports scroll TTL period up to {} secs.", + "Quickwit only supports scroll TTL period up to {} secs", MAX_SCROLL_TTL.as_secs() ))); } @@ -877,13 +877,13 @@ pub async fn root_list_terms( let doc_mapper = build_doc_mapper(&index_config.doc_mapping, &index_config.search_settings) .map_err(|err| { - SearchError::Internal(format!("Failed to build doc mapper. Cause: {err}")) + SearchError::Internal(format!("failed to build doc mapper. cause: {err}")) })?; let schema = doc_mapper.schema(); let field = schema.get_field(&list_terms_request.field).map_err(|_| { SearchError::InvalidQuery(format!( - "Failed to list terms in `{}`, field doesn't exist", + "failed to list terms in `{}`, field doesn't exist", list_terms_request.field )) })?; @@ -891,7 +891,7 @@ pub async fn root_list_terms( let field_entry = schema.get_field_entry(field); if !field_entry.is_indexed() { return Err(SearchError::InvalidQuery( - "Trying to list terms on field which isn't indexed".to_string(), + "trying to list terms on field which isn't indexed".to_string(), )); } @@ -939,7 +939,7 @@ pub async fn root_list_terms( .collect(); if !failed_splits.is_empty() { - error!(failed_splits = ?failed_splits, "Leaf search response contains at least one failed split."); + error!(failed_splits = ?failed_splits, "leaf search response contains at least one failed split"); let errors: String = failed_splits .iter() .map(|splits| splits.to_string()) @@ -1007,7 +1007,7 @@ async fn assign_client_fetch_docs_jobs( .get(&split_id) .ok_or_else(|| { crate::SearchError::Internal(format!( - "Received partial hit from an unknown split {split_id}" + "received partial hit from an unknown split {split_id}" )) })? .clone(); @@ -1046,7 +1046,7 @@ pub fn jobs_to_leaf_requests( for (index_uid, job_group) in &jobs.into_iter().group_by(|job| job.index_uid.clone()) { let search_index_meta = search_indexes_metadatas.get(&index_uid).ok_or_else(|| { SearchError::Internal(format!( - "Received search job for an unknown index {index_uid}. It should never happen." + "received search job for an unknown index {index_uid}. it should never happen" )) })?; let leaf_search_request = LeafSearchRequest { @@ -1073,7 +1073,7 @@ pub fn jobs_to_fetch_docs_requests( .get(&index_uid) .ok_or_else(|| { SearchError::Internal(format!( - "Received search job for an unknown index {index_uid}" + "received search job for an unknown index {index_uid}" )) })?; let fetch_docs_jobs: Vec = job_group.collect(); @@ -1131,7 +1131,7 @@ mod tests { check_snippet_fields_validation(&["title".to_string()]).unwrap_err(); assert_eq!( field_not_stored_err.to_string(), - "The snippet field `title` must be stored." 
+ "the snippet field `title` must be stored" ); let field_doesnotexist_err = check_snippet_fields_validation(&["doesnotexist".to_string()]).unwrap_err(); @@ -1143,7 +1143,7 @@ mod tests { check_snippet_fields_validation(&["ip".to_string()]).unwrap_err(); assert_eq!( field_is_not_text_err.to_string(), - "The snippet field `ip` must be of type `Str`, got `IpAddr`." + "the snippet field `ip` must be of type `Str`, got `IpAddr`" ); } @@ -1255,7 +1255,7 @@ mod tests { .unwrap_err(); assert_eq!( timestamp_field_different.to_string(), - "The timestamp field (if present) must be the same for all indexes." + "the timestamp field (if present) must be the same for all indexes" ); } @@ -1282,8 +1282,8 @@ mod tests { .unwrap_err(); assert_eq!( timestamp_field_different.to_string(), - "Resolved query ASTs must be the same across indexes. Resolving queries with \ - different default fields are different between indexes is not supported." + "resolved query ASTs must be the same across indexes. resolving queries with \ + different default fields are different between indexes is not supported" ); } @@ -2518,7 +2518,7 @@ mod tests { assert!(search_response.is_err()); assert_eq!( search_response.unwrap_err().to_string(), - "Invalid aggregation request: unknown variant `termss`, expected one of `range`, \ + "invalid aggregation request: unknown variant `termss`, expected one of `range`, \ `histogram`, `date_histogram`, `terms`, `avg`, `value_count`, `max`, `min`, `stats`, \ `sum`, `percentiles` at line 18 column 13" ); diff --git a/quickwit/quickwit-search/src/scroll_context.rs b/quickwit/quickwit-search/src/scroll_context.rs index 2491c54fec6..62ab4295809 100644 --- a/quickwit/quickwit-search/src/scroll_context.rs +++ b/quickwit/quickwit-search/src/scroll_context.rs @@ -92,7 +92,7 @@ impl ScrollContext { pub fn load(payload: &[u8]) -> anyhow::Result { let scroll_context = - serde_json::from_slice(payload).context("Failed to deserialize context")?; + serde_json::from_slice(payload).context("failed to deserialize context")?; Ok(scroll_context) } @@ -200,7 +200,7 @@ impl FromStr for ScrollKeyAndStartOffset { .decode(scroll_id_str) .map_err(|_| "scroll id is invalid base64.")?; if base64_decoded.len() != 16 + 8 + 4 { - return Err("scroll id payload is not 8 bytes long."); + return Err("scroll id payload is not 8 bytes long"); } let (scroll_ulid_bytes, from_bytes, max_hits_bytes) = ( &base64_decoded[..16], diff --git a/quickwit/quickwit-search/src/search_job_placer.rs b/quickwit/quickwit-search/src/search_job_placer.rs index 8f06dc00273..f78798ed601 100644 --- a/quickwit/quickwit-search/src/search_job_placer.rs +++ b/quickwit/quickwit-search/src/search_job_placer.rs @@ -130,7 +130,7 @@ impl SearchJobPlacer { if candidate_nodes.is_empty() { bail!( - "Failed to assign search jobs. There are no available searcher nodes in the pool." + "failed to assign search jobs. 
there are no available searcher nodes in the pool" ); } jobs.sort_unstable_by(Job::compare_cost); diff --git a/quickwit/quickwit-search/src/search_response_rest.rs b/quickwit/quickwit-search/src/search_response_rest.rs index 894bd1ddc71..0fe86f9ecda 100644 --- a/quickwit/quickwit-search/src/search_response_rest.rs +++ b/quickwit/quickwit-search/src/search_response_rest.rs @@ -58,7 +58,7 @@ impl TryFrom for SearchResponseRest { for hit in search_response.hits { let document: JsonValue = serde_json::from_str(&hit.json).map_err(|err| { SearchError::Internal(format!( - "Failed to serialize document `{}` to JSON: `{}`.", + "failed to deserialize document `{}` from JSON: `{}`", truncate_str(&hit.json, 100), err )) })?; @@ -69,7 +69,7 @@ for snippet_json ... let snippet_opt: JsonValue = serde_json::from_str(&snippet_json).map_err(|err| { SearchError::Internal(format!( - "Failed to serialize snippet `{snippet_json}` to JSON: `{err}`." + "failed to deserialize snippet `{snippet_json}` from JSON: `{err}`" )) })?; snippets.push(snippet_opt); diff --git a/quickwit/quickwit-search/src/search_stream/leaf.rs b/quickwit/quickwit-search/src/search_stream/leaf.rs index 89034756ab5..c551a47dff4 100644 --- a/quickwit/quickwit-search/src/search_stream/leaf.rs +++ b/quickwit/quickwit-search/src/search_stream/leaf.rs @@ -144,14 +144,14 @@ async fn leaf_search_stream_single_split( )?); let output_format = OutputFormat::from_i32(stream_request.output_format) - .ok_or_else(|| SearchError::Internal("Invalid output format specified.".to_string()))?; + .ok_or_else(|| SearchError::Internal("invalid output format specified".to_string()))?; if request_fields.partition_by_fast_field.is_some() && output_format != OutputFormat::ClickHouseRowBinary { return Err(SearchError::Internal( - "Invalid output format specified, only ClickHouseRowBinary is allowed when providing \ - a partitioned-by field." + "invalid output format specified, only ClickHouseRowBinary is allowed when providing \ + a partitioned-by field" .to_string(), )); } @@ -207,7 +207,7 @@ async fn leaf_search_stream_single_split( )?; super::serialize::<i64>(&collected_values, &mut buffer, output_format).map_err( |_| { - SearchError::Internal("Error when serializing i64 during export".to_owned()) + SearchError::Internal("error when serializing i64 during export".to_owned()) }, )?; } @@ -220,7 +220,7 @@ async fn leaf_search_stream_single_split( )?; super::serialize::<u64>(&collected_values, &mut buffer, output_format).map_err( |_| { - SearchError::Internal("Error when serializing u64 during export".to_owned()) + SearchError::Internal("error when serializing u64 during export".to_owned()) }, )?; } @@ -241,7 +241,7 @@ async fn leaf_search_stream_single_split( // We serialize Date as i64 microseconds.
super::serialize::<i64>(&collected_values_as_micros, &mut buffer, output_format) .map_err(|_| { - SearchError::Internal("Error when serializing i64 during export".to_owned()) + SearchError::Internal("error when serializing i64 during export".to_owned()) })?; } (Type::I64, Some(Type::I64)) => { @@ -253,7 +253,7 @@ async fn leaf_search_stream_single_split( )?; super::serialize_partitions::<i64>(collected_values.as_slice(), &mut buffer) .map_err(|_| { - SearchError::Internal("Error when serializing i64 during export".to_owned()) + SearchError::Internal("error when serializing i64 during export".to_owned()) })?; } (Type::U64, Some(Type::U64)) => { @@ -265,27 +265,27 @@ async fn leaf_search_stream_single_split( )?; super::serialize_partitions::<u64>(collected_values.as_slice(), &mut buffer) .map_err(|_| { - SearchError::Internal("Error when serializing i64 during export".to_owned()) + SearchError::Internal("error when serializing u64 during export".to_owned()) })?; } (fast_field_type, None) => { return Err(SearchError::Internal(format!( - "Search stream does not support fast field of type `{fast_field_type:?}`." + "search stream does not support fast field of type `{fast_field_type:?}`" ))); } (fast_field_type, Some(partition_fast_field_type)) => { return Err(SearchError::Internal(format!( - "Search stream does not support the combination of fast field type \ + "search stream does not support the combination of fast field type \ `{fast_field_type:?}` and partition fast field type \ - `{partition_fast_field_type:?}`." + `{partition_fast_field_type:?}`" ))); } }; Result::>::Ok(buffer) }); let buffer = collect_handle.await.map_err(|_| { - error!(split_id = %split.split_id, request_fields=%request_fields, "Failed to collect fast field"); - SearchError::Internal(format!("Error when collecting fast field values for split {}", split.split_id)) + error!(split_id = %split.split_id, request_fields=%request_fields, "failed to collect fast field"); + SearchError::Internal(format!("error when collecting fast field values for split {}", split.split_id)) })??; Ok(LeafSearchStreamResponse { data: buffer, @@ -371,7 +371,7 @@ impl<'a> SearchStreamRequestFields { if !Self::is_fast_field(schema, &fast_field) { return Err(SearchError::InvalidQuery(format!( - "Field `{}` is not a fast field", + "field `{}` is not a fast field", &stream_request.fast_field ))); } @@ -386,7 +386,7 @@ impl<'a> SearchStreamRequestFields { && !Self::is_fast_field(schema, &partition_by_fast_field.unwrap()) { return Err(SearchError::InvalidQuery(format!( - "Field `{}` is not a fast field", + "field `{}` is not a fast field", &stream_request.partition_by_field.as_deref().unwrap() ))); } @@ -647,7 +647,7 @@ mod tests { .await; let res = single_node_stream.next().await.expect("no leaf result"); let error_message = res.unwrap_err().to_string(); - assert!(error_message.contains("Search stream does not support fast field of type `Str`"),); + assert!(error_message.contains("search stream does not support fast field of type `Str`"),); test_sandbox.assert_quit().await; Ok(()) } diff --git a/quickwit/quickwit-search/src/search_stream/root.rs b/quickwit/quickwit-search/src/search_stream/root.rs index 2b7890e391b..f69f601cc6d 100644 --- a/quickwit/quickwit-search/src/search_stream/root.rs +++ b/quickwit/quickwit-search/src/search_stream/root.rs @@ -52,7 +52,7 @@ pub async fn root_search_stream( let doc_mapper = build_doc_mapper(&index_config.doc_mapping, &index_config.search_settings) .map_err(|err| { - SearchError::Internal(format!("Failed to build doc mapper.
Cause: {err}")) + SearchError::Internal(format!("failed to build doc mapper. cause: {err}")) })?; let query_ast: QueryAst = serde_json::from_str(&search_stream_request.query_ast) @@ -84,7 +84,7 @@ pub async fn root_search_stream( .await?; let doc_mapper_str = serde_json::to_string(&doc_mapper).map_err(|err| { - SearchError::Internal(format!("Failed to serialize doc mapper: Cause {err}")) + SearchError::Internal(format!("failed to serialize doc mapper: cause {err}")) })?; let index_uri: &Uri = &index_config.index_uri; @@ -296,7 +296,7 @@ mod tests { let stream = root_search_stream(request, &metastore, cluster_client).await?; let result: Result, SearchError> = stream.try_collect().await; assert_eq!(result.is_err(), true); - assert_eq!(result.unwrap_err().to_string(), "Internal error: `error`."); + assert_eq!(result.unwrap_err().to_string(), "internal error: `error`"); Ok(()) } diff --git a/quickwit/quickwit-search/src/service.rs b/quickwit/quickwit-search/src/service.rs index 227baadd555..d3d60b179ee 100644 --- a/quickwit/quickwit-search/src/service.rs +++ b/quickwit/quickwit-search/src/service.rs @@ -152,7 +152,7 @@ impl SearchServiceImpl { fn deserialize_doc_mapper(doc_mapper_str: &str) -> crate::Result> { let doc_mapper = serde_json::from_str::>(doc_mapper_str).map_err(|err| { - SearchError::Internal(format!("Failed to deserialize doc mapper: `{err}`")) + SearchError::Internal(format!("failed to deserialize doc mapper: `{err}`")) })?; Ok(doc_mapper) } @@ -176,7 +176,7 @@ impl SearchService for SearchServiceImpl { ) -> crate::Result { let search_request = leaf_search_request .search_request - .ok_or_else(|| SearchError::Internal("No search request.".to_string()))?; + .ok_or_else(|| SearchError::Internal("no search request".to_string()))?; let storage = self .storage_resolver .resolve(&Uri::from_well_formed(leaf_search_request.index_uri)) @@ -239,7 +239,7 @@ impl SearchService for SearchServiceImpl { ) -> crate::Result>> { let stream_request = leaf_stream_request .request - .ok_or_else(|| SearchError::Internal("No search request.".to_string()))?; + .ok_or_else(|| SearchError::Internal("no search request".to_string()))?; let storage = self .storage_resolver .resolve(&Uri::from_well_formed(leaf_stream_request.index_uri)) @@ -276,7 +276,7 @@ impl SearchService for SearchServiceImpl { ) -> crate::Result { let search_request = leaf_search_request .list_terms_request - .ok_or_else(|| SearchError::Internal("No search request.".to_string()))?; + .ok_or_else(|| SearchError::Internal("no search request".to_string()))?; let storage = self .storage_resolver .resolve(&Uri::from_well_formed(leaf_search_request.index_uri)) @@ -323,10 +323,10 @@ pub(crate) async fn scroll( let scroll_key: [u8; 16] = current_scroll.scroll_key(); let payload = cluster_client.get_kv(&scroll_key[..]).await; let payload = - payload.ok_or_else(|| SearchError::Internal("scroll key not found.".to_string()))?; + payload.ok_or_else(|| SearchError::Internal("scroll key not found".to_string()))?; let mut scroll_context = ScrollContext::load(&payload) - .map_err(|_| SearchError::Internal("Corrupted scroll context.".to_string()))?; + .map_err(|_| SearchError::Internal("corrupted Scroll context".to_string()))?; let end_doc: u64 = start_doc + scroll_context.max_hits_per_page; diff --git a/quickwit/quickwit-search/src/tests.rs b/quickwit/quickwit-search/src/tests.rs index 206a5236adc..4cf405fe135 100644 --- a/quickwit/quickwit-search/src/tests.rs +++ b/quickwit/quickwit-search/src/tests.rs @@ -443,7 +443,7 @@ async fn 
test_single_node_filtering() -> anyhow::Result<()> { assert!(single_node_response.is_err()); assert_eq!( single_node_response.err().map(|err| err.to_string()), - Some("Invalid query: Field does not exist: `tag`".to_string()) + Some("invalid query: field does not exist: `tag`".to_string()) ); test_sandbox.assert_quit().await; Ok(()) @@ -886,7 +886,7 @@ async fn test_single_node_invalid_sorting_with_query() { let error_msg = single_node_response.unwrap_err().to_string(); assert_eq!( error_msg, - "Invalid argument: Sort by field on type text is currently not supported `description`." + "Invalid argument: sort by field on type text is currently not supported `description`" ); test_sandbox.assert_quit().await; } diff --git a/quickwit/quickwit-serve/src/delete_task_api/handler.rs b/quickwit/quickwit-serve/src/delete_task_api/handler.rs index 2ca6ffab114..462e468a601 100644 --- a/quickwit/quickwit-serve/src/delete_task_api/handler.rs +++ b/quickwit/quickwit-serve/src/delete_task_api/handler.rs @@ -140,7 +140,7 @@ pub async fn post_delete_request( .parse_user_query(&[]) .map_err(|err| JanitorError::InvalidDeleteQuery(err.to_string()))?; let query_ast_json = serde_json::to_string(&query_ast).map_err(|_err| { - JanitorError::Internal("Failed to serialized delete query ast".to_string()) + JanitorError::Internal("failed to serialize delete query ast".to_string()) })?; let delete_query = DeleteQuery { index_uid: index_uid.to_string(), @@ -223,7 +223,7 @@ mod tests { .reply(&delete_query_api_handlers) .await; assert_eq!(resp.status(), 400); - assert!(String::from_utf8_lossy(resp.body()).contains("Invalid delete query")); + assert!(String::from_utf8_lossy(resp.body()).contains("invalid delete query")); // GET delete tasks. let resp = warp::test::request() diff --git a/quickwit/quickwit-serve/src/elastic_search_api/bulk.rs b/quickwit/quickwit-serve/src/elastic_search_api/bulk.rs index 2331edf8a4b..a0c0ba4390b 100644 --- a/quickwit/quickwit-serve/src/elastic_search_api/bulk.rs +++ b/quickwit/quickwit-serve/src/elastic_search_api/bulk.rs @@ -37,9 +37,9 @@ use crate::with_arg; #[derive(Error, Debug)] pub enum IngestRestApiError { - #[error("Failed to parse action `{0}`.")] + #[error("failed to parse action `{0}`")] BulkInvalidAction(String), - #[error("Failed to parse source `{0}`.")] + #[error("failed to parse source `{0}`")] BulkInvalidSource(String), #[error(transparent)] IngestApi(#[from] IngestServiceError), @@ -94,7 +94,7 @@ async fn elastic_ingest_bulk( let action = serde_json::from_slice::(line) .map_err(|error| IngestRestApiError::BulkInvalidAction(error.to_string()))?; let source = lines.next().ok_or_else(|| { - IngestRestApiError::BulkInvalidSource("Expected source for the action.".to_string()) + IngestRestApiError::BulkInvalidSource("expected source for the action".to_string()) })?; // when ingesting on /my-index/_bulk, if _index: is set to something else than my-index, // ES honors it and create the doc in the requested index. That is, `my-index` is a default diff --git a/quickwit/quickwit-serve/src/elastic_search_api/mod.rs b/quickwit/quickwit-serve/src/elastic_search_api/mod.rs index 648ee67493f..d6592ef03e1 100644 --- a/quickwit/quickwit-serve/src/elastic_search_api/mod.rs +++ b/quickwit/quickwit-serve/src/elastic_search_api/mod.rs @@ -200,7 +200,7 @@ mod tests { let error_cause = es_msearch_response.responses[1].error.as_ref().unwrap(); assert_eq!( error_cause.reason.as_ref().unwrap(), - "Internal error: `something bad happened`."
+ "internal error: `something bad happened`" ); } @@ -229,7 +229,7 @@ mod tests { .error .reason .unwrap() - .starts_with("Invalid argument: Failed to parse request header")); + .starts_with("Invalid argument: failed to parse request header")); } #[tokio::test] @@ -257,7 +257,7 @@ mod tests { .error .reason .unwrap() - .starts_with("Invalid argument: Failed to parse request body")); + .starts_with("Invalid argument: failed to parse request body")); } #[tokio::test] @@ -284,7 +284,7 @@ mod tests { .error .reason .unwrap() - .starts_with("Invalid argument: Expect request body after request header")); + .starts_with("Invalid argument: expect request body after request header")); } #[tokio::test] @@ -310,7 +310,7 @@ mod tests { let es_error: ElasticSearchError = serde_json::from_slice(resp.body()).unwrap(); assert_eq!( es_error.error.reason.unwrap(), - "Invalid argument: `_msearch` request header must define at least one index." + "Invalid argument: `_msearch` request header must define at least one index" ); } diff --git a/quickwit/quickwit-serve/src/elastic_search_api/model/search_query_params.rs b/quickwit/quickwit-serve/src/elastic_search_api/model/search_query_params.rs index d164aba2b32..890a78dba99 100644 --- a/quickwit/quickwit-serve/src/elastic_search_api/model/search_query_params.rs +++ b/quickwit/quickwit-serve/src/elastic_search_api/model/search_query_params.rs @@ -156,7 +156,7 @@ fn parse_sort_field_str(sort_field_str: &str) -> Result if let Some((field, order_str)) = sort_field_str.split_once(':') { let order = SortOrder::from_str_name(order_str).ok_or_else(|| { SearchError::InvalidArgument(format!( - "Invalid sort order `{}`. Expected `asc` or `desc`", + "invalid sort order `{}`. expected `asc` or `desc`", field )) })?; @@ -200,7 +200,7 @@ impl SearchQueryParams { return Ok(None); }; let duration: Duration = humantime::parse_duration(scroll_str).map_err(|_err| { - SearchError::InvalidArgument(format!("Invalid scroll duration: `{scroll_str}`")) + SearchError::InvalidArgument(format!("invalid scroll duration: `{scroll_str}`")) })?; Ok(Some(duration)) } diff --git a/quickwit/quickwit-serve/src/elastic_search_api/rest_handler.rs b/quickwit/quickwit-serve/src/elastic_search_api/rest_handler.rs index 5d0d92f515b..0db4b67d99f 100644 --- a/quickwit/quickwit-serve/src/elastic_search_api/rest_handler.rs +++ b/quickwit/quickwit-serve/src/elastic_search_api/rest_handler.rs @@ -169,7 +169,7 @@ fn build_request_for_es_api( .collect(); if sort_fields.len() >= 3 { return Err(ElasticSearchError::from(SearchError::InvalidArgument( - format!("Only up to two sort fields supported at the moment. Got {sort_fields:?}"), + format!("only up to two sort fields supported at the moment. 
got {sort_fields:?}"), ))); } @@ -232,26 +232,26 @@ async fn es_compat_index_multi_search( ) -> Result { let mut search_requests = Vec::new(); let str_payload = from_utf8(&payload) - .map_err(|err| SearchError::InvalidQuery(format!("Invalid UTF-8: {}", err)))?; + .map_err(|err| SearchError::InvalidQuery(format!("invalid UTF-8: {}", err)))?; let mut payload_lines = str_lines(str_payload); while let Some(line) = payload_lines.next() { let request_header = serde_json::from_str::(line).map_err(|err| { SearchError::InvalidArgument(format!( - "Failed to parse request header `{}...`: {}", + "failed to parse request header `{}...`: {}", truncate_str(line, 20), err )) })?; if request_header.index.is_empty() { return Err(ElasticSearchError::from(SearchError::InvalidArgument( - "`_msearch` request header must define at least one index.".to_string(), + "`_msearch` request header must define at least one index".to_string(), ))); } for index in &request_header.index { validate_index_id_pattern(index).map_err(|err| { SearchError::InvalidArgument(format!( - "Request header contains an invalid index: {}", + "request header contains an invalid index: {}", err )) })?; @@ -260,12 +260,12 @@ async fn es_compat_index_multi_search( let search_body = payload_lines .next() .ok_or_else(|| { - SearchError::InvalidArgument("Expect request body after request header".to_string()) + SearchError::InvalidArgument("expect request body after request header".to_string()) }) .and_then(|line| { serde_json::from_str::(line).map_err(|err| { SearchError::InvalidArgument(format!( - "Failed to parse request body `{}...`: {}", + "failed to parse request body `{}...`: {}", truncate_str(line, 20), err )) @@ -309,7 +309,7 @@ async fn es_scroll( ) -> Result { let start_instant = Instant::now(); let Some(scroll_id) = scroll_query_params.scroll_id.clone() else { - return Err(SearchError::InvalidArgument("Missing scroll_id".to_string()).into()); + return Err(SearchError::InvalidArgument("missing scroll_id".to_string()).into()); }; let scroll_ttl_secs: Option = if let Some(scroll_ttl) = scroll_query_params.scroll { let scroll_ttl_duration = humantime::parse_duration(&scroll_ttl) diff --git a/quickwit/quickwit-serve/src/index_api/rest_handler.rs b/quickwit/quickwit-serve/src/index_api/rest_handler.rs index 36d63e04ed1..e87d155fbfc 100644 --- a/quickwit/quickwit-serve/src/index_api/rest_handler.rs +++ b/quickwit/quickwit-serve/src/index_api/rest_handler.rs @@ -94,8 +94,8 @@ fn json_body( #[derive(Debug, Error)] #[error( - "Unsupported content-type header. Choices are application/json, application/toml and \ - application/yaml." + "unsupported content-type header. choices are application/json, application/toml and \ + application/yaml" )] pub struct UnsupportedContentType; impl warp::reject::Reject for UnsupportedContentType {} @@ -551,8 +551,8 @@ async fn create_source( .map_err(IndexServiceError::InvalidConfig)?; if let SourceParams::File(_) = &source_config.source_params { return Err(IndexServiceError::OperationNotAllowed( - "File sources are limited to a local usage. Please use the CLI command `quickwit tool \ - local-ingest` to ingest data from a file." + "file sources are limited to a local usage. 
please use the CLI command `quickwit tool \ + local-ingest` to ingest data from a file" .to_string(), )); } @@ -674,8 +674,8 @@ async fn toggle_source( let index_uid: IndexUid = metastore.index_metadata(&index_id).await?.index_uid; if [CLI_INGEST_SOURCE_ID, INGEST_API_SOURCE_ID].contains(&source_id.as_str()) { return Err(IndexServiceError::OperationNotAllowed(format!( - "Source `{source_id}` is managed by Quickwit, you cannot enable or disable a source \ - managed by Quickwit." + "source `{source_id}` is managed by Quickwit, you cannot enable or disable a source \ + managed by Quickwit" ))); } metastore @@ -717,8 +717,8 @@ async fn delete_source( let index_uid: IndexUid = metastore.index_metadata(&index_id).await?.index_uid; if [INGEST_API_SOURCE_ID, CLI_INGEST_SOURCE_ID].contains(&source_id.as_str()) { return Err(IndexServiceError::OperationNotAllowed(format!( - "Source `{source_id}` is managed by Quickwit, you cannot delete a source managed by \ - Quickwit." + "source `{source_id}` is managed by Quickwit, you cannot delete a source managed by \ + Quickwit" ))); } metastore.delete_source(index_uid, &source_id).await?; @@ -762,7 +762,7 @@ async fn analyze_request(request: AnalyzeRequest) -> Result ApiError { error!("REST server error: {:?}", rejection); ApiError { service_code: ServiceErrorCode::Internal, - message: "Internal server error.".to_string(), + message: "internal server error".to_string(), } } } diff --git a/quickwit/quickwit-serve/src/search_api/mod.rs b/quickwit/quickwit-serve/src/search_api/mod.rs index 847d9b4a092..60b1bf0cf49 100644 --- a/quickwit/quickwit-serve/src/search_api/mod.rs +++ b/quickwit/quickwit-serve/src/search_api/mod.rs @@ -112,7 +112,7 @@ mod tests { .withf(|request| request.split_offsets.len() == 1) // Retry request on the failing split. .return_once( |_leaf_search_req: quickwit_proto::search::LeafSearchStreamRequest| { - Err(SearchError::Internal("Error again on `split2`".to_string())) + Err(SearchError::Internal("error again on `split2`".to_string())) }, ); // The test will hang on indefinitely if we don't drop the sender. @@ -132,7 +132,7 @@ mod tests { let search_error = search_stream_result.unwrap_err(); assert_eq!( search_error.to_string(), - "Internal error: `Internal error: `Error again on `split2``.`." + "internal error: `internal error: `error again on `split2```" ); Ok(()) } diff --git a/quickwit/quickwit-serve/src/search_api/rest_handler.rs b/quickwit/quickwit-serve/src/search_api/rest_handler.rs index de5ffbf42fe..8bc7ed3a5c6 100644 --- a/quickwit/quickwit-serve/src/search_api/rest_handler.rs +++ b/quickwit/quickwit-serve/src/search_api/rest_handler.rs @@ -132,7 +132,7 @@ fn deserialize_non_empty_string<'de, D>(deserializer: D) -> Result { let value = String::deserialize(deserializer)?; if value.is_empty() { - return Err(de::Error::custom("Expected a non-empty string field.")); + return Err(de::Error::custom("expected a non-empty string field")); } Ok(value) } @@ -602,8 +602,8 @@ mod tests { .unwrap(); assert_eq!( rejection.0, - "Index ID pattern `quickwit-demo-index**` is invalid. Patterns must not contain \ - multiple consecutive `*`." + "index ID pattern `quickwit-demo-index**` is invalid. patterns must not contain \ + multiple consecutive `*`" ); } @@ -1047,10 +1047,7 @@ mod tests { .await .unwrap_err(); let parse_error = rejection.find::().unwrap(); - assert_eq!( - parse_error.to_string(), - "Expected a non-empty string field." 
- ); + assert_eq!(parse_error.to_string(), "expected a non-empty string field"); } #[tokio::test] diff --git a/quickwit/quickwit-storage/src/bundle_storage.rs b/quickwit/quickwit-storage/src/bundle_storage.rs index cf02cf2b94b..e5128222be2 100644 --- a/quickwit/quickwit-storage/src/bundle_storage.rs +++ b/quickwit/quickwit-storage/src/bundle_storage.rs @@ -96,7 +96,7 @@ impl BundleStorage { } #[derive(Debug, Error)] -#[error("CorruptedData. error: {error:?}")] +#[error("corrupted data. error: {error:?}")] pub struct CorruptedData { #[from] #[source] @@ -135,7 +135,7 @@ impl VersionedComponent for BundleStorageFileOffsetsVersions { } fn deserialize_impl(&self, bytes: &mut OwnedBytes) -> anyhow::Result { - serde_json::from_reader(bytes).context("Deserializing bundle storage file offsets failed") + serde_json::from_reader(bytes).context("deserializing bundle storage file offsets failed") } } @@ -241,7 +241,7 @@ impl Storage for BundleStorage { ) -> crate::StorageResult { let file_offsets = self.metadata.get(path).ok_or_else(|| { crate::StorageErrorKind::NotFound - .with_error(anyhow::anyhow!("Missing file `{}`", path.display())) + .with_error(anyhow::anyhow!("missing file `{}`", path.display())) })?; let new_range = file_offsets.start as usize + range.start..file_offsets.start as usize + range.end; @@ -253,7 +253,7 @@ impl Storage for BundleStorage { async fn get_all(&self, path: &Path) -> crate::StorageResult { let file_offsets = self.metadata.get(path).ok_or_else(|| { crate::StorageErrorKind::NotFound - .with_error(anyhow::anyhow!("Missing file `{}`", path.display())) + .with_error(anyhow::anyhow!("missing file `{}`", path.display())) })?; self.storage .get_slice( @@ -282,7 +282,7 @@ impl Storage for BundleStorage { async fn file_num_bytes(&self, path: &Path) -> StorageResult { let file_range = self.metadata.get(path).ok_or_else(|| { crate::StorageErrorKind::NotFound - .with_error(anyhow::anyhow!("Missing file `{}`", path.display())) + .with_error(anyhow::anyhow!("missing file `{}`", path.display())) })?; Ok(file_range.end - file_range.start) } diff --git a/quickwit/quickwit-storage/src/error.rs b/quickwit/quickwit-storage/src/error.rs index 615f64d07f8..3b1a6fa57b6 100644 --- a/quickwit/quickwit-storage/src/error.rs +++ b/quickwit/quickwit-storage/src/error.rs @@ -48,22 +48,22 @@ pub enum StorageErrorKind { #[derive(Debug, Clone, thiserror::Error, Serialize, Deserialize)] pub enum StorageResolverError { /// The storage config is invalid. - #[error("Invalid storage config: `{0}`")] + #[error("invalid storage config: `{0}`")] InvalidConfig(String), /// The URI does not contain sufficient information to connect to the storage. - #[error("Invalid storage URI: `{0}`")] + #[error("invalid storage URI: `{0}`")] InvalidUri(String), /// The requested backend is unsupported or unavailable. - #[error("Unsupported storage backend: `{0}`")] + #[error("unsupported storage backend: `{0}`")] UnsupportedBackend(String), /// The URI is valid, and is meant to be handled by this resolver, /// but the resolver failed to actually connect to the storage. /// e.g. Connection error, credential error, incompatible version, /// internal error in third party etc. - #[error("Failed to open storage {kind:?}: {message}.")] + #[error("failed to open storage {kind:?}: {message}")] FailedToOpenStorage { kind: crate::StorageErrorKind, message: String, @@ -93,7 +93,7 @@ impl From for io::Error { /// Generic StorageError. 
#[derive(Debug, Clone, Error)] -#[error("StorageError(kind={kind:?}, source={source})")] +#[error("storage error (kind={kind:?}, source={source})")] #[allow(missing_docs)] pub struct StorageError { pub kind: StorageErrorKind, @@ -193,7 +193,7 @@ impl fmt::Display for BulkDeleteError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "Bulk delete error ({} success(es), {} failure(s), {} unattempted)", + "bulk delete error ({} success(es), {} failure(s), {} unattempted)", self.successes.len(), self.failures.len(), self.unattempted.len() diff --git a/quickwit/quickwit-storage/src/lib.rs b/quickwit/quickwit-storage/src/lib.rs index 814f1746205..1904a0e4a6a 100644 --- a/quickwit/quickwit-storage/src/lib.rs +++ b/quickwit/quickwit-storage/src/lib.rs @@ -95,11 +95,11 @@ pub async fn load_file( ) -> anyhow::Result { let parent = uri .parent() - .ok_or_else(|| anyhow::anyhow!("URI `{uri}` is not a valid file URI."))?; + .ok_or_else(|| anyhow::anyhow!("URI `{uri}` is not a valid file URI"))?; let storage = storage_resolver.resolve(&parent).await?; let file_name = uri .file_name() - .ok_or_else(|| anyhow::anyhow!("URI `{uri}` is not a valid file URI."))?; + .ok_or_else(|| anyhow::anyhow!("URI `{uri}` is not a valid file URI"))?; let bytes = storage.get_all(file_name).await?; Ok(bytes) } diff --git a/quickwit/quickwit-storage/src/local_file_storage.rs b/quickwit/quickwit-storage/src/local_file_storage.rs index 532a6832d31..709bf958deb 100644 --- a/quickwit/quickwit-storage/src/local_file_storage.rs +++ b/quickwit/quickwit-storage/src/local_file_storage.rs @@ -70,7 +70,7 @@ impl LocalFileStorage { root: root.to_path_buf(), }) .ok_or_else(|| { - let message = format!("URI `{uri}` is not a valid file URI."); + let message = format!("URI `{uri}` is not a valid file URI"); StorageResolverError::InvalidUri(message) }) } @@ -109,7 +109,7 @@ fn ensure_valid_relative_path(path: &Path) -> StorageResult<()> { // We forbid `Path` components that are breaking the assumption that // root.join(path) is a child of root (if we omit fs links). return Err(StorageErrorKind::Unauthorized.with_error(anyhow::anyhow!( - "Path `{}` is forbidden. Only simple relative path are allowed.", + "path `{}` is forbidden. 
only simple relative paths are allowed", path.display() ))); } @@ -183,7 +183,7 @@ impl Storage for LocalFileStorage { ) -> crate::StorageResult<()> { let full_path = self.full_path(path)?; let parent_dir = full_path.parent().ok_or_else(|| { - let err = anyhow::anyhow!("No parent directory for {full_path:?}"); + let err = anyhow::anyhow!("no parent directory for {full_path:?}"); StorageErrorKind::Internal.with_error(err) })?; @@ -299,7 +299,7 @@ impl Storage for LocalFileStorage { let full_path = self.full_path(path)?; let content_bytes = fs::read(full_path).await.map_err(|err| { StorageError::from(err).add_context(format!( - "Failed to read file {}/{}", + "failed to read file {}/{}", self.uri(), path.to_string_lossy() )) @@ -319,7 +319,7 @@ impl Storage for LocalFileStorage { Ok(metadata.len()) } else { Err(StorageErrorKind::NotFound.with_error(anyhow::anyhow!( - "File `{}` is actually a directory.", + "file `{}` is actually a directory", path.display() ))) } diff --git a/quickwit/quickwit-storage/src/object_storage/azure_blob_storage.rs b/quickwit/quickwit-storage/src/object_storage/azure_blob_storage.rs index 289539bafbd..ffe65998166 100644 --- a/quickwit/quickwit-storage/src/object_storage/azure_blob_storage.rs +++ b/quickwit/quickwit-storage/src/object_storage/azure_blob_storage.rs @@ -157,20 +157,20 @@ impl AzureBlobStorage { ) -> Result { let account_name = azure_storage_config.resolve_account_name().ok_or_else(|| { let message = format!( - "Could not find Azure account name in environment variable `{}` or storage config.", + "could not find Azure account name in environment variable `{}` or storage config", AzureStorageConfig::AZURE_STORAGE_ACCOUNT_ENV_VAR ); StorageResolverError::InvalidConfig(message) })?; let access_key = azure_storage_config.resolve_access_key().ok_or_else(|| { let message = format!( - "Could not find Azure access key in environment variable `{}` or storage config.", + "could not find Azure access key in environment variable `{}` or storage config", AzureStorageConfig::AZURE_STORAGE_ACCESS_KEY_ENV_VAR ); StorageResolverError::InvalidConfig(message) })?; let (container_name, prefix) = parse_azure_uri(uri).ok_or_else(|| { - let message = format!("Failed to extract container name from Azure URI: {uri}"); + let message = format!("failed to extract container name from Azure URI: {uri}"); StorageResolverError::InvalidUri(message) })?; let azure_blob_storage = @@ -418,7 +418,7 @@ impl Storage for AzureBlobStorage { .map(OwnedBytes::new) .map_err(|err| { err.add_context(format!( - "Failed to fetch slice {:?} for object: {}/{}", + "failed to fetch slice {:?} for object: {}/{}", range, self.uri, path.display(), )) @@ -434,7 +434,7 @@ impl Storage for AzureBlobStorage { .map(OwnedBytes::new) .map_err(|err| { err.add_context(format!( - "Failed to fetch object: {}/{}", + "failed to fetch object: {}/{}", self.uri, path.display() )) @@ -522,7 +522,7 @@ async fn download_all( } #[derive(Error, Debug)] -#[error("AzureErrorWrapper(inner={inner})")] +#[error("Azure error wrapper (inner={inner})")] struct AzureErrorWrapper { inner: AzureError, } diff --git a/quickwit/quickwit-storage/src/object_storage/s3_compatible_storage.rs b/quickwit/quickwit-storage/src/object_storage/s3_compatible_storage.rs index 1fa7b916ac6..a73fea4e2cd 100644 --- a/quickwit/quickwit-storage/src/object_storage/s3_compatible_storage.rs +++ b/quickwit/quickwit-storage/src/object_storage/s3_compatible_storage.rs @@ -162,7 +162,7 @@ impl S3CompatibleObjectStorage { uri: &Uri, ) -> Result { let (bucket, prefix) = 
parse_s3_uri(uri).ok_or_else(|| { - let message = format!("Failed to extract bucket name from S3 URI: {uri}"); + let message = format!("failed to extract bucket name from S3 URI: {uri}"); StorageResolverError::InvalidUri(message) })?; let storage = Self::new(s3_storage_config, uri.clone(), bucket).await?; @@ -321,7 +321,7 @@ impl S3CompatibleObjectStorage { .upload_id .ok_or_else(|| { StorageErrorKind::Internal - .with_error(anyhow!("The returned multipart upload id was null.")) + .with_error(anyhow!("the returned multipart upload ID was null")) })?; Ok(MultipartUploadId(upload_id)) } @@ -739,7 +739,7 @@ impl Storage for S3CompatibleObjectStorage { .map(OwnedBytes::new) .map_err(|err| { err.add_context(format!( - "Failed to fetch slice {:?} for object: {}/{}", + "failed to fetch slice {:?} for object: {}/{}", range, self.uri, path.display(), )) @@ -756,7 +756,7 @@ impl Storage for S3CompatibleObjectStorage { .map(OwnedBytes::new) .map_err(|err| { err.add_context(format!( - "Failed to fetch object: {}/{}", + "failed to fetch object: {}/{}", self.uri, path.display() )) diff --git a/quickwit/quickwit-storage/src/ram_storage.rs b/quickwit/quickwit-storage/src/ram_storage.rs index 8816356a7b1..57a8e6cf849 100644 --- a/quickwit/quickwit-storage/src/ram_storage.rs +++ b/quickwit/quickwit-storage/src/ram_storage.rs @@ -102,7 +102,7 @@ impl Storage for RamStorage { async fn copy_to(&self, path: &Path, output: &mut dyn SendableAsync) -> StorageResult<()> { let payload_bytes = self.get_data(path).await.ok_or_else(|| { StorageErrorKind::NotFound - .with_error(anyhow::anyhow!("Failed to find dest_path {:?}", path)) + .with_error(anyhow::anyhow!("failed to find dest_path {:?}", path)) })?; output.write_all(&payload_bytes).await?; output.flush().await?; @@ -112,7 +112,7 @@ impl Storage for RamStorage { async fn get_slice(&self, path: &Path, range: Range) -> StorageResult { let payload_bytes = self.get_data(path).await.ok_or_else(|| { StorageErrorKind::NotFound - .with_error(anyhow::anyhow!("Failed to find dest_path {:?}", path)) + .with_error(anyhow::anyhow!("failed to find dest_path {:?}", path)) })?; Ok(payload_bytes.slice(range.start..range.end)) } @@ -133,7 +133,7 @@ impl Storage for RamStorage { async fn get_all(&self, path: &Path) -> StorageResult { let payload_bytes = self.get_data(path).await.ok_or_else(|| { StorageErrorKind::NotFound - .with_error(anyhow::anyhow!("Failed to find dest_path {:?}", path)) + .with_error(anyhow::anyhow!("failed to find dest_path {:?}", path)) })?; Ok(payload_bytes) } @@ -146,7 +146,7 @@ impl Storage for RamStorage { if let Some(file_bytes) = self.files.read().await.get(path) { Ok(file_bytes.len() as u64) } else { - let err = anyhow::anyhow!("Missing file `{}`", path.display()); + let err = anyhow::anyhow!("missing file `{}`", path.display()); Err(StorageErrorKind::NotFound.with_error(err)) } } @@ -202,7 +202,7 @@ impl StorageFactory for RamStorageFactory { uri.clone(), )), _ => { - let message = format!("URI `{uri}` is not a valid RAM URI."); + let message = format!("URI `{uri}` is not a valid RAM URI"); Err(StorageResolverError::InvalidUri(message)) } } diff --git a/quickwit/quickwit-storage/src/storage_resolver.rs b/quickwit/quickwit-storage/src/storage_resolver.rs index b9d5af2b666..38ebafa0169 100644 --- a/quickwit/quickwit-storage/src/storage_resolver.rs +++ b/quickwit/quickwit-storage/src/storage_resolver.rs @@ -60,14 +60,14 @@ impl StorageResolver { Protocol::S3 => StorageBackend::S3, _ => { let message = format!( - "Quickwit does not support {} as a storage 
backend.", + "Quickwit does not support {} as a storage backend", uri.protocol() ); return Err(StorageResolverError::UnsupportedBackend(message)); } }; let storage_factory = self.per_backend_factories.get(&backend).ok_or({ - let message = format!("no storage factory is registered for {}.", uri.protocol()); + let message = format!("no storage factory is registered for {}", uri.protocol()); StorageResolverError::UnsupportedBackend(message) })?; let storage = storage_factory.resolve(uri).await?; diff --git a/quickwit/quickwit-storage/src/versioned_component.rs b/quickwit/quickwit-storage/src/versioned_component.rs index 79c3fd03d5a..ed132ed34e0 100644 --- a/quickwit/quickwit-storage/src/versioned_component.rs +++ b/quickwit/quickwit-storage/src/versioned_component.rs @@ -90,7 +90,7 @@ fn try_read_version(bytes: &mut OwnedBytes) -> anyhow::Re let mut header_bytes: [u8; 8] = [0u8; 8]; bytes .read_exact(&mut header_bytes[..]) - .with_context(|| format!("Failed to read header for {}", V::component_name()))?; + .with_context(|| format!("failed to read header for {}", V::component_name()))?; try_deserialize_from_bytes::(header_bytes) } @@ -102,12 +102,12 @@ fn try_read_version(bytes: &mut OwnedBytes) -> anyhow::Re fn try_deserialize_from_bytes(header_bytes: [u8; 8]) -> anyhow::Result { let magic_number = u32::from_le_bytes(header_bytes[0..4].try_into().unwrap()); if magic_number != V::MAGIC_NUMBER { - anyhow::bail!("Hot directory metadata's magic number does not match."); + anyhow::bail!("hot directory metadata's magic number does not match"); } let version_code: u32 = u32::from_le_bytes(header_bytes[4..8].try_into().unwrap()); V::try_from_version_code_impl(version_code).with_context(|| { format!( - "Version code {} is not supported for {}", + "version code {} is not supported for {}", version_code, V::component_name() ) @@ -158,7 +158,7 @@ mod tests { match self { FakeComponentCodec::V1 => { if bytes.len() < 8 { - anyhow::bail!("Not enough bytes to deserialize"); + anyhow::bail!("not enough bytes to deserialize"); } let value_bytes: [u8; 8] = bytes[0..8].try_into().unwrap(); let value: u32 = u64::from_le_bytes(value_bytes) as u32; @@ -166,7 +166,7 @@ mod tests { } FakeComponentCodec::V2 => { if bytes.len() < 4 { - anyhow::bail!("Not enough bytes to deserialize"); + anyhow::bail!("not enough bytes to deserialize"); } let value_bytes: [u8; 4] = bytes[0..4].try_into().unwrap(); bytes.advance(4); diff --git a/quickwit/quickwit-storage/tests/azure_storage.rs b/quickwit/quickwit-storage/tests/azure_storage.rs index 6a95795fba1..786850d0fcf 100644 --- a/quickwit/quickwit-storage/tests/azure_storage.rs +++ b/quickwit/quickwit-storage/tests/azure_storage.rs @@ -46,7 +46,7 @@ async fn test_suite_on_azure_storage() -> anyhow::Result<()> { ); quickwit_storage::storage_test_single_part_upload(&mut object_storage) .await - .context("Test single-part upload failed.")?; + .context("test single-part upload failed")?; object_storage.set_policy(MultiPartPolicy { // On azure, block size is limited between 64KB and 100MB. @@ -58,7 +58,7 @@ async fn test_suite_on_azure_storage() -> anyhow::Result<()> { }); quickwit_storage::storage_test_multi_part_upload(&mut object_storage) .await - .context("Test multipart upload failed.")?; + .context("test multipart upload failed")?; // Teardown container. 
container_client.delete().into_future().await?; diff --git a/quickwit/quickwit-storage/tests/s3_storage.rs b/quickwit/quickwit-storage/tests/s3_storage.rs index c7a1e1ad444..cecb80936aa 100644 --- a/quickwit/quickwit-storage/tests/s3_storage.rs +++ b/quickwit/quickwit-storage/tests/s3_storage.rs @@ -58,7 +58,7 @@ async fn run_s3_storage_test_suite(s3_storage_config: S3StorageConfig, bucket_ur quickwit_storage::storage_test_suite(&mut object_storage) .await - .context("S3 storage test suite failed.") + .context("S3 storage test suite failed") .unwrap(); let mut object_storage = S3CompatibleObjectStorage::from_uri(&s3_storage_config, &storage_uri) @@ -68,7 +68,7 @@ async fn run_s3_storage_test_suite(s3_storage_config: S3StorageConfig, bucket_ur quickwit_storage::storage_test_single_part_upload(&mut object_storage) .await - .context("Test single-part upload failed.") + .context("test single-part upload failed") .unwrap(); object_storage.set_policy(MultiPartPolicy { @@ -81,7 +81,7 @@ async fn run_s3_storage_test_suite(s3_storage_config: S3StorageConfig, bucket_ur quickwit_storage::storage_test_multi_part_upload(&mut object_storage) .await - .context("Test multipart upload failed.") + .context("test multipart upload failed") .unwrap(); } @@ -132,7 +132,7 @@ fn test_suite_on_s3_storage_bulk_delete_single_object_delete_api() { .unwrap(); quickwit_storage::test_write_and_bulk_delete(&mut object_storage) .await - .context("Test bulk delete single-object delete API failed.") + .context("test bulk delete single-object delete API failed") .unwrap(); }); }
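
For reference, here is a minimal sketch of the convention these hunks apply, written with thiserror and anyhow. It is an illustration only: `CatalogError`, its variants, and `load_catalog` are hypothetical names, not code from this patch. The rules it demonstrates: messages start lowercase, carry no trailing punctuation, and proper nouns such as Quickwit, Azure, or PostgreSQL keep their casing.

// Sketch only: illustrates the error-message convention this patch applies
// (lowercase start, no trailing punctuation, proper nouns keep their casing).
// `CatalogError` and `load_catalog` are hypothetical, not code from this repo.
use anyhow::Context;
use thiserror::Error;

#[derive(Debug, Error)]
#[allow(dead_code)]
enum CatalogError {
    // starts lowercase, no trailing period
    #[error("failed to open catalog at `{0}`")]
    Open(String),
    // a proper noun such as PostgreSQL keeps its capitalization
    #[error("cannot connect to PostgreSQL at `{0}`")]
    Connect(String),
}

fn load_catalog(path: &str) -> anyhow::Result<Vec<u8>> {
    // context strings follow the same convention as `#[error(...)]` messages,
    // so chained reports stay lowercase end to end
    std::fs::read(path).with_context(|| format!("failed to read catalog file `{path}`"))
}

fn main() {
    if let Err(error) = load_catalog("/path/that/does/not/exist") {
        // the alternate formatter prints the whole chain, e.g.:
        // failed to read catalog file `/path/that/does/not/exist`: No such file or directory (os error 2)
        eprintln!("{error:#}");
    }
}

Keeping every link of the chain lowercase is what makes nested reports, such as the "internal error: `internal error: `error again on `split2```" assertion updated above, read as a single sentence instead of a stack of capitalized fragments.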