diff --git a/quickwit/Cargo.lock b/quickwit/Cargo.lock
index 7b100fd40a2..930962af002 100644
--- a/quickwit/Cargo.lock
+++ b/quickwit/Cargo.lock
@@ -5687,6 +5687,7 @@ dependencies = [
  "bytes",
  "bytesize",
  "dyn-clone",
+ "fail",
  "flume",
  "fnv",
  "futures",
diff --git a/quickwit/quickwit-ingest/Cargo.toml b/quickwit/quickwit-ingest/Cargo.toml
index 6734633886d..dcd342a2b9e 100644
--- a/quickwit/quickwit-ingest/Cargo.toml
+++ b/quickwit/quickwit-ingest/Cargo.toml
@@ -11,6 +11,7 @@ async-trait = { workspace = true }
 bytes = { workspace = true }
 bytesize = { workspace = true }
 dyn-clone = { workspace = true }
+fail = { workspace = true }
 flume = { workspace = true }
 fnv = { workspace = true }
 futures = { workspace = true }
diff --git a/quickwit/quickwit-ingest/src/ingest_v2/ingester.rs b/quickwit/quickwit-ingest/src/ingest_v2/ingester.rs
index 2ebb40a9ca8..2287b56ff79 100644
--- a/quickwit/quickwit-ingest/src/ingest_v2/ingester.rs
+++ b/quickwit/quickwit-ingest/src/ingest_v2/ingester.rs
@@ -20,7 +20,6 @@
 use std::collections::hash_map::Entry;
 use std::collections::{HashMap, HashSet};
 use std::fmt;
-use std::iter::once;
 use std::path::Path;
 use std::sync::{Arc, Weak};
 use std::time::Duration;
@@ -56,8 +55,9 @@ use tracing::{debug, error, info, warn};
 use super::fetch::FetchStreamTask;
 use super::metrics::INGEST_V2_METRICS;
 use super::models::IngesterShard;
-use super::mrecord::MRecord;
-use super::mrecordlog_utils::{check_enough_capacity, force_delete_queue};
+use super::mrecordlog_utils::{
+    append_non_empty_doc_batch, check_enough_capacity, force_delete_queue, AppendDocBatchError,
+};
 use super::rate_meter::RateMeter;
 use super::replication::{
     ReplicationClient, ReplicationStreamTask, ReplicationStreamTaskHandle, ReplicationTask,
@@ -120,6 +120,10 @@ impl Ingester {
         replication_factor: usize,
     ) -> IngestV2Result<Self> {
         let self_node_id: NodeId = cluster.self_node_id().into();
+        info!(
+            "opening write-ahead log located at `{}`",
+            wal_dir_path.display()
+        );
         let mrecordlog = MultiRecordLog::open_with_prefs(
             wal_dir_path,
             mrecordlog::SyncPolicy::OnDelay(Duration::from_secs(5)),
@@ -232,10 +236,12 @@ impl Ingester {
                 num_closed_shards += 1;
             } else {
                 // The queue is empty: delete it.
-                force_delete_queue(&mut state_guard.mrecordlog, &queue_id)
-                    .await
-                    .expect("TODO: handle IO error");
-
+                if let Err(io_error) =
+                    force_delete_queue(&mut state_guard.mrecordlog, &queue_id).await
+                {
+                    error!("failed to delete shard `{queue_id}`: {io_error}");
+                    continue;
+                }
                 num_deleted_shards += 1;
             }
         }
@@ -295,6 +301,7 @@ impl Ingester {
             .await?;
 
         if let Err(error) = replication_client.init_replica(shard).await {
+            // TODO: Remove dangling queue from the WAL.
            error!("failed to initialize replica shard: {error}",);
            return Err(IngestV2Error::Internal(format!(
                "failed to initialize replica shard: {error}"
@@ -393,6 +400,13 @@ impl Ingester {
         let mut persist_failures = Vec::new();
         let mut replicate_subrequests: HashMap<NodeId, Vec<ReplicateSubrequest>> = HashMap::new();
 
+        // Keep track of the shards that need to be closed following an IO error.
+        let mut shards_to_close: HashSet<QueueId> = HashSet::new();
+
+        // Keep track of dangling shards, i.e., shards for which there is no longer a corresponding
+        // queue in the WAL and that should be deleted.
+        let mut shards_to_delete: HashSet<QueueId> = HashSet::new();
+
         let commit_type = persist_request.commit_type();
         let force_commit = commit_type == CommitTypeV2::Force;
         let leader_id: NodeId = persist_request.leader_id.into();
@@ -513,27 +527,43 @@ impl Ingester {
 
             rate_meter.update(batch_num_bytes);
 
-            let current_position_inclusive: Position = if force_commit {
-                let encoded_mrecords = doc_batch
-                    .docs()
-                    .map(|doc| MRecord::Doc(doc).encode())
-                    .chain(once(MRecord::Commit.encode()));
-                state_guard
-                    .mrecordlog
-                    .append_records(&queue_id, None, encoded_mrecords)
-                    .await
-                    .expect("TODO") // TODO: Io error, close shard?
-            } else {
-                let encoded_mrecords = doc_batch.docs().map(|doc| MRecord::Doc(doc).encode());
-                state_guard
-                    .mrecordlog
-                    .append_records(&queue_id, None, encoded_mrecords)
-                    .await
-                    .expect("TODO") // TODO: Io error, close shard?
-            }
-            .map(Position::offset)
-            .expect("records should not be empty");
+            let append_result = append_non_empty_doc_batch(
+                &mut state_guard.mrecordlog,
+                &queue_id,
+                &doc_batch,
+                force_commit,
+            )
+            .await;
+            let current_position_inclusive = match append_result {
+                Ok(current_position_inclusive) => current_position_inclusive,
+                Err(append_error) => {
+                    let reason = match &append_error {
+                        AppendDocBatchError::Io(io_error) => {
+                            error!("failed to persist records to shard `{queue_id}`: {io_error}");
+                            shards_to_close.insert(queue_id);
+                            PersistFailureReason::ShardClosed
+                        }
+                        AppendDocBatchError::QueueNotFound(_) => {
+                            error!(
+                                "failed to persist records to shard `{queue_id}`: WAL queue not \
+                                 found"
+                            );
+                            shards_to_delete.insert(queue_id);
+                            PersistFailureReason::ShardNotFound
+                        }
+                    };
+                    let persist_failure = PersistFailure {
+                        subrequest_id: subrequest.subrequest_id,
+                        index_uid: subrequest.index_uid,
+                        source_id: subrequest.source_id,
+                        shard_id: subrequest.shard_id,
+                        reason: reason as i32,
+                    };
+                    persist_failures.push(persist_failure);
+                    continue;
+                }
+            };
             // It's more precise to compute the new usage from the current usage + the requested
             // capacity than from continuously summing up the requested capacities, which are
             // approximations.
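Note: the hunk above replaces the inline `append_records` calls (and their `expect("TODO")`) with the new `append_non_empty_doc_batch` helper and maps its two error cases onto persist failures. Below is a minimal, self-contained sketch of that classification pattern only; the `QueueId` alias and the two stripped-down enums are illustrative stand-ins for the real Quickwit/protobuf types, not part of the patch.

    use std::collections::HashSet;
    use std::io;

    // Illustrative stand-in for the real queue identifier type.
    type QueueId = String;

    #[derive(Debug)]
    enum AppendDocBatchError {
        Io(io::Error),
        QueueNotFound(QueueId),
    }

    #[derive(Debug, PartialEq)]
    enum PersistFailureReason {
        ShardClosed,
        ShardNotFound,
    }

    // Mirrors the match in the hunk above: an IO error closes the shard, while a
    // missing WAL queue marks the shard as dangling so it gets deleted afterwards.
    fn classify_append_error(
        append_error: &AppendDocBatchError,
        queue_id: &QueueId,
        shards_to_close: &mut HashSet<QueueId>,
        shards_to_delete: &mut HashSet<QueueId>,
    ) -> PersistFailureReason {
        match append_error {
            AppendDocBatchError::Io(_) => {
                shards_to_close.insert(queue_id.clone());
                PersistFailureReason::ShardClosed
            }
            AppendDocBatchError::QueueNotFound(_) => {
                shards_to_delete.insert(queue_id.clone());
                PersistFailureReason::ShardNotFound
            }
        }
    }

    fn main() {
        let mut shards_to_close = HashSet::new();
        let mut shards_to_delete = HashSet::new();
        let queue_id: QueueId = "test-index:0/test-source/1".to_string();

        let io_error = AppendDocBatchError::Io(io::Error::from(io::ErrorKind::PermissionDenied));
        let reason =
            classify_append_error(&io_error, &queue_id, &mut shards_to_close, &mut shards_to_delete);
        assert_eq!(reason, PersistFailureReason::ShardClosed);
        assert!(shards_to_close.contains(&queue_id));
    }

The key design point, which the later hunks act on, is that the failing shard is only recorded here; the actual close/delete happens once, after the per-subrequest loop, while the write lock is still held.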
@@ -581,6 +611,28 @@ impl Ingester {
                 persist_successes.push(persist_success);
             }
         }
+        if !shards_to_close.is_empty() {
+            for queue_id in &shards_to_close {
+                let shard = state_guard
+                    .shards
+                    .get_mut(queue_id)
+                    .expect("shard should exist");
+
+                shard.shard_state = ShardState::Closed;
+                shard.notify_shard_status();
+            }
+            info!(
+                "closed {} shard(s) following IO error(s)",
+                shards_to_close.len()
+            );
+        }
+        if !shards_to_delete.is_empty() {
+            for queue_id in &shards_to_delete {
+                state_guard.shards.remove(queue_id);
+                state_guard.rate_trackers.remove(queue_id);
+            }
+            info!("deleted {} dangling shard(s)", shards_to_delete.len());
+        }
         if replicate_subrequests.is_empty() {
             let leader_id = self.self_node_id.to_string();
             let persist_response = PersistResponse {
@@ -1040,29 +1092,28 @@ impl IngesterState {
                 shard.truncation_position_inclusive = truncate_up_to_position_inclusive;
             }
             Err(TruncateError::MissingQueue(_)) => {
-                warn!("failed to truncate WAL queue `{queue_id}`: queue does not exist");
+                error!("failed to truncate shard `{queue_id}`: WAL queue not found");
+                self.shards.remove(queue_id);
+                self.rate_trackers.remove(queue_id);
+                info!("deleted dangling shard `{queue_id}`");
             }
-            Err(error) => {
-                error!(%error, "failed to truncate WAL queue `{queue_id}`");
+            Err(TruncateError::IoError(io_error)) => {
+                error!("failed to truncate shard `{queue_id}`: {io_error}");
             }
         };
     }
 
     /// Deletes the shard identified by `queue_id` from the ingester state. It removes the
-    /// mrecordlog queue first and then, if the operation is successful, removes the shard.
+    /// mrecordlog queue first and then removes the associated in-memory shard and rate trackers.
     async fn delete_shard(&mut self, queue_id: &QueueId) {
         match self.mrecordlog.delete_queue(queue_id).await {
-            Ok(_) => {
+            Ok(_) | Err(DeleteQueueError::MissingQueue(_)) => {
                 self.shards.remove(queue_id);
                 self.rate_trackers.remove(queue_id);
-
-                info!("deleted shard `{queue_id}` from ingester");
-            }
-            Err(DeleteQueueError::MissingQueue(_)) => {
-                // The shard has already been deleted.
+                info!("deleted shard `{queue_id}`");
             }
-            Err(DeleteQueueError::IoError(_)) => {
-                panic!("TODO: handle IO error")
+            Err(DeleteQueueError::IoError(io_error)) => {
+                error!("failed to delete shard `{queue_id}`: {io_error}");
             }
         };
     }
@@ -1150,6 +1201,7 @@ mod tests {
     use crate::ingest_v2::broadcast::ShardInfos;
     use crate::ingest_v2::fetch::tests::{into_fetch_eof, into_fetch_payload};
     use crate::ingest_v2::test_utils::MultiRecordLogTestExt;
+    use crate::MRecord;
 
     pub(super) struct IngesterForTest {
         node_id: NodeId,
@@ -1499,6 +1551,169 @@ mod tests {
         );
     }
 
+    #[tokio::test]
+    async fn test_ingester_persist_empty() {
+        let (ingester_ctx, mut ingester) = IngesterForTest::default().build().await;
+
+        let persist_request = PersistRequest {
+            leader_id: ingester_ctx.node_id.to_string(),
+            commit_type: CommitTypeV2::Force as i32,
+            subrequests: Vec::new(),
+        };
+        let persist_response = ingester.persist(persist_request).await.unwrap();
+        assert_eq!(persist_response.leader_id, "test-ingester");
+        assert_eq!(persist_response.successes.len(), 0);
+        assert_eq!(persist_response.failures.len(), 0);
+
+        let persist_request = PersistRequest {
+            leader_id: "test-ingester".to_string(),
+            commit_type: CommitTypeV2::Force as i32,
+            subrequests: vec![PersistSubrequest {
+                subrequest_id: 0,
+                index_uid: "test-index:0".to_string(),
+                source_id: "test-source".to_string(),
+                shard_id: 1,
+                doc_batch: None,
+            }],
+        };
+
+        let init_shards_request = InitShardsRequest {
+            shards: vec![Shard {
+                index_uid: "test-index:0".to_string(),
+                source_id: "test-source".to_string(),
+                shard_id: 1,
+                shard_state: ShardState::Open as i32,
+                leader_id: ingester_ctx.node_id.to_string(),
+                ..Default::default()
+            }],
+        };
+        ingester.init_shards(init_shards_request).await.unwrap();
+
+        let persist_response = ingester.persist(persist_request).await.unwrap();
+        assert_eq!(persist_response.leader_id, "test-ingester");
+        assert_eq!(persist_response.successes.len(), 1);
+        assert_eq!(persist_response.failures.len(), 0);
+
+        let persist_success = &persist_response.successes[0];
+        assert_eq!(persist_success.subrequest_id, 0);
+        assert_eq!(persist_success.index_uid, "test-index:0");
+        assert_eq!(persist_success.source_id, "test-source");
+        assert_eq!(persist_success.shard_id, 1);
+        assert_eq!(
+            persist_success.replication_position_inclusive,
+            Some(Position::Beginning)
+        );
+    }
+
+    // This test should be run manually and independently of other tests with the `fail/failpoints`
+    // feature enabled.
+    #[tokio::test]
+    #[ignore]
+    async fn test_ingester_persist_closes_shard_on_io_error() {
+        let scenario = fail::FailScenario::setup();
+        fail::cfg("ingester:append_records", "return").unwrap();
+
+        let (_ingester_ctx, mut ingester) = IngesterForTest::default().build().await;
+
+        let mut state_guard = ingester.state.write().await;
+        let queue_id = queue_id("test-index:0", "test-source", 1);
+        let solo_shard =
+            IngesterShard::new_solo(ShardState::Open, Position::Beginning, Position::Beginning);
+        state_guard.shards.insert(queue_id.clone(), solo_shard);
+
+        state_guard
+            .mrecordlog
+            .create_queue(&queue_id)
+            .await
+            .unwrap();
+
+        let rate_limiter = RateLimiter::from_settings(RateLimiterSettings::default());
+        let rate_meter = RateMeter::default();
+        state_guard
+            .rate_trackers
+            .insert(queue_id.clone(), (rate_limiter, rate_meter));
+
+        drop(state_guard);
+
+        let persist_request = PersistRequest {
+            leader_id: "test-ingester".to_string(),
+            commit_type: CommitTypeV2::Force as i32,
+            subrequests: vec![PersistSubrequest {
+                subrequest_id: 0,
+                index_uid: "test-index:0".to_string(),
+                source_id: "test-source".to_string(),
+                shard_id: 1,
+                doc_batch: Some(DocBatchV2::for_test(["test-doc-foo"])),
+            }],
+        };
+        let persist_response = ingester.persist(persist_request).await.unwrap();
+        assert_eq!(persist_response.leader_id, "test-ingester");
+        assert_eq!(persist_response.successes.len(), 0);
+        assert_eq!(persist_response.failures.len(), 1);
+
+        let persist_failure = &persist_response.failures[0];
+        assert_eq!(persist_failure.subrequest_id, 0);
+        assert_eq!(persist_failure.index_uid, "test-index:0");
+        assert_eq!(persist_failure.source_id, "test-source");
+        assert_eq!(persist_failure.shard_id, 1);
+        assert_eq!(persist_failure.reason(), PersistFailureReason::ShardClosed,);
+
+        let state_guard = ingester.state.read().await;
+        let shard = state_guard.shards.get(&queue_id).unwrap();
+        shard.assert_is_closed();
+
+        scenario.teardown();
+    }
+
+    #[tokio::test]
+    async fn test_ingester_persist_deletes_dangling_shard() {
+        let (_ingester_ctx, mut ingester) = IngesterForTest::default().build().await;
+
+        let mut state_guard = ingester.state.write().await;
+        let queue_id = queue_id("test-index:0", "test-source", 1);
+        let solo_shard =
+            IngesterShard::new_solo(ShardState::Open, Position::Beginning, Position::Beginning);
+        state_guard.shards.insert(queue_id.clone(), solo_shard);
+
+        let rate_limiter = RateLimiter::from_settings(RateLimiterSettings::default());
+        let rate_meter = RateMeter::default();
+        state_guard
+            .rate_trackers
+            .insert(queue_id.clone(), (rate_limiter, rate_meter));
+
+        drop(state_guard);
+
+        let persist_request = PersistRequest {
+            leader_id: "test-ingester".to_string(),
+            commit_type: CommitTypeV2::Force as i32,
+            subrequests: vec![PersistSubrequest {
+                subrequest_id: 0,
+                index_uid: "test-index:0".to_string(),
+                source_id: "test-source".to_string(),
+                shard_id: 1,
+                doc_batch: Some(DocBatchV2::for_test(["test-doc-foo"])),
+            }],
+        };
+        let persist_response = ingester.persist(persist_request).await.unwrap();
+        assert_eq!(persist_response.leader_id, "test-ingester");
+        assert_eq!(persist_response.successes.len(), 0);
+        assert_eq!(persist_response.failures.len(), 1);
+
+        let persist_failure = &persist_response.failures[0];
+        assert_eq!(persist_failure.subrequest_id, 0);
+        assert_eq!(persist_failure.index_uid, "test-index:0");
+        assert_eq!(persist_failure.source_id, "test-source");
+        assert_eq!(persist_failure.shard_id, 1);
+        assert_eq!(
+            persist_failure.reason(),
+            PersistFailureReason::ShardNotFound
+        );
+
+        let state_guard = ingester.state.read().await;
+        assert_eq!(state_guard.shards.len(), 0);
+        assert_eq!(state_guard.rate_trackers.len(), 0);
+    }
+
     #[tokio::test]
     async fn test_ingester_persist_replicate() {
         let (leader_ctx, mut leader) = IngesterForTest::default()
@@ -2142,7 +2357,7 @@ mod tests {
     }
 
     #[tokio::test]
-    async fn test_ingester_truncate() {
+    async fn test_ingester_truncate_shards() {
         let (ingester_ctx, mut ingester) = IngesterForTest::default().build().await;
 
         let shard_01 = Shard {
@@ -2243,6 +2458,44 @@ mod tests {
         assert!(!state_guard.mrecordlog.queue_exists(&queue_id_02));
     }
 
+    #[tokio::test]
+    async fn test_ingester_truncate_shards_deletes_dangling_shards() {
+        let (ingester_ctx, mut ingester) = IngesterForTest::default().build().await;
+
+        let queue_id = queue_id("test-index:0", "test-source", 1);
+
+        let mut state_guard = ingester.state.write().await;
+        let solo_shard =
+            IngesterShard::new_solo(ShardState::Open, Position::Beginning, Position::Beginning);
+        state_guard.shards.insert(queue_id.clone(), solo_shard);
+
+        let rate_limiter = RateLimiter::from_settings(RateLimiterSettings::default());
+        let rate_meter = RateMeter::default();
+        state_guard
+            .rate_trackers
+            .insert(queue_id.clone(), (rate_limiter, rate_meter));
+
+        drop(state_guard);
+
+        let truncate_shards_request = TruncateShardsRequest {
+            ingester_id: ingester_ctx.node_id.to_string(),
+            subrequests: vec![TruncateShardsSubrequest {
+                index_uid: "test-index:0".to_string(),
+                source_id: "test-source".to_string(),
+                shard_id: 1,
+                truncate_up_to_position_inclusive: Some(Position::offset(0u64)),
+            }],
+        };
+        ingester
+            .truncate_shards(truncate_shards_request.clone())
+            .await
+            .unwrap();
+
+        let state_guard = ingester.state.read().await;
+        assert_eq!(state_guard.shards.len(), 0);
+        assert_eq!(state_guard.rate_trackers.len(), 0);
+    }
+
     #[tokio::test]
     async fn test_ingester_retain_shards() {
         let (_ingester_ctx, mut ingester) = IngesterForTest::default().build().await;
diff --git a/quickwit/quickwit-ingest/src/ingest_v2/mrecordlog_utils.rs b/quickwit/quickwit-ingest/src/ingest_v2/mrecordlog_utils.rs
index e51e90f956c..eaf17f0e469 100644
--- a/quickwit/quickwit-ingest/src/ingest_v2/mrecordlog_utils.rs
+++ b/quickwit/quickwit-ingest/src/ingest_v2/mrecordlog_utils.rs
@@ -18,12 +18,71 @@
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 use std::io;
+use std::iter::once;
 use std::ops::RangeInclusive;
 
 use bytesize::ByteSize;
-use mrecordlog::error::DeleteQueueError;
+use fail::fail_point;
+use mrecordlog::error::{AppendError, DeleteQueueError};
 use mrecordlog::MultiRecordLog;
-use quickwit_proto::types::QueueId;
+use quickwit_proto::ingest::DocBatchV2;
+use quickwit_proto::types::{Position, QueueId};
+
+use crate::MRecord;
+
+#[derive(Debug, thiserror::Error)]
+pub(super) enum AppendDocBatchError {
+    #[error("IO error: {0}")]
+    Io(#[from] io::Error),
+    #[error("WAL queue `{0}` not found")]
+    QueueNotFound(QueueId),
+}
+
+/// Appends a non-empty document batch to the WAL queue `queue_id`.
+///
+/// # Panics
+///
+/// Panics if `doc_batch` is empty.
+pub(super) async fn append_non_empty_doc_batch(
+    mrecordlog: &mut MultiRecordLog,
+    queue_id: &QueueId,
+    doc_batch: &DocBatchV2,
+    force_commit: bool,
+) -> Result<Position, AppendDocBatchError> {
+    let append_result = if force_commit {
+        let encoded_mrecords = doc_batch
+            .docs()
+            .map(|doc| MRecord::Doc(doc).encode())
+            .chain(once(MRecord::Commit.encode()));
+        fail_point!("ingester:append_records", |_| {
+            let io_error = io::Error::from(io::ErrorKind::PermissionDenied);
+            Err(AppendDocBatchError::Io(io_error))
+        });
+        mrecordlog
+            .append_records(queue_id, None, encoded_mrecords)
+            .await
+    } else {
+        let encoded_mrecords = doc_batch.docs().map(|doc| MRecord::Doc(doc).encode());
+        fail_point!("ingester:append_records", |_| {
+            let io_error = io::Error::from(io::ErrorKind::PermissionDenied);
+            Err(AppendDocBatchError::Io(io_error))
+        });
+        mrecordlog
+            .append_records(queue_id, None, encoded_mrecords)
+            .await
+    };
+    match append_result {
+        Ok(Some(offset)) => Ok(Position::offset(offset)),
+        Ok(None) => panic!("`doc_batch` should not be empty"),
+        Err(AppendError::IoError(io_error)) => Err(AppendDocBatchError::Io(io_error)),
+        Err(AppendError::MissingQueue(queue_id)) => {
+            Err(AppendDocBatchError::QueueNotFound(queue_id))
+        }
+        Err(AppendError::Past) => {
+            panic!("`append_records` should be called with `position_opt: None`")
+        }
+    }
+}
 
 #[derive(Debug, Clone, Copy)]
 pub(super) struct MRecordLogUsage {
@@ -31,9 +90,6 @@ pub(super) struct MRecordLogUsage {
     pub memory: ByteSize,
 }
 
-#[derive(Debug, Clone, Copy)]
-pub(super) struct MemoryUsage(ByteSize);
-
 /// Error returned when the mrecordlog does not have enough capacity to store some records.
 #[derive(Debug, Clone, Copy, thiserror::Error)]
 pub(super) enum NotEnoughCapacityError {
@@ -123,6 +179,62 @@ pub(super) fn queue_position_range(
 mod tests {
     use super::*;
 
+    #[tokio::test]
+    async fn test_append_non_empty_doc_batch() {
+        let tempdir = tempfile::tempdir().unwrap();
+        let mut mrecordlog = MultiRecordLog::open(tempdir.path()).await.unwrap();
+
+        let queue_id = "test-queue".to_string();
+        let doc_batch = DocBatchV2::for_test(["test-doc-foo"]);
+
+        let append_error =
+            append_non_empty_doc_batch(&mut mrecordlog, &queue_id, &doc_batch, false)
+                .await
+                .unwrap_err();
+
+        assert!(matches!(
+            append_error,
+            AppendDocBatchError::QueueNotFound(..)
+        ));
+
+        mrecordlog.create_queue(&queue_id).await.unwrap();
+
+        let position = append_non_empty_doc_batch(&mut mrecordlog, &queue_id, &doc_batch, false)
+            .await
+            .unwrap();
+        assert_eq!(position, Position::offset(0u64));
+
+        let position = append_non_empty_doc_batch(&mut mrecordlog, &queue_id, &doc_batch, true)
+            .await
+            .unwrap();
+        assert_eq!(position, Position::offset(2u64));
+    }
+
+    // This test should be run manually and independently of other tests with the `fail/failpoints`
+    // feature enabled.
+    #[tokio::test]
+    #[ignore]
+    async fn test_append_non_empty_doc_batch_io_error() {
+        let scenario = fail::FailScenario::setup();
+        fail::cfg("ingester:append_records", "return").unwrap();
+
+        let tempdir = tempfile::tempdir().unwrap();
+        let mut mrecordlog = MultiRecordLog::open(tempdir.path()).await.unwrap();
+
+        let queue_id = "test-queue".to_string();
+        mrecordlog.create_queue(&queue_id).await.unwrap();
+
+        let doc_batch = DocBatchV2::for_test(["test-doc-foo"]);
+        let append_error =
+            append_non_empty_doc_batch(&mut mrecordlog, &queue_id, &doc_batch, false)
+                .await
+                .unwrap_err();
+
+        assert!(matches!(append_error, AppendDocBatchError::Io(..)));
+
+        scenario.teardown();
+    }
+
     #[tokio::test]
     async fn test_check_enough_capacity() {
         let tempdir = tempfile::tempdir().unwrap();
diff --git a/quickwit/quickwit-ingest/src/ingest_v2/router.rs b/quickwit/quickwit-ingest/src/ingest_v2/router.rs
index eb9fb9c4a61..868478cef32 100644
--- a/quickwit/quickwit-ingest/src/ingest_v2/router.rs
+++ b/quickwit/quickwit-ingest/src/ingest_v2/router.rs
@@ -265,7 +265,7 @@ impl IngestRouter {
                     .unavailable_leaders
                     .insert(persist_summary.leader_id);
                 for subrequest_id in persist_summary.subrequest_ids {
-                    workbench.record_connection_error(subrequest_id);
+                    workbench.record_transport_error(subrequest_id);
                 }
             }
             IngestV2Error::TooManyRequests
@@ -1121,7 +1121,7 @@ mod tests {
         let subworkbench = workbench.subworkbenches.get(&1).unwrap();
         assert!(matches!(
             subworkbench.last_failure_opt,
-            Some(SubworkbenchFailure::TransportError)
+            Some(SubworkbenchFailure::Transport)
         ));
     }
diff --git a/quickwit/quickwit-ingest/src/ingest_v2/workbench.rs b/quickwit/quickwit-ingest/src/ingest_v2/workbench.rs
index a67e0c31615..28e305221a4 100644
--- a/quickwit/quickwit-ingest/src/ingest_v2/workbench.rs
+++ b/quickwit/quickwit-ingest/src/ingest_v2/workbench.rs
@@ -150,8 +150,8 @@ impl IngestWorkbench {
     /// Marks a node as unavailable for the span of the workbench.
     ///
     /// Remaining attempts will treat the node as if it was not in the ingester pool.
-    pub fn record_connection_error(&mut self, subrequest_id: SubrequestId) {
-        self.record_failure(subrequest_id, SubworkbenchFailure::TransportError);
+    pub fn record_transport_error(&mut self, subrequest_id: SubrequestId) {
+        self.record_failure(subrequest_id, SubworkbenchFailure::Transport);
     }
 
     pub fn record_persist_failure(&mut self, persist_failure: &PersistFailure) {
@@ -217,7 +217,7 @@ pub(super) enum SubworkbenchFailure {
     SourceNotFound,
     NoShardsAvailable,
     // Transport error: we failed to reach the ingester.
-    TransportError,
+    Transport,
     // This is an error supplied by the ingester.
     Persist(PersistFailureReason),
     Internal(String),
@@ -232,7 +232,7 @@ impl SubworkbenchFailure {
             Self::NoShardsAvailable => IngestFailureReason::NoShardsAvailable,
             // In our last attempt, we did not manage to reach the ingester.
             // We can consider that as no shards available.
-            Self::TransportError => IngestFailureReason::NoShardsAvailable,
+            Self::Transport => IngestFailureReason::NoShardsAvailable,
             Self::Persist(persist_failure_reason) => (*persist_failure_reason).into(),
         }
     }
@@ -271,7 +271,7 @@ impl IngestSubworkbench {
             // No need to retry: no shards were available.
             Some(SubworkbenchFailure::NoShardsAvailable) => false,
             Some(SubworkbenchFailure::Persist(_)) => true,
-            Some(SubworkbenchFailure::TransportError) => true,
+            Some(SubworkbenchFailure::Transport) => true,
             None => true,
         }
     }
@@ -292,7 +292,7 @@ mod tests {
         assert!(subworkbench.is_pending());
         assert!(subworkbench.last_failure_is_transient());
 
-        subworkbench.last_failure_opt = Some(SubworkbenchFailure::TransportError);
+        subworkbench.last_failure_opt = Some(SubworkbenchFailure::Transport);
 
         assert!(subworkbench.is_pending());
         assert!(subworkbench.last_failure_is_transient());
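Note: the two `#[ignore]`d tests added in this patch rely on the fail crate. `fail_point!` compiles to a no-op unless the crate is built with the `fail/failpoints` feature and a trigger is configured at runtime, which is why those tests must be run manually and in isolation. The sketch below shows the mechanism on its own, assuming the `failpoints` feature is enabled; the function and fail-point names are illustrative, not part of the patch.

    use std::io;

    use fail::fail_point;

    // A stand-in for a fallible WAL append: when the fail point is configured
    // with the "return" action, the closure runs and the injected IO error is
    // returned before the real body executes.
    fn append_records_stub() -> Result<u64, io::Error> {
        fail_point!("sketch:append_records", |_| {
            Err(io::Error::from(io::ErrorKind::PermissionDenied))
        });
        Ok(0)
    }

    fn main() {
        // `FailScenario::setup` starts from a clean fail-point registry and
        // `teardown` restores it, mirroring the structure of the tests above.
        let scenario = fail::FailScenario::setup();
        fail::cfg("sketch:append_records", "return").unwrap();

        assert!(append_records_stub().is_err());

        scenario.teardown();
    }

Because the registry is process-wide, configuring a fail point in one test would leak into any other test running concurrently in the same binary, which is the reason the patch keeps these tests ignored by default rather than wiring them into the regular test run.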