diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 06fed42b287..7326ce48eac 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -648,18 +648,6 @@ pub(super) struct PeerState {
 	///
 	/// Holds all channels within corresponding `ChannelPhase`s where the peer is the counterparty.
 	pub(super) channel_by_id: HashMap<[u8; 32], ChannelPhase>,
-	/// `temporary_channel_id` -> `OutboundV1Channel`.
-	///
-	/// Holds all outbound V1 channels where the peer is the counterparty. Once an outbound channel has
-	/// been assigned a `channel_id`, the entry in this map is removed and one is created in
-	/// `channel_by_id`.
-	pub(super) outbound_v1_channel_by_id: HashMap<[u8; 32], OutboundV1Channel>,
-	/// `temporary_channel_id` -> `InboundV1Channel`.
-	///
-	/// Holds all inbound V1 channels where the peer is the counterparty. Once an inbound channel has
-	/// been assigned a `channel_id`, the entry in this map is removed and one is created in
-	/// `channel_by_id`.
-	pub(super) inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel>,
 	/// `temporary_channel_id` -> `InboundChannelRequest`.
 	///
 	/// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where
@@ -713,23 +701,19 @@ impl PeerState {
 		if require_disconnected && self.is_connected {
 			return false
 		}
-		self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
+		self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0
+			&& self.monitor_update_blocked_actions.is_empty()
 			&& self.in_flight_monitor_updates.is_empty()
 	}

 	// Returns a count of all channels we have with this peer, including unfunded channels.
 	fn total_channel_count(&self) -> usize {
-		self.channel_by_id.len() +
-			self.outbound_v1_channel_by_id.len() +
-			self.inbound_v1_channel_by_id.len() +
-			self.inbound_channel_request_by_id.len()
+		self.channel_by_id.len() + self.inbound_channel_request_by_id.len()
 	}

 	// Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
 	fn has_channel(&self, channel_id: &[u8; 32]) -> bool {
 		self.channel_by_id.contains_key(channel_id) ||
-			self.outbound_v1_channel_by_id.contains_key(channel_id) ||
-			self.inbound_v1_channel_by_id.contains_key(channel_id) ||
 			self.inbound_channel_request_by_id.contains_key(channel_id)
 	}
 }
@@ -1777,28 +1761,6 @@ macro_rules! update_maps_on_chan_removal {
 	}}
 }

-/// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
-macro_rules! convert_unfunded_chan_err {
-	($self: ident, $err: expr, $channel: expr, $channel_id: expr) => {
-		match $err {
-			ChannelError::Warn(msg) => {
-				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone()))
-			},
-			ChannelError::Ignore(msg) => {
-				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
-			},
-			ChannelError::Close(msg) => {
-				log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
-				update_maps_on_chan_removal!($self, &$channel.context);
-				let shutdown_res = $channel.context.force_shutdown(true);
-
-				(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
-					shutdown_res, None, $channel.context.get_value_satoshis()))
-			},
-		}
-	};
-}
-
 /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
 macro_rules! convert_funded_chan_err {
 	($self: ident, $err: expr, $channel: expr, $channel_id: expr) => {
@@ -1886,31 +1848,6 @@ macro_rules! try_chan_phase_entry {
 	}
 }

-macro_rules! try_unfunded_chan_entry {
-	($self: ident, $res: expr, $entry: expr) => {
-		match $res {
-			Ok(res) => res,
-			Err(e) => {
-				let (drop, res) = convert_unfunded_chan_err!($self, e, $entry.get_mut(), $entry.key());
-				if drop {
-					$entry.remove_entry();
-				}
-				return Err(res);
-			}
-		}
-	}
-}
-
-macro_rules! remove_channel {
-	($self: expr, $entry: expr) => {
-		{
-			let channel = $entry.remove_entry().1;
-			update_maps_on_chan_removal!($self, &channel.context);
-			channel
-		}
-	}
-}
-
 macro_rules! remove_channel_phase {
 	($self: expr, $entry: expr) => {
 		{
@@ -2328,7 +2265,7 @@ where
 		let res = channel.get_open_channel(self.genesis_hash.clone());

 		let temporary_channel_id = channel.context.channel_id();
-		match peer_state.outbound_v1_channel_by_id.entry(temporary_channel_id) {
+		match peer_state.channel_by_id.entry(temporary_channel_id) {
 			hash_map::Entry::Occupied(_) => {
 				if cfg!(fuzzing) {
 					return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
@@ -2336,7 +2273,7 @@ where
 					panic!("RNG is bad???");
 				}
 			},
-			hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
+			hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::UnfundedOutboundV1(channel)); }
 		}

 		peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
@@ -2398,16 +2335,6 @@ where
 						peer_state.latest_features.clone(), &self.fee_estimator);
 					res.push(details);
 				}
-				for (_channel_id, channel) in peer_state.inbound_v1_channel_by_id.iter() {
-					let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-						peer_state.latest_features.clone(), &self.fee_estimator);
-					res.push(details);
-				}
-				for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() {
-					let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-						peer_state.latest_features.clone(), &self.fee_estimator);
-					res.push(details);
-				}
 			}
 		}
 		res
@@ -2441,8 +2368,6 @@ where
 				return peer_state.channel_by_id
 					.iter()
 					.map(|(_, phase)| phase.chan_context())
-					.chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
-					.chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
 					.map(chan_context_to_details)
 					.collect();
 			}
@@ -2688,20 +2613,6 @@ where
 						(None, chan_phase.chan_context().get_counterparty_node_id())
 					},
 				}
-			} else if let hash_map::Entry::Occupied(chan) = peer_state.outbound_v1_channel_by_id.entry(channel_id.clone()) {
-				log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
-				self.issue_channel_close_events(&chan.get().context, closure_reason);
-				let mut chan = remove_channel!(self, chan);
-				self.finish_force_close_channel(chan.context.force_shutdown(false));
-				// Unfunded channel has no update
-				(None, chan.context.get_counterparty_node_id())
-			} else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
-				log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
-				self.issue_channel_close_events(&chan.get().context, closure_reason);
-				let mut chan = remove_channel!(self, chan);
-				self.finish_force_close_channel(chan.context.force_shutdown(false));
-				// Unfunded channel has no update
-				(None, chan.context.get_counterparty_node_id())
 			} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
 				log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
 				// N.B. that we don't send any channel close event here: we
@@ -3537,8 +3448,8 @@ where
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
-		let (chan, msg) = match peer_state.outbound_v1_channel_by_id.remove(temporary_channel_id) {
-			Some(chan) => {
+		let (chan, msg) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+			Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
 				let funding_txo = find_funding_output(&chan, &funding_transaction)?;

 				let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger)
@@ -3562,6 +3473,15 @@ where
 					},
 				}
 			},
+			Some(phase) => {
+				// Put the `ChannelPhase` back into the map, since this was not an unfunded
+				// outbound V1 channel.
+				peer_state.channel_by_id.insert(temporary_channel_id.clone(), phase);
+				return Err(APIError::APIMisuseError {
+					err: format!(
+						"Channel with id {} for the passed counterparty node_id {} is not an unfunded, outbound V1 channel",
+						log_bytes!(*temporary_channel_id), counterparty_node_id),
+				})
+			},
 			None => {
 				return Err(APIError::ChannelUnavailable {
 					err: format!(
@@ -3728,23 +3648,27 @@ where
 				if !channel_phase.chan_context_mut().update_config(&config) {
 					continue;
 				}
-				if let ChannelPhase::Funded(channel) = channel_phase {
-					if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
-						peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
-					} else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
-						peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-							node_id: channel.context.get_counterparty_node_id(),
-							msg,
-						});
+				match channel_phase {
+					ChannelPhase::Funded(channel) => {
+						if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+							peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+						} else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+							peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+								node_id: channel.context.get_counterparty_node_id(),
+								msg,
+							});
+						}
+					},
+					ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
+						// The config was already updated above. We MUST NOT broadcast a
+						// `channel_update` before `channel_ready`, which would be the case for
+						// pending inbound/outbound channels, so there is nothing further to do.
 					}
 				}
 				continue;
-			}
-
-			let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) {
-				&mut channel.context
-			} else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) {
-				&mut channel.context
 			} else {
 				// This should not be reachable as we've already checked for non-existence in the previous channel_id loop.
 				debug_assert!(false);
 				return Err(APIError::ChannelUnavailable {
 					err: format!(
 						"Channel with ID {} for passed counterparty_node_id {} not found",
 						log_bytes!(*channel_id), counterparty_node_id),
 				});
 			};
-			let mut config = context.config();
-			config.apply(config_update);
-			// We update the config, but we MUST NOT broadcast a `channel_update` before `channel_ready`
-			// which would be the case for pending inbound/outbound channels.
-			context.update_config(&config);
 		}
 		Ok(())
 	}
@@ -4647,13 +4566,6 @@ where
 				}
 			});

-			peer_state.outbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(
-				chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events,
-				counterparty_node_id));
-			peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(
-				chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events,
-				counterparty_node_id));
-
 			for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
 				if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
 					log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", log_bytes!(&chan_id[..]));
@@ -5509,7 +5421,7 @@ where
 				msg: channel.accept_inbound_channel(),
 			});
-		peer_state.inbound_v1_channel_by_id.insert(temporary_channel_id.clone(), channel);
+		peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));

 		Ok(())
 	}
@@ -5541,20 +5453,26 @@ where
 		peer: &PeerState<<SP::Target as SignerProvider>::Signer>, best_block_height: u32
 	) -> usize {
 		let mut num_unfunded_channels = 0;
-		for chan in peer.channel_by_id.iter().filter_map(
-			|(_, phase)| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
-		) {
-			// This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those
-			// which have not yet had any confirmations on-chain.
-			if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
-				chan.context.get_funding_tx_confirmations(best_block_height) == 0
-			{
-				num_unfunded_channels += 1;
-			}
-		}
-		for (_, chan) in peer.inbound_v1_channel_by_id.iter() {
-			if chan.context.minimum_depth().unwrap_or(1) != 0 {
-				num_unfunded_channels += 1;
+		for (_, phase) in peer.channel_by_id.iter() {
+			match phase {
+				ChannelPhase::Funded(chan) => {
+					// This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those
+					// which have not yet had any confirmations on-chain.
+					if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
+						chan.context.get_funding_tx_confirmations(best_block_height) == 0
+					{
+						num_unfunded_channels += 1;
+					}
+				},
+				ChannelPhase::UnfundedInboundV1(chan) => {
+					if chan.context.minimum_depth().unwrap_or(1) != 0 {
+						num_unfunded_channels += 1;
+					}
+				},
+				ChannelPhase::UnfundedOutboundV1(_) => {
+					// Outbound channels don't contribute to the unfunded count in the DoS context.
+					continue;
+				}
 			}
 		}
 		num_unfunded_channels + peer.inbound_channel_request_by_id.len()
 	}
@@ -5655,7 +5573,7 @@ where
 			node_id: counterparty_node_id.clone(),
 			msg: channel.accept_inbound_channel(),
 		});
-		peer_state.inbound_v1_channel_by_id.insert(channel_id, channel);
+		peer_state.channel_by_id.insert(channel_id, ChannelPhase::UnfundedInboundV1(channel));

 		Ok(())
 	}
@@ -5669,10 +5587,17 @@ where
 		})?;
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
-		match peer_state.outbound_v1_channel_by_id.entry(msg.temporary_channel_id) {
-			hash_map::Entry::Occupied(mut chan) => {
-				try_unfunded_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
-				(chan.get().context.get_value_satoshis(), chan.get().context.get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id())
+		match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+			hash_map::Entry::Occupied(mut phase) => {
+				match phase.get_mut() {
+					ChannelPhase::UnfundedOutboundV1(chan) => {
+						try_chan_phase_entry!(self, chan.accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase);
+						(chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id())
+					},
+					_ => {
+						return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+					}
+				}
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
 		}
@@ -5701,8 +5626,8 @@ where
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		let (chan, funding_msg, monitor) =
-			match peer_state.inbound_v1_channel_by_id.remove(&msg.temporary_channel_id) {
-				Some(inbound_chan) => {
+			match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
+				Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
 					match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) {
 						Ok(res) => res,
 						Err((mut inbound_chan, err)) => {
@@ -5717,6 +5642,9 @@ where
 						},
 					}
 				},
+				Some(ChannelPhase::Funded(_)) | Some(ChannelPhase::UnfundedOutboundV1(_)) => {
+					return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+				},
 				None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
 			};
@@ -5871,51 +5799,46 @@ where
 		})?;
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
-		// TODO(dunxen): Fix this duplication when we switch to a single map with enums as per
-		// https://github.com/lightningdevkit/rust-lightning/issues/2422
-		if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
-			log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
-			self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
-			let mut chan = remove_channel!(self, chan_entry);
-			self.finish_force_close_channel(chan.context.force_shutdown(false));
-			return Ok(());
-		} else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
-			log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
-			self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
-			let mut chan = remove_channel!(self, chan_entry);
-			self.finish_force_close_channel(chan.context.force_shutdown(false));
-			return Ok(());
-		} else if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
-			if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-				if !chan.received_shutdown() {
-					log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
-						log_bytes!(msg.channel_id),
-						if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
-				}
+		if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+			let phase = chan_phase_entry.get_mut();
+			match phase {
+				ChannelPhase::Funded(chan) => {
+					if !chan.received_shutdown() {
+						log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+							log_bytes!(msg.channel_id),
+							if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
+					}
-				let funding_txo_opt = chan.context.get_funding_txo();
-				let (shutdown, monitor_update_opt, htlcs) = try_chan_phase_entry!(self,
-					chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_phase_entry);
-				dropped_htlcs = htlcs;
+					let funding_txo_opt = chan.context.get_funding_txo();
+					let (shutdown, monitor_update_opt, htlcs) = try_chan_phase_entry!(self,
+						chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_phase_entry);
+					dropped_htlcs = htlcs;
-				if let Some(msg) = shutdown {
-					// We can send the `shutdown` message before updating the `ChannelMonitor`
-					// here as we don't need the monitor update to complete until we send a
-					// `shutdown_signed`, which we'll delay if we're pending a monitor update.
-					peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-						node_id: *counterparty_node_id,
-						msg,
-					});
-				}
+					if let Some(msg) = shutdown {
+						// We can send the `shutdown` message before updating the `ChannelMonitor`
+						// here as we don't need the monitor update to complete until we send a
+						// `shutdown_signed`, which we'll delay if we're pending a monitor update.
+						peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+							node_id: *counterparty_node_id,
+							msg,
+						});
+					}
-				// Update the monitor with the shutdown script if necessary.
-				if let Some(monitor_update) = monitor_update_opt {
-					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-						peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
-				}
-				break Ok(());
-			} else {
-				//
+					// Update the monitor with the shutdown script if necessary.
+					if let Some(monitor_update) = monitor_update_opt {
+						break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+							peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+					}
+					break Ok(());
+				},
+				ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
+					let context = phase.chan_context_mut();
+					log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
+					self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+					let mut chan = remove_channel_phase!(self, chan_phase_entry);
+					self.finish_force_close_channel(chan.chan_context_mut().force_shutdown(false));
+					return Ok(());
+				},
 			}
 		} else {
 			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
@@ -7574,16 +7497,6 @@ where
 					self.issue_channel_close_events(&chan_context, ClosureReason::DisconnectedPeer);
 					false
 				});
-				peer_state.inbound_v1_channel_by_id.retain(|_, chan| {
-					update_maps_on_chan_removal!(self, &chan.context);
-					self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
-					false
-				});
-				peer_state.outbound_v1_channel_by_id.retain(|_, chan| {
-					update_maps_on_chan_removal!(self, &chan.context);
-					self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
-					false
-				});
 				pending_msg_events.retain(|msg| {
 					match msg {
 						// V1 Channel Establishment
@@ -7665,8 +7578,6 @@ where
 				}
 				e.insert(Mutex::new(PeerState {
 					channel_by_id: HashMap::new(),
-					outbound_v1_channel_by_id: HashMap::new(),
-					inbound_v1_channel_by_id: HashMap::new(),
 					inbound_channel_request_by_id: HashMap::new(),
 					latest_features: init_msg.features.clone(),
 					pending_msg_events: Vec::new(),
@@ -7729,9 +7640,7 @@ where
 			if peer_state_mutex_opt.is_none() { return; }
 			let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
-			peer_state.channel_by_id.keys().cloned()
-				.chain(peer_state.outbound_v1_channel_by_id.keys().cloned())
-				.chain(peer_state.inbound_v1_channel_by_id.keys().cloned()).collect()
+			peer_state.channel_by_id.keys().cloned().collect()
 		};
 		for channel_id in channel_ids {
 			// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
@@ -7745,7 +7654,7 @@ where
 				if peer_state_mutex_opt.is_none() { return; }
 				let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 				let peer_state = &mut *peer_state_lock;
-				if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) {
+				if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
 					if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
 						peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
 							node_id: *counterparty_node_id,
@@ -8870,8 +8779,6 @@ where
 		let peer_state_from_chans = |channel_by_id| {
 			PeerState {
 				channel_by_id,
-				outbound_v1_channel_by_id: HashMap::new(),
-				inbound_v1_channel_by_id: HashMap::new(),
 				inbound_channel_request_by_id: HashMap::new(),
 				latest_features: InitFeatures::empty(),
 				pending_msg_events: Vec::new(),
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 8180e3d6440..3674c429db8 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -811,28 +811,6 @@ macro_rules! get_channel_ref {
 	}
 }

-#[cfg(test)]
-macro_rules! get_outbound_v1_channel_ref {
-	($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => {
-		{
-			$per_peer_state_lock = $node.node.per_peer_state.read().unwrap();
-			$peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
-			$peer_state_lock.outbound_v1_channel_by_id.get_mut(&$channel_id).unwrap()
-		}
-	}
-}
-
-#[cfg(test)]
-macro_rules! get_inbound_v1_channel_ref {
-	($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => {
-		{
-			$per_peer_state_lock = $node.node.per_peer_state.read().unwrap();
-			$peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
-			$peer_state_lock.inbound_v1_channel_by_id.get_mut(&$channel_id).unwrap()
-		}
-	}
-}
-
 #[cfg(test)]
 macro_rules! get_feerate {
 	($node: expr, $counterparty_node: expr, $channel_id: expr) => {
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index b193c3527b0..579f9ffa2bf 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -181,14 +181,15 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
 	let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
 	let mut sender_node_per_peer_lock;
 	let mut sender_node_peer_state_lock;
-	if send_from_initiator {
-		let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
-		chan.context.holder_selected_channel_reserve_satoshis = 0;
-		chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
-	} else {
-		let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
-		chan.context.holder_selected_channel_reserve_satoshis = 0;
-		chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+
+	let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+	match channel_phase {
+		ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
+			let chan_context = channel_phase.chan_context_mut();
+			chan_context.holder_selected_channel_reserve_satoshis = 0;
+			chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+		},
+		ChannelPhase::Funded(_) => assert!(false),
 	}
 }
@@ -8884,9 +8885,13 @@ fn test_duplicate_chan_id() {
 		// another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
 		// try to create another channel. Instead, we drop the channel entirely here (leaving the
 		// channelmanager in a possibly nonsense state instead).
-		let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
-		let logger = test_utils::TestLogger::new();
-		as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+		match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
+			ChannelPhase::UnfundedOutboundV1(chan) => {
+				let logger = test_utils::TestLogger::new();
+				chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+			},
+			_ => panic!("Unexpected ChannelPhase variant"),
+		}
 	};

 	check_added_monitors!(nodes[0], 0);

 	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
@@ -9493,8 +9498,12 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 	if on_holder_tx {
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
-		chan.context.holder_dust_limit_satoshis = 546;
+		match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
+			ChannelPhase::UnfundedOutboundV1(chan) => {
+				chan.context.holder_dust_limit_satoshis = 546;
+			},
+			_ => panic!("Unexpected ChannelPhase variant"),
+		}
 	}

 	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
@@ -9978,7 +9987,7 @@ fn test_remove_expired_outbound_unfunded_channels() {
 	let check_outbound_channel_existence = |should_exist: bool| {
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-		assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+		assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
 	};

 	// Channel should exist without any timer ticks.
@@ -10029,7 +10038,7 @@ fn test_remove_expired_inbound_unfunded_channels() {
 	let check_inbound_channel_existence = |should_exist: bool| {
 		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
 		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-		assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+		assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
 	};

 	// Channel should exist without any timer ticks.
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 0dc2fc47b10..9103cbf7ce2 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -1731,7 +1731,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 	let temp_chan_id = nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
 	let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
 	assert_eq!(unusable_chan_err , APIError::ChannelUnavailable {
-		err: format!("Funded channel with id {} not found for the passed counterparty node_id {}.",
+		err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
 			log_bytes!(temp_chan_id), nodes[2].node.get_our_node_id()) });
 	assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
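
Reviewer note (not part of the patch): with the three per-peer maps collapsed into `channel_by_id`, call sites now select behavior by matching on the `ChannelPhase` variant instead of probing separate maps, and the funding transition becomes an in-place replacement of the map value rather than a remove-from-one-map, insert-into-another step. Below is a minimal sketch of the resulting access pattern, assuming the `ChannelPhase` enum and `chan_context()` accessor used throughout this patch; the helper functions and the `Signer` bound are illustrative only and do not appear in this change:

	// Hypothetical helper: phase-agnostic reads go through the shared context,
	// so the caller no longer cares which of the old maps the channel lived in.
	// The `ChannelSigner` bound on `PeerState` is schematic here.
	fn channel_value_sats<Signer: ChannelSigner>(
		peer_state: &PeerState<Signer>, channel_id: &[u8; 32],
	) -> Option<u64> {
		peer_state.channel_by_id
			.get(channel_id)
			.map(|phase| phase.chan_context().get_value_satoshis())
	}

	// Hypothetical helper: phase-specific handling matches on the variant, as the
	// rewritten `internal_accept_channel`/`internal_funding_created` handlers do.
	fn describe_phase(phase: &ChannelPhase) -> &'static str {
		match phase {
			ChannelPhase::UnfundedOutboundV1(_) => "sent open_channel, awaiting accept_channel",
			ChannelPhase::UnfundedInboundV1(_) => "sent accept_channel, awaiting funding_created",
			ChannelPhase::Funded(_) => "funding exchanged, channel is being monitored",
		}
	}

A side effect visible in `internal_funding_created` above: because one map now holds every phase, a message arriving for a channel in the wrong phase can be rejected explicitly (the new `Some(ChannelPhase::Funded(_)) | Some(ChannelPhase::UnfundedOutboundV1(_))` arm) instead of being indistinguishable from an unknown channel id.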