Add peer state maps for V2 inbound/outbound channels
dunxen committed Jul 12, 2023
1 parent a3f0dd8 commit a22bd08
Showing 1 changed file with 78 additions and 8 deletions.
lightning/src/ln/channelmanager.rs (86 changes: 78 additions & 8 deletions)
```diff
@@ -40,7 +40,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
+use crate::ln::channel::{Channel, ChannelContext, OutboundV1Channel, InboundV1Channel, OutboundV2Channel, InboundV2Channel, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::InvoiceFeatures;
```
```diff
@@ -628,6 +628,27 @@ pub(super) struct PeerState<Signer: ChannelSigner> {
 	/// been assigned a `channel_id`, the entry in this map is removed and one is created in
 	/// `channel_by_id`.
 	pub(super) inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel<Signer>>,
+	/// `(temporary_)channel_id` -> `OutboundV2Channel`.
+	///
+	/// Holds all outbound V2 channels where the peer is the counterparty. V2 channels are assigned
+	/// a `channel_id` before a funding transaction is created interactively as it's derived from
+	/// both parties' revocation basepoints once these are known. Hence, this map's keys are either
+	/// temporary channel IDs or channel IDs.
+	///
+	/// The entries in this map are only moved to `channel_by_id` once interactive transaction
+	/// construction completes successfully.
+	pub(super) outbound_v2_channel_by_id: HashMap<[u8; 32], OutboundV2Channel<Signer>>,
+	/// `channel_id` -> `InboundV2Channel`.
+	///
+	/// Holds all inbound V2 channels where the peer is the counterparty. V2 channels are assigned
+	/// a `channel_id` before a funding transaction is created interactively as it's derived from
+	/// both parties' revocation basepoints once these are known. At the stage of receiving an
+	/// `open_channel2` request, we have enough information to derive the `channel_id`. Hence, this
+	/// map's keys are always `channel_id`s.
+	///
+	/// The entries in this map are only moved to `channel_by_id` once interactive transaction
+	/// construction completes successfully.
+	pub(super) inbound_v2_channel_by_id: HashMap<[u8; 32], InboundV2Channel<Signer>>,
 	/// The latest `InitFeatures` we heard from the peer.
 	latest_features: InitFeatures,
 	/// Messages to send to the peer - pushed to in the same lock that they are generated in (except
```
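The `channel_id` derivation these new doc comments refer to comes from the dual-funding (V2 channel establishment) proposal: once both sides' revocation basepoints are known, the id is the SHA256 of the two serialized basepoints in lexicographical order, so both peers arrive at the same id without needing a funding outpoint. A minimal sketch of that derivation, assuming the rust-bitcoin `hashes` module and secp256k1's `PublicKey` (this is not LDK's implementation):

```rust
// Sketch only: v2 channel_id per the dual-funding proposal, i.e.
// SHA256(lesser_revocation_basepoint || greater_revocation_basepoint).
// Method names assume bitcoin_hashes 0.12+; older versions use `into_inner()`
// instead of `to_byte_array()`.
use bitcoin::hashes::{sha256, Hash};
use bitcoin::secp256k1::PublicKey;

fn v2_channel_id(a: &PublicKey, b: &PublicKey) -> [u8; 32] {
    // Order the 33-byte compressed serializations lexicographically so both
    // peers compute the same id regardless of who is opener or acceptor.
    let (lesser, greater) = if a.serialize() <= b.serialize() { (a, b) } else { (b, a) };
    let mut buf = [0u8; 66];
    buf[..33].copy_from_slice(&lesser.serialize());
    buf[33..].copy_from_slice(&greater.serialize());
    sha256::Hash::hash(&buf).to_byte_array()
}
```

Because the inputs for an inbound V2 channel are all present in `open_channel2`, the acceptor can compute the final id immediately, which is why `inbound_v2_channel_by_id` never needs temporary-id keys.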
```diff
@@ -682,14 +703,18 @@ impl <Signer: ChannelSigner> PeerState<Signer> {
 	fn total_channel_count(&self) -> usize {
 		self.channel_by_id.len() +
 			self.outbound_v1_channel_by_id.len() +
-			self.inbound_v1_channel_by_id.len()
+			self.inbound_v1_channel_by_id.len() +
+			self.outbound_v2_channel_by_id.len() +
+			self.inbound_v2_channel_by_id.len()
 	}
 
 	// Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
 	fn has_channel(&self, channel_id: &[u8; 32]) -> bool {
 		self.channel_by_id.contains_key(channel_id) ||
 			self.outbound_v1_channel_by_id.contains_key(channel_id) ||
-			self.inbound_v1_channel_by_id.contains_key(channel_id)
+			self.inbound_v1_channel_by_id.contains_key(channel_id) ||
+			self.outbound_v2_channel_by_id.contains_key(channel_id) ||
+			self.inbound_v2_channel_by_id.contains_key(channel_id)
 	}
 }
 
```
```diff
@@ -1735,8 +1760,8 @@ macro_rules! convert_chan_err {
 		},
 		ChannelError::Close(msg) => {
 			log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
-			update_maps_on_chan_removal!($self, &$channel.context);
-			let shutdown_res = $channel.context.force_shutdown(true);
+			update_maps_on_chan_removal!($self, &$channel.context());
+			let shutdown_res = $channel.context_mut().force_shutdown(true);
 			(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
 				shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
 		},
```
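The switch here from field access (`$channel.context`) to `context()`/`context_mut()` calls suggests the macro is being made usable with any channel type that exposes its shared context through accessor methods rather than a public field. A standalone sketch of that shape (all names here are illustrative stand-ins, not LDK's):

```rust
// Illustrative only: an accessor pattern matching the `$channel.context()` /
// `$channel.context_mut()` calls in the macro above.
struct ChannelContextDemo {
    channel_id: [u8; 32],
}

trait HasChannelContext {
    fn context(&self) -> &ChannelContextDemo;
    fn context_mut(&mut self) -> &mut ChannelContextDemo;
}

struct OutboundV2ChannelDemo {
    ctx: ChannelContextDemo,
}

impl HasChannelContext for OutboundV2ChannelDemo {
    fn context(&self) -> &ChannelContextDemo { &self.ctx }
    fn context_mut(&mut self) -> &mut ChannelContextDemo { &mut self.ctx }
}

fn main() {
    // Shared code can now reach the context uniformly via the accessors.
    let mut chan = OutboundV2ChannelDemo { ctx: ChannelContextDemo { channel_id: [0; 32] } };
    chan.context_mut().channel_id = [1; 32];
    assert_eq!(chan.context().channel_id, [1; 32]);
}
```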
```diff
@@ -1806,7 +1831,7 @@ macro_rules! remove_channel {
 	($self: expr, $entry: expr) => {
 		{
 			let channel = $entry.remove_entry().1;
-			update_maps_on_chan_removal!($self, &channel.context);
+			update_maps_on_chan_removal!($self, &channel.context());
 			channel
 		}
 	}
```
```diff
@@ -1926,7 +1951,7 @@ macro_rules! handle_new_monitor_update {
 			ChannelMonitorUpdateStatus::PermanentFailure => {
 				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
 					log_bytes!($chan.context.channel_id()[..]));
-				update_maps_on_chan_removal!($self, &$chan.context);
+				update_maps_on_chan_removal!($self, &$chan.context());
 				let res = Err(MsgHandleErrInternal::from_finish_shutdown(
 					"ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
 					$chan.context.get_user_id(), $chan.context.force_shutdown(false),
```
```diff
@@ -2280,6 +2305,16 @@ where
 					peer_state.latest_features.clone(), &self.fee_estimator);
 				res.push(details);
 			}
+			for (_channel_id, channel) in peer_state.inbound_v2_channel_by_id.iter() {
+				let details = ChannelDetails::from_channel_context(&channel.context.common, best_block_height,
+					peer_state.latest_features.clone(), &self.fee_estimator);
+				res.push(details);
+			}
+			for (_channel_id, channel) in peer_state.outbound_v2_channel_by_id.iter() {
+				let details = ChannelDetails::from_channel_context(&channel.context.common, best_block_height,
+					peer_state.latest_features.clone(), &self.fee_estimator);
+				res.push(details);
+			}
 		}
 	}
 	res
```
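Note the `.context.common` path in the new loops: unlike the V1 types, whose `ChannelContext` is reached directly via `.context`, the V2 types evidently wrap the shared context in a `common` field alongside V2-specific state. A hypothetical sketch of that layout, inferred from the accesses above (field names beyond `context` and `common` are guesses for illustration):

```rust
// Hypothetical layout inferred from the `.context.common` accesses above;
// not LDK's actual definitions.
struct ChannelContextDemo { /* state shared by all channel kinds */ }

struct V2ChannelContextDemo {
    common: ChannelContextDemo, // shared state, as read by list_channels
    // ...plus dual-funding-specific state (interactive tx construction, etc.)
}

struct InboundV2ChannelDemo {
    context: V2ChannelContextDemo,
}
```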
```diff
@@ -2537,6 +2572,20 @@ where
 				self.finish_force_close_channel(chan.context.force_shutdown(false));
 				// Prefunded channel has no update
 				(None, chan.context.get_counterparty_node_id())
+			} else if let hash_map::Entry::Occupied(chan) = peer_state.outbound_v2_channel_by_id.entry(channel_id.clone()) {
+				log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+				self.issue_channel_close_events(&chan.get().context.common, closure_reason);
+				let mut chan = remove_channel!(self, chan);
+				self.finish_force_close_channel(chan.context.common.force_shutdown(false));
+				// Prefunded channel has no update
+				(None, chan.context.common.get_counterparty_node_id())
+			} else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v2_channel_by_id.entry(channel_id.clone()) {
+				log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+				self.issue_channel_close_events(&chan.get().context.common, closure_reason);
+				let mut chan = remove_channel!(self, chan);
+				self.finish_force_close_channel(chan.context.common.force_shutdown(false));
+				// Prefunded channel has no update
+				(None, chan.context.common.get_counterparty_node_id())
 			} else {
 				return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
 			}
```
```diff
@@ -5164,6 +5213,11 @@ where
 				num_unfunded_channels += 1;
 			}
 		}
+		for (_, chan) in peer.inbound_v2_channel_by_id.iter() {
+			if chan.context.common.minimum_depth().unwrap_or(1) != 0 {
+				num_unfunded_channels += 1;
+			}
+		}
 		num_unfunded_channels
 	}
 
```
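The `minimum_depth().unwrap_or(1) != 0` test mirrors the V1 loop above it: a channel only counts toward the per-peer unfunded-channel limit if it still needs confirmations, so zero-conf channels (minimum depth 0) are exempt, and an unknown depth is conservatively treated as needing at least one confirmation. As a standalone illustration of that predicate (function name is ours, not LDK's):

```rust
// Sketch of the check used above: does this not-yet-funded channel count
// toward the anti-DoS limit on unfunded channels per peer?
fn counts_toward_unfunded_limit(minimum_depth: Option<u32>) -> bool {
    // Missing depth defaults to 1, i.e. "needs at least one confirmation".
    minimum_depth.unwrap_or(1) != 0
}

fn main() {
    assert!(counts_toward_unfunded_limit(Some(3)));  // normal channel: counts
    assert!(counts_toward_unfunded_limit(None));     // unknown depth: counts
    assert!(!counts_toward_unfunded_limit(Some(0))); // zero-conf: exempt
}
```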

```diff
@@ -7074,6 +7128,16 @@ where
 				self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
 				false
 			});
+			peer_state.inbound_v2_channel_by_id.retain(|_, chan| {
+				update_maps_on_chan_removal!(self, &chan.context.common);
+				self.issue_channel_close_events(&chan.context.common, ClosureReason::DisconnectedPeer);
+				false
+			});
+			peer_state.outbound_v2_channel_by_id.retain(|_, chan| {
+				update_maps_on_chan_removal!(self, &chan.context.common);
+				self.issue_channel_close_events(&chan.context.common, ClosureReason::DisconnectedPeer);
+				false
+			});
 			pending_msg_events.retain(|msg| {
 				match msg {
 					// V1 Channel Establishment
```
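These `retain` blocks follow the existing V1 pattern for peer disconnection: the closure runs once per channel, performs the removal side effects (global map cleanup and a `DisconnectedPeer` close event), and returns `false` so every unfunded V2 channel is dropped from the peer's state. A self-contained illustration of that drain-with-side-effects idiom, with simplified types:

```rust
use std::collections::HashMap;

// Illustration only: `retain` returning `false` empties the map while giving
// us a per-entry hook for cleanup, matching the disconnect handling above.
fn drop_all_with_cleanup(channels: &mut HashMap<u64, String>) {
    channels.retain(|id, chan| {
        println!("closing channel {} ({}): peer disconnected", id, chan);
        false // remove every entry after its cleanup runs
    });
}

fn main() {
    let mut channels = HashMap::from([
        (1, "inbound_v2".to_string()),
        (2, "outbound_v2".to_string()),
    ]);
    drop_all_with_cleanup(&mut channels);
    assert!(channels.is_empty());
}
```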
```diff
@@ -7157,6 +7221,8 @@ where
 				channel_by_id: HashMap::new(),
 				outbound_v1_channel_by_id: HashMap::new(),
 				inbound_v1_channel_by_id: HashMap::new(),
+				outbound_v2_channel_by_id: HashMap::new(),
+				inbound_v2_channel_by_id: HashMap::new(),
 				latest_features: init_msg.features.clone(),
 				pending_msg_events: Vec::new(),
 				in_flight_monitor_updates: BTreeMap::new(),
```
```diff
@@ -7235,7 +7301,9 @@ where
 			let peer_state = &mut *peer_state_lock;
 			peer_state.channel_by_id.keys().cloned()
 				.chain(peer_state.outbound_v1_channel_by_id.keys().cloned())
-				.chain(peer_state.inbound_v1_channel_by_id.keys().cloned()).collect()
+				.chain(peer_state.inbound_v1_channel_by_id.keys().cloned())
+				.chain(peer_state.outbound_v2_channel_by_id.keys().cloned())
+				.chain(peer_state.inbound_v2_channel_by_id.keys().cloned()).collect()
 		};
 		for channel_id in channel_ids {
 			// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
```
```diff
@@ -8368,6 +8436,8 @@ where
 				channel_by_id,
 				outbound_v1_channel_by_id: HashMap::new(),
 				inbound_v1_channel_by_id: HashMap::new(),
+				outbound_v2_channel_by_id: HashMap::new(),
+				inbound_v2_channel_by_id: HashMap::new(),
 				latest_features: InitFeatures::empty(),
 				pending_msg_events: Vec::new(),
 				in_flight_monitor_updates: BTreeMap::new(),
```
