Add HTLCsTimedOut closing reason
benthecarman committed Mar 18, 2024
1 parent 2c9dbb9 · commit d8c63a5
Showing 6 changed files with 84 additions and 20 deletions.
lightning/src/chain/channelmonitor.rs (61 additions & 6 deletions)
@@ -50,7 +50,7 @@ use crate::chain::Filter;
use crate::util::logger::{Logger, Record};
use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
use crate::util::byte_utils;
- use crate::events::{Event, EventHandler};
+ use crate::events::{ClosureReason, Event, EventHandler};
use crate::events::bump_transaction::{AnchorDescriptor, BumpTransactionEvent};

use crate::prelude::*;
@@ -155,6 +155,17 @@ pub enum MonitorEvent {
/// A monitor event containing an HTLCUpdate.
HTLCEvent(HTLCUpdate),

+ /// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
+ /// channel. Holds information about the channel and why it was closed.
+ HolderForceClosedWithInfo {
+     /// The reason the channel was closed.
+     reason: ClosureReason,
+     /// The funding outpoint of the channel.
+     outpoint: OutPoint,
+     /// The channel ID of the channel.
+     channel_id: ChannelId,
+ },

/// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
/// channel.
HolderForceClosed(OutPoint),
@@ -184,6 +195,11 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
(2, monitor_update_id, required),
(4, channel_id, required),
},
+ (5, HolderForceClosedWithInfo) => {
+     (0, reason, upgradable_required),
+     (2, outpoint, required),
+     (4, channel_id, required),
+ },
;
(2, HTLCEvent),
(4, HolderForceClosed),
@@ -1059,6 +1075,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
writer.write_all(&(self.pending_monitor_events.iter().filter(|ev| match ev {
MonitorEvent::HTLCEvent(_) => true,
MonitorEvent::HolderForceClosed(_) => true,
+ MonitorEvent::HolderForceClosedWithInfo { .. } => true,
_ => false,
}).count() as u64).to_be_bytes())?;
for event in self.pending_monitor_events.iter() {
@@ -1068,6 +1085,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
upd.write(writer)?;
},
MonitorEvent::HolderForceClosed(_) => 1u8.write(writer)?,
+ // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. To keep
+ // backwards compatibility, we write a `HolderForceClosed` event along with the
+ // `HolderForceClosedWithInfo` event. This is deduplicated in the reader.
+ MonitorEvent::HolderForceClosedWithInfo { .. } => 1u8.write(writer)?,
_ => {}, // Covered in the TLV writes below
}
}
@@ -1099,10 +1120,23 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
self.lockdown_from_offchain.write(writer)?;
self.holder_tx_signed.write(writer)?;

+ // If we have a `HolderForceClosedWithInfo` event, we need to write the `HolderForceClosed` for backwards compatibility.
+ let pending_monitor_events = match self.pending_monitor_events.iter().find(|ev| match ev {
+     MonitorEvent::HolderForceClosedWithInfo { .. } => true,
+     _ => false,
+ }) {
+     Some(MonitorEvent::HolderForceClosedWithInfo { outpoint, .. }) => {
+         let mut pending_monitor_events = self.pending_monitor_events.clone();
+         pending_monitor_events.push(MonitorEvent::HolderForceClosed(*outpoint));
+         pending_monitor_events
+     }
+     _ => self.pending_monitor_events.clone(),
+ };

write_tlv_fields!(writer, {
(1, self.funding_spend_confirmed, option),
(3, self.htlcs_resolved_on_chain, required_vec),
- (5, self.pending_monitor_events, required_vec),
+ (5, pending_monitor_events, required_vec),
(7, self.funding_spend_seen, required),
(9, self.counterparty_node_id, option),
(11, self.confirmed_commitment_tx_counterparty_output, option),
@@ -2727,7 +2761,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
}
}

- fn generate_claimable_outpoints_and_watch_outputs(&mut self) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
+ fn generate_claimable_outpoints_and_watch_outputs(&mut self, is_htlc_timeout: bool) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
let funding_outp = HolderFundingOutput::build(
self.funding_redeemscript.clone(),
self.channel_value_satoshis,
@@ -2739,7 +2773,18 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
self.best_block.height, self.best_block.height
);
let mut claimable_outpoints = vec![commitment_package];
- self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
+ let reason = if is_htlc_timeout {
+     ClosureReason::HTLCsTimedOut
+ } else {
+     ClosureReason::HolderForceClosed
+ };
+ let event = MonitorEvent::HolderForceClosedWithInfo {
+     reason,
+     outpoint: self.funding_info.0,
+     channel_id: self.channel_id,
+ };
+ self.pending_monitor_events.push(event);

// Although we aren't signing the transaction directly here, the transaction will be signed
// in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
// new channel updates.
@@ -2775,7 +2820,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
F::Target: FeeEstimator,
L::Target: Logger,
{
- let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs();
+ let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs(false);
self.onchain_tx_handler.update_claims_view_from_requests(
claimable_outpoints, self.best_block.height, self.best_block.height, broadcaster,
fee_estimator, logger
@@ -3778,7 +3823,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {

let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
if should_broadcast {
- let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs();
+ let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs(true);
claimable_outpoints.append(&mut new_outpoints);
watch_outputs.append(&mut new_outputs);
}
@@ -4605,6 +4650,16 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)>
(19, channel_id, option),
});

+ // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. If we have both
+ // events, we can remove the `HolderForceClosed` event and just keep the `HolderForceClosedWithInfo`.
+ if let Some(ref mut pending_monitor_events) = pending_monitor_events {
+     if pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosed(_))) &&
+         pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosedWithInfo { .. }))
+     {
+         pending_monitor_events.retain(|e| !matches!(e, MonitorEvent::HolderForceClosed(_)));
+     }
+ }

// Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the
// wrong `counterparty_payment_script` was being tracked. Fix it now on deserialization to
// give them a chance to recognize the spendable output.
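
The write path above deliberately duplicates information: `HolderForceClosedWithInfo` is serialized under the odd variant type 5, which pre-0.0.122 readers skip per LDK's odd-is-optional TLV convention, so the writer also emits a legacy `HolderForceClosed` to guarantee old readers still learn of the force-close, and the reader drops the legacy copy whenever both survive the round trip. A minimal sketch of that round-trip pattern, using simplified stand-in types (`Ev`, `events_for_write`, and `dedupe_after_read` are illustrative, not LDK APIs):

#[derive(Clone, Debug)]
enum Ev {
    // Stand-in for MonitorEvent::HolderForceClosed(OutPoint).
    Legacy(u64),
    // Stand-in for MonitorEvent::HolderForceClosedWithInfo { .. }.
    WithInfo { outpoint: u64, reason: &'static str },
}

// Writer side: if a new-style event is pending, also append the legacy form
// so readers that only understand `Legacy` still learn of the close.
fn events_for_write(pending: &[Ev]) -> Vec<Ev> {
    let mut out = pending.to_vec();
    if let Some(Ev::WithInfo { outpoint, .. }) =
        pending.iter().find(|e| matches!(e, Ev::WithInfo { .. }))
    {
        out.push(Ev::Legacy(*outpoint));
    }
    out
}

// Reader side: a reader that understands both forms keeps only the richer one.
fn dedupe_after_read(mut events: Vec<Ev>) -> Vec<Ev> {
    if events.iter().any(|e| matches!(e, Ev::Legacy(_)))
        && events.iter().any(|e| matches!(e, Ev::WithInfo { .. }))
    {
        events.retain(|e| !matches!(e, Ev::Legacy(_)));
    }
    events
}

An old reader deserializes only the `Legacy` copy; a new reader sees both and keeps `WithInfo`, so no consumer observes a duplicate close.
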
lightning/src/events/mod.rs (5 additions & 1 deletion)
@@ -232,6 +232,8 @@ pub enum ClosureReason {
/// Another channel in the same funding batch closed before the funding transaction
/// was ready to be broadcast.
FundingBatchClosure,
+ /// One of our HTLCs timed out in a channel, causing us to force close the channel.
+ HTLCsTimedOut,
}

impl core::fmt::Display for ClosureReason {
@@ -241,7 +243,7 @@ impl core::fmt::Display for ClosureReason {
ClosureReason::CounterpartyForceClosed { peer_msg } => {
f.write_fmt(format_args!("counterparty force-closed with message: {}", peer_msg))
},
- ClosureReason::HolderForceClosed => f.write_str("user manually force-closed the channel"),
+ ClosureReason::HolderForceClosed => f.write_str("user force-closed the channel"),
ClosureReason::LegacyCooperativeClosure => f.write_str("the channel was cooperatively closed"),
ClosureReason::CounterpartyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by our peer"),
ClosureReason::LocallyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by us"),
@@ -255,6 +257,7 @@ impl core::fmt::Display for ClosureReason {
ClosureReason::OutdatedChannelManager => f.write_str("the ChannelManager read from disk was stale compared to ChannelMonitor(s)"),
ClosureReason::CounterpartyCoopClosedUnfundedChannel => f.write_str("the peer requested the unfunded channel be closed"),
ClosureReason::FundingBatchClosure => f.write_str("another channel in the same funding batch closed"),
+ ClosureReason::HTLCsTimedOut => f.write_str("htlcs on the channel timed out"),
}
}
}
@@ -272,6 +275,7 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
(15, FundingBatchClosure) => {},
(17, CounterpartyInitiatedCooperativeClosure) => {},
(19, LocallyInitiatedCooperativeClosure) => {},
+ (21, HTLCsTimedOut) => {},
);

/// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
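
For consumers, the new reason surfaces through `Event::ChannelClosed` exactly like the existing ones (the test hunks below assert on it the same way). A sketch of a handler that branches on it — the handler itself is illustrative, not part of this commit:

use lightning::events::{ClosureReason, Event};

// Illustrative event-loop fragment: distinguish HTLC-timeout force-closes
// from user-initiated ones, falling back to the Display impl shown above.
fn on_event(event: Event) {
    if let Event::ChannelClosed { reason, .. } = event {
        match reason {
            // New in this commit: the monitor force-closed the channel to
            // claim HTLCs on chain before they could time out.
            ClosureReason::HTLCsTimedOut => eprintln!("closed: HTLCs timed out"),
            ClosureReason::HolderForceClosed => eprintln!("closed: user force-close"),
            other => eprintln!("closed: {}", other),
        }
    }
}
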
lightning/src/ln/channelmanager.rs (8 additions & 3 deletions)
@@ -7374,7 +7374,7 @@
self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
}
},
- MonitorEvent::HolderForceClosed(_funding_outpoint) => {
+ MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
let counterparty_node_id_opt = match counterparty_node_id {
Some(cp_id) => Some(cp_id),
None => {
@@ -7392,7 +7392,12 @@
let pending_msg_events = &mut peer_state.pending_msg_events;
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
- failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
+ let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+     reason
+ } else {
+     ClosureReason::HolderForceClosed
+ };
+ failed_channels.push(chan.context.force_shutdown(false, reason.clone()));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
@@ -7401,7 +7406,7 @@
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.context.get_counterparty_node_id(),
action: msgs::ErrorAction::DisconnectPeer {
- msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
+ msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: reason.to_string() })
},
});
}
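
Both the legacy `HolderForceClosed` and the new `HolderForceClosedWithInfo` monitor events now funnel into the same arm above; the closure reason falls back to `ClosureReason::HolderForceClosed` when the event carries none, and the disconnect message sent to the peer now reports that reason's Display text rather than a fixed "Channel force-closed". The fallback, restated in isolation as a sketch (`closure_reason_for` is illustrative, not an LDK API):

use lightning::chain::channelmonitor::MonitorEvent;
use lightning::events::ClosureReason;

// Mirrors the hunk above: prefer the reason carried by the event, otherwise
// fall back to the legacy catch-all.
fn closure_reason_for(monitor_event: &MonitorEvent) -> ClosureReason {
    match monitor_event {
        MonitorEvent::HolderForceClosedWithInfo { reason, .. } => reason.clone(),
        _ => ClosureReason::HolderForceClosed,
    }
}
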
lightning/src/ln/functional_tests.rs (6 additions & 6 deletions)
@@ -2417,7 +2417,7 @@ fn channel_monitor_network_test() {
}
check_added_monitors!(nodes[4], 1);
test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
- check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);

mine_transaction(&nodes[4], &node_txn[0]);
check_preimage_claim(&nodes[4], &node_txn);
@@ -2430,7 +2430,7 @@

assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
Ok(ChannelMonitorUpdateStatus::Completed));
- check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
}

#[test]
@@ -5682,7 +5682,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
}

fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -5713,7 +5713,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
}

fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -5759,7 +5759,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
} else {
expect_payment_failed!(nodes[0], our_payment_hash, true);
}
@@ -8654,7 +8654,7 @@ fn test_concurrent_monitor_claim() {
let height = HTLC_TIMEOUT_BROADCAST + 1;
connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
check_closed_broadcast(&nodes[0], 1, true);
- check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
+ check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
[nodes[1].node.get_our_node_id()], 100000);
watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
check_added_monitors(&nodes[0], 1);
lightning/src/ln/monitor_tests.rs (2 additions & 2 deletions)
@@ -1188,14 +1188,14 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_
assert!(failed_payments.is_empty());
if let Event::PendingHTLCsForwardable { .. } = events[0] {} else { panic!(); }
match &events[1] {
- Event::ChannelClosed { reason: ClosureReason::HolderForceClosed, .. } => {},
+ Event::ChannelClosed { reason: ClosureReason::HTLCsTimedOut, .. } => {},
_ => panic!(),
}

connect_blocks(&nodes[1], htlc_cltv_timeout + 1 - 10);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 1000000);

// Prior to channel closure, B considers the preimage HTLC as its own, and otherwise only
// lists the two on-chain timeout-able HTLCs as claimable balances.
lightning/src/ln/reorg_tests.rs (2 additions & 2 deletions)
@@ -465,7 +465,7 @@ fn test_set_outpoints_partial_claiming() {
// Connect blocks on node B
connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast!(nodes[1], true);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
check_added_monitors!(nodes[1], 1);
// Verify node B broadcast 2 HTLC-timeout txn
let partial_claim_tx = {
@@ -807,7 +807,7 @@ fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_c
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
- check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+ check_closed_event(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false, &[nodes[1].node.get_our_node_id()], 100_000);

{
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
