diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 81ba30ffb83..175b0e777e8 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -511,7 +511,7 @@ where C::Target: chain::Filter,
 	}

-	#[cfg(test)]
+	#[cfg(any(test, feature = "_test_utils"))]
 	pub fn remove_monitor(&self, funding_txo: &OutPoint) -> ChannelMonitor<ChannelSigner> {
 		self.monitors.write().unwrap().remove(funding_txo).unwrap().monitor
 	}
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 8b74264a1aa..2f3d0594b41 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -775,10 +775,7 @@ impl Readable for IrrevocablyResolvedHTLC {
 /// returned block hash and the current chain and then reconnecting blocks to get to the
 /// best chain) upon deserializing the object!
 pub struct ChannelMonitor<Signer: WriteableEcdsaChannelSigner> {
-	#[cfg(test)]
 	pub(crate) inner: Mutex<ChannelMonitorImpl<Signer>>,
-	#[cfg(not(test))]
-	pub(super) inner: Mutex<ChannelMonitorImpl<Signer>>,
 }

 impl<Signer: WriteableEcdsaChannelSigner> Clone for ChannelMonitor<Signer> where Signer: Clone {
@@ -877,9 +874,9 @@ pub(crate) struct ChannelMonitorImpl<Signer: WriteableEcdsaChannelSigner> {
 	// Obviously Correct (tm) if we just keep track of them explicitly.
 	outputs_to_watch: HashMap<Txid, Vec<(u32, ScriptBuf)>>,

-	#[cfg(test)]
+	#[cfg(any(test, feature = "_test_utils"))]
 	pub onchain_tx_handler: OnchainTxHandler<Signer>,
-	#[cfg(not(test))]
+	#[cfg(not(any(test, feature = "_test_utils")))]
 	onchain_tx_handler: OnchainTxHandler<Signer>,

 	// This is set when the Channel[Manager] generated a ChannelMonitorUpdate which indicated the
@@ -2498,7 +2495,7 @@ macro_rules! fail_unbroadcast_htlcs {
 // witness length match (ie is 136 bytes long). We generate one here which we also use in some
 // in-line tests later.
-#[cfg(test)]
+#[cfg(any(test, feature = "_test_utils"))]
 pub fn deliberately_bogus_accepted_htlc_witness_program() -> Vec<u8> {
 	use bitcoin::blockdata::opcodes;
 	let mut ret = [opcodes::all::OP_NOP.to_u8(); 136];
@@ -2510,7 +2507,7 @@ pub fn deliberately_bogus_accepted_htlc_witness_program() -> Vec<u8> {
 	Vec::from(&ret[..])
 }

-#[cfg(test)]
+#[cfg(any(test, feature = "_test_utils"))]
 pub fn deliberately_bogus_accepted_htlc_witness() -> Vec<Vec<u8>> {
 	vec![Vec::new(), Vec::new(), Vec::new(), Vec::new(), deliberately_bogus_accepted_htlc_witness_program().into()].into()
 }
diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs
index f6eea9d6896..6f61005ce65 100644
--- a/lightning/src/chain/onchaintx.rs
+++ b/lightning/src/chain/onchaintx.rs
@@ -246,10 +246,7 @@ pub struct OnchainTxHandler<ChannelSigner: WriteableEcdsaChannelSigner> {
 	// Key is identifier of the pending claim request, i.e the txid of the initial claiming transaction generated by
 	// us and is immutable until all outpoints of the claimable set are post-anti-reorg-delay solved.
 	// Entry is cache of elements needed to generate a bumped claiming transaction (see ClaimTxBumpMaterial)
-	#[cfg(test)] // Used in functional_test to verify sanitization
 	pub(crate) pending_claim_requests: HashMap<ClaimId, PackageTemplate>,
-	#[cfg(not(test))]
-	pending_claim_requests: HashMap<ClaimId, PackageTemplate>,

 	// Used to track external events that need to be forwarded to the `ChainMonitor`. This `Vec`
 	// essentially acts as an insertion-ordered `HashMap` – there should only ever be one occurrence
@@ -269,10 +266,7 @@
 	// block height, and are immutable until the outpoint has enough confirmations to meet our
 	// [`ANTI_REORG_DELAY`]. The initial confirmation block height is used to remove the entry if
 	// the block gets disconnected.
-	#[cfg(test)] // Used in functional_test to verify sanitization
-	pub claimable_outpoints: HashMap<BitcoinOutPoint, (ClaimId, u32)>,
-	#[cfg(not(test))]
-	claimable_outpoints: HashMap<BitcoinOutPoint, (ClaimId, u32)>,
+	pub(crate) claimable_outpoints: HashMap<BitcoinOutPoint, (ClaimId, u32)>,

 	locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 0d76a2037c6..50cfe4d74f4 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -57,7 +57,8 @@ use crate::sign::type_resolver::ChannelSignerType;

 use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};

-#[cfg(test)]
+#[cfg(any(test, feature = "_test_utils"))]
+#[allow(unused)]
 pub struct ChannelValueStat {
 	pub value_to_self_msat: u64,
 	pub channel_value_msat: u64,
@@ -6171,12 +6172,12 @@ impl<SP: Deref> Channel<SP> where
 		self.context.cur_counterparty_commitment_transaction_number + 2
 	}

-	#[cfg(test)]
+	#[cfg(any(test, feature = "_test_utils"))]
 	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
 		&self.context.holder_signer
 	}

-	#[cfg(test)]
+	#[cfg(any(test, feature = "_test_utils"))]
 	pub fn get_value_stat(&self) -> ChannelValueStat {
 		ChannelValueStat {
 			value_to_self_msat: self.context.value_to_self_msat,
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 67f34369853..8b28ce9c68d 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1889,10 +1889,7 @@ where
 	router: R,

 	/// See `ChannelManager` struct-level documentation for lock order requirements.
-	#[cfg(test)]
 	pub(super) best_block: RwLock<BestBlock>,
-	#[cfg(not(test))]
-	best_block: RwLock<BestBlock>,
 	secp_ctx: Secp256k1<secp256k1::All>,

 	/// Storage for PaymentSecrets and any requirements on future inbound payments before we will
@@ -1981,9 +1978,6 @@ where
 	/// required to access the channel with the `counterparty_node_id`.
 	///
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
-	#[cfg(not(test))]
-	outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
-	#[cfg(test)]
 	pub(crate) outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,

 	/// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
@@ -4567,7 +4561,7 @@ where
 		Ok(())
 	}

-	#[cfg(test)]
+	#[cfg(any(test, feature = "_test_utils"))]
 	pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
 		self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, false, |_, tx| {
 			Ok(OutPoint { txid: tx.txid(), index: output_index })
diff --git a/lightning/src/ln/features.rs b/lightning/src/ln/features.rs
index ff91654a3f7..3dc37ecf601 100644
--- a/lightning/src/ln/features.rs
+++ b/lightning/src/ln/features.rs
@@ -927,7 +927,7 @@ impl Features {
 }

 impl<T: sealed::Wumbo> Features<T> {
-	#[cfg(test)]
+	#[cfg(any(test, feature = "_test_utils"))]
 	pub(crate) fn clear_wumbo(mut self) -> Self {
 		<T as sealed::Wumbo>::clear_bits(&mut self.flags);
 		self
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 73a926d44a6..fad563f43a9 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -937,7 +937,7 @@ macro_rules! get_channel_ref {
 	}
 }

-#[cfg(test)]
+#[cfg(any(test, feature = "_test_utils"))]
 macro_rules! get_feerate {
 	($node: expr, $counterparty_node: expr, $channel_id: expr) => {
 		{
@@ -949,7 +949,7 @@ macro_rules! get_feerate {
 	}
 }

-#[cfg(test)]
+#[cfg(any(test, feature = "_test_utils"))]
 macro_rules! get_channel_type_features {
 	($node: expr, $counterparty_node: expr, $channel_id: expr) => {
 		{
@@ -3473,7 +3473,7 @@ pub fn get_announce_close_broadcast_events<'a, 'b, 'c>(nodes: &Vec<Node<'a, 'b,

-#[cfg(test)]
+#[cfg(any(test, feature = "_test_utils"))]
 macro_rules! get_channel_value_stat {
 	($node: expr, $counterparty_node: expr, $channel_id: expr) => {{
 		let peer_state_lock = $node.node.per_peer_state.read().unwrap();
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 2f7e6353a06..ba649b0cdf7 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -123,8 +123,8 @@ pub fn test_insane_channel_opens() {
 	insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg });
 }

-#[test]
-fn test_funding_exceeds_no_wumbo_limit() {
+#[xtest(feature = "_test_utils")]
+pub fn test_funding_exceeds_no_wumbo_limit() {
 	// Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
 	// them.
 	use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
@@ -209,14 +209,14 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
 	}
 }

-#[test]
-fn test_counterparty_no_reserve() {
+#[xtest(feature = "_test_utils")]
+pub fn test_counterparty_no_reserve() {
 	do_test_counterparty_no_reserve(true);
 	do_test_counterparty_no_reserve(false);
 }

-#[test]
-fn test_async_inbound_update_fee() {
+#[xtest(feature = "_test_utils")]
+pub fn test_async_inbound_update_fee() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -330,8 +330,8 @@ fn test_async_inbound_update_fee() {
 	check_added_monitors!(nodes[1], 1);
 }

-#[test]
-fn test_update_fee_unordered_raa() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_fee_unordered_raa() {
 	// Just the intro to the previous test followed by an out-of-order RAA (which caused a
 	// crash in an earlier version of the update_fee patch)
 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -389,8 +389,8 @@ fn test_update_fee_unordered_raa() {
 	// We can't continue, sadly, because our (1) now has a bogus signature
 }

-#[test]
-fn test_multi_flight_update_fee() {
+#[xtest(feature = "_test_utils")]
+pub fn test_multi_flight_update_fee() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -574,8 +574,8 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
 	expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
 }

-#[test]
-fn test_sanity_on_in_flight_opens() {
+#[xtest(feature = "_test_utils")]
+pub fn test_sanity_on_in_flight_opens() {
 	do_test_sanity_on_in_flight_opens(0);
 	do_test_sanity_on_in_flight_opens(0 | 0b1000_0000);
 	do_test_sanity_on_in_flight_opens(1);
@@ -596,8 +596,8 @@ fn test_sanity_on_in_flight_opens() {
 	do_test_sanity_on_in_flight_opens(8 | 0b1000_0000);
 }

-#[test]
-fn test_update_fee_vanilla() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_fee_vanilla() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -639,8 +639,8 @@ fn test_update_fee_vanilla() {
 	check_added_monitors!(nodes[1], 1);
 }

-#[test]
-fn test_update_fee_that_funder_cannot_afford() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_fee_that_funder_cannot_afford() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -773,8 +773,8 @@ fn test_update_fee_that_funder_cannot_afford() {
 		[nodes[0].node.get_our_node_id()], channel_value);
 }

-#[test]
-fn test_update_fee_with_fundee_update_add_htlc() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_fee_with_fundee_update_add_htlc() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -873,8 +873,8 @@ fn test_update_fee_with_fundee_update_add_htlc() {
 	check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_update_fee() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_fee() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -1115,8 +1115,8 @@ pub fn fake_network_test() {
 	check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 }

-#[test]
-fn holding_cell_htlc_counting() {
+#[xtest(feature = "_test_utils")]
+pub fn holding_cell_htlc_counting() {
 	// Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
 	// to ensure we don't end up with HTLCs sitting around in our holding cell for several
 	// commitment dance rounds.
@@ -1233,8 +1233,8 @@ fn holding_cell_htlc_counting() {
 	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
 }

-#[test]
-fn duplicate_htlc_test() {
+#[xtest(feature = "_test_utils")]
+pub fn duplicate_htlc_test() {
 	// Test that we accept duplicate payment_hash HTLCs across the network and that
 	// claiming/failing them are all separate and don't affect each other
 	let chanmon_cfgs = create_chanmon_cfgs(6);
@@ -1262,8 +1262,8 @@ fn duplicate_htlc_test() {
 	claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
 }

-#[test]
-fn test_duplicate_htlc_different_direction_onchain() {
+#[xtest(feature = "_test_utils")]
+pub fn test_duplicate_htlc_different_direction_onchain() {
 	// Test that ChannelMonitor doesn't generate 2 preimage txn
 	// when we have 2 HTLCs with same preimage that go across a node
 	// in opposite directions, even with the same payment secret.
@@ -1353,8 +1353,8 @@ fn test_duplicate_htlc_different_direction_onchain() {
 	}
 }

-#[test]
-fn test_basic_channel_reserve() {
+#[xtest(feature = "_test_utils")]
+pub fn test_basic_channel_reserve() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -1384,8 +1384,8 @@ fn test_basic_channel_reserve() {
 	send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
 }

-#[test]
-fn test_fee_spike_violation_fails_htlc() {
+#[xtest(feature = "_test_utils")]
+pub fn test_fee_spike_violation_fails_htlc() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -1533,8 +1533,8 @@ fn test_fee_spike_violation_fails_htlc() {
 	check_added_monitors!(nodes[1], 2);
 }

-#[test]
-fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
+#[xtest(feature = "_test_utils")]
+pub fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
 	let mut chanmon_cfgs = create_chanmon_cfgs(2);
 	// Set the fee rate for the channel very high, to the point where the fundee
 	// sending any above-dust amount would result in a channel reserve violation.
@@ -1568,8 +1568,8 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 }

-#[test]
-fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
+#[xtest(feature = "_test_utils")]
+pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 	let mut chanmon_cfgs = create_chanmon_cfgs(2);
 	let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -1624,8 +1624,8 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 		[nodes[1].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
+#[xtest(feature = "_test_utils")]
+pub fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
 	// Test that if we receive many dust HTLCs over an outbound channel, they don't count when
 	// calculating our commitment transaction fee (this was previously broken).
 	let mut chanmon_cfgs = create_chanmon_cfgs(2);
@@ -1666,8 +1666,8 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
 		), true, APIError::ChannelUnavailable { .. }, {});
 }

-#[test]
-fn test_chan_init_feerate_unaffordability() {
+#[xtest(feature = "_test_utils")]
+pub fn test_chan_init_feerate_unaffordability() {
 	// Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to
 	// channel reserve and feerate requirements.
 	let mut chanmon_cfgs = create_chanmon_cfgs(2);
@@ -1703,8 +1703,8 @@ fn test_chan_init_feerate_unaffordability() {
 	}
 }

-#[test]
-fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
+#[xtest(feature = "_test_utils")]
+pub fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
 	// Test that if we receive many dust HTLCs over an inbound channel, they don't count when
 	// calculating our counterparty's commitment transaction fee (this was previously broken).
 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -1733,8 +1733,8 @@ fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
 	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
 }

-#[test]
-fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
+#[xtest(feature = "_test_utils")]
+pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 	let chanmon_cfgs = create_chanmon_cfgs(3);
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
@@ -1803,8 +1803,8 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 		[nodes[0].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_inbound_outbound_capacity_is_not_zero() {
+#[xtest(feature = "_test_utils")]
+pub fn test_inbound_outbound_capacity_is_not_zero() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -1828,8 +1828,8 @@ fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &Chan
 	(commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
 }

-#[test]
-fn test_channel_reserve_holding_cell_htlcs() {
+#[xtest(feature = "_test_utils")]
+pub fn test_channel_reserve_holding_cell_htlcs() {
 	let chanmon_cfgs = create_chanmon_cfgs(3);
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 	// When this test was written, the default base fee floated based on the HTLC count.
@@ -2084,8 +2084,8 @@ fn test_channel_reserve_holding_cell_htlcs() {
 	assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3);
 }

-#[test]
-fn channel_reserve_in_flight_removes() {
+#[xtest(feature = "_test_utils")]
+pub fn channel_reserve_in_flight_removes() {
 	// In cases where one side claims an HTLC, it thinks it has additional available funds that it
 	// can send to its counterparty, but due to update ordering, the other side may not yet have
 	// considered those HTLCs fully removed.
@@ -2239,8 +2239,8 @@ fn channel_reserve_in_flight_removes() {
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
 }

-#[test]
-fn channel_monitor_network_test() {
+#[xtest(feature = "_test_utils")]
+pub fn channel_monitor_network_test() {
 	// Simple test which builds a network of ChannelManagers, connects them to each other, and
 	// tests that ChannelMonitor is able to recover from various states.
 	let chanmon_cfgs = create_chanmon_cfgs(5);
@@ -2431,8 +2431,8 @@ fn channel_monitor_network_test() {
 	check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_justice_tx_htlc_timeout() {
+#[xtest(feature = "_test_utils")]
+pub fn test_justice_tx_htlc_timeout() {
 	// Test justice txn built on revoked HTLC-Timeout tx, against both sides
 	let mut alice_config = UserConfig::default();
 	alice_config.channel_handshake_config.announced_channel = true;
@@ -2494,8 +2494,8 @@ fn test_justice_tx_htlc_timeout() {
 	assert_eq!(nodes[1].node.list_channels().len(), 0);
 }

-#[test]
-fn test_justice_tx_htlc_success() {
+#[xtest(feature = "_test_utils")]
+pub fn test_justice_tx_htlc_success() {
 	// Test justice txn built on revoked HTLC-Success tx, against both sides
 	let mut alice_config = UserConfig::default();
 	alice_config.channel_handshake_config.announced_channel = true;
@@ -2551,8 +2551,8 @@ fn test_justice_tx_htlc_success() {
 	assert_eq!(nodes[1].node.list_channels().len(), 0);
 }

-#[test]
-fn revoked_output_claim() {
+#[xtest(feature = "_test_utils")]
+pub fn revoked_output_claim() {
 	// Simple test to ensure a node will claim a revoked output when a stale remote commitment
 	// transaction is broadcast by its counterparty
 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -2584,8 +2584,8 @@ fn revoked_output_claim() {
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_forming_justice_tx_from_monitor_updates() {
+#[xtest(feature = "_test_utils")]
+pub fn test_forming_justice_tx_from_monitor_updates() {
 	do_test_forming_justice_tx_from_monitor_updates(true);
 	do_test_forming_justice_tx_from_monitor_updates(false);
 }
@@ -2650,8 +2650,8 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment:
 }

-#[test]
-fn claim_htlc_outputs_shared_tx() {
+#[xtest(feature = "_test_utils")]
+pub fn claim_htlc_outputs_shared_tx() {
 	// Node revoked old state, htlcs haven't timed out yet, claim them in shared justice tx
 	let mut chanmon_cfgs = create_chanmon_cfgs(2);
 	chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
@@ -2717,8 +2717,8 @@ fn claim_htlc_outputs_shared_tx() {
 	assert_eq!(nodes[1].node.list_channels().len(), 0);
 }

-#[test]
-fn claim_htlc_outputs_single_tx() {
+#[xtest(feature = "_test_utils")]
+pub fn claim_htlc_outputs_single_tx() {
 	// Node revoked old state, htlcs have timed out, claim each of them in a separate justice tx
 	let mut chanmon_cfgs = create_chanmon_cfgs(2);
 	chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
@@ -2801,8 +2801,8 @@ fn claim_htlc_outputs_single_tx() {
 	assert_eq!(nodes[1].node.list_channels().len(), 0);
 }

-#[test]
-fn test_htlc_on_chain_success() {
+#[xtest(feature = "_test_utils")]
+pub fn test_htlc_on_chain_success() {
 	// Test that in case of a unilateral close onchain, we detect the state of output and pass
 	// the preimage backward accordingly. So here we test that ChannelManager is
 	// broadcasting the right event to other nodes in payment path.
@@ -3146,15 +3146,15 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 	assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 }

-#[test]
-fn test_htlc_on_chain_timeout() {
+#[xtest(feature = "_test_utils")]
+pub fn test_htlc_on_chain_timeout() {
 	do_test_htlc_on_chain_timeout(ConnectStyle::BestBlockFirstSkippingBlocks);
 	do_test_htlc_on_chain_timeout(ConnectStyle::TransactionsFirstSkippingBlocks);
 	do_test_htlc_on_chain_timeout(ConnectStyle::FullBlockViaListen);
 }

-#[test]
-fn test_simple_commitment_revoked_fail_backward() {
+#[xtest(feature = "_test_utils")]
+pub fn test_simple_commitment_revoked_fail_backward() {
 	// Test that in case of a revoked commitment tx, we detect the resolution of output by justice tx
 	// and fail backward accordingly.

@@ -3445,24 +3445,24 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 	assert!(failed_htlcs.contains(&third_payment_hash.0));
 }

-#[test]
-fn test_commitment_revoked_fail_backward_exhaustive_a() {
+#[xtest(feature = "_test_utils")]
+pub fn test_commitment_revoked_fail_backward_exhaustive_a() {
 	do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
 	do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
 	do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
 	do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
 }

-#[test]
-fn test_commitment_revoked_fail_backward_exhaustive_b() {
+#[xtest(feature = "_test_utils")]
+pub fn test_commitment_revoked_fail_backward_exhaustive_b() {
 	do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
 	do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
 	do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
 	do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
 }

-#[test]
-fn fail_backward_pending_htlc_upon_channel_failure() {
+#[xtest(feature = "_test_utils")]
+pub fn fail_backward_pending_htlc_upon_channel_failure() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -3545,8 +3545,8 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
 	check_added_monitors!(nodes[0], 1);
 }

-#[test]
-fn test_htlc_ignore_latest_remote_commitment() {
+#[xtest(feature = "_test_utils")]
+pub fn test_htlc_ignore_latest_remote_commitment() {
 	// Test that HTLC transactions spending the latest remote commitment transaction are simply
 	// ignored if we cannot claim them. This originally tickled an invalid unwrap().
 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -3584,8 +3584,8 @@ fn test_htlc_ignore_latest_remote_commitment() {
 	connect_block(&nodes[1], &block);
 }

-#[test]
-fn test_force_close_fail_back() {
+#[xtest(feature = "_test_utils")]
+pub fn test_force_close_fail_back() {
 	// Check which HTLCs are failed-backwards on channel force-closure
 	let chanmon_cfgs = create_chanmon_cfgs(3);
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
@@ -3663,8 +3663,8 @@ fn test_force_close_fail_back() {
 	check_spends!(htlc_tx, commitment_tx);
 }

-#[test]
-fn test_dup_events_on_peer_disconnect() {
+#[xtest(feature = "_test_utils")]
+pub fn test_dup_events_on_peer_disconnect() {
 	// Test that if we receive a duplicative update_fulfill_htlc message after a reconnect we do
 	// not generate a corresponding duplicative PaymentSent event. This did not use to be the case
 	// as we used to generate the event immediately upon receipt of the payment preimage in the
@@ -3694,8 +3694,8 @@ fn test_dup_events_on_peer_disconnect() {
 	expect_payment_path_successful!(nodes[0]);
 }

-#[test]
-fn test_peer_disconnected_before_funding_broadcasted() {
+#[xtest(feature = "_test_utils")]
+pub fn test_peer_disconnected_before_funding_broadcasted() {
 	// Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
 	// before the funding transaction has been broadcasted, and doesn't reconnect back within time.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -3744,8 +3744,8 @@ fn test_peer_disconnected_before_funding_broadcasted() {
 		, [nodes[0].node.get_our_node_id()], 1000000);
 }

-#[test]
-fn test_simple_peer_disconnect() {
+#[xtest(feature = "_test_utils")]
+pub fn test_simple_peer_disconnect() {
 	// Test that we can reconnect when there are no lost messages
 	let chanmon_cfgs = create_chanmon_cfgs(3);
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
@@ -4074,24 +4074,24 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
 }

-#[test]
-fn test_drop_messages_peer_disconnect_a() {
+#[xtest(feature = "_test_utils")]
+pub fn test_drop_messages_peer_disconnect_a() {
 	do_test_drop_messages_peer_disconnect(0, true);
 	do_test_drop_messages_peer_disconnect(0, false);
 	do_test_drop_messages_peer_disconnect(1, false);
 	do_test_drop_messages_peer_disconnect(2, false);
 }

-#[test]
-fn test_drop_messages_peer_disconnect_b() {
+#[xtest(feature = "_test_utils")]
+pub fn test_drop_messages_peer_disconnect_b() {
 	do_test_drop_messages_peer_disconnect(3, false);
 	do_test_drop_messages_peer_disconnect(4, false);
 	do_test_drop_messages_peer_disconnect(5, false);
 	do_test_drop_messages_peer_disconnect(6, false);
 }

-#[test]
-fn test_channel_ready_without_best_block_updated() {
+#[xtest(feature = "_test_utils")]
+pub fn test_channel_ready_without_best_block_updated() {
 	// Previously, if we were offline when a funding transaction was locked in, and then we came
 	// back online, calling best_block_updated once followed by transactions_confirmed, we'd not
 	// generate a channel_ready until a later best_block_updated. This tests that we generate the
@@ -4116,8 +4116,8 @@
 	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
 }

-#[test]
-fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
+#[xtest(feature = "_test_utils")]
+pub fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -4148,8 +4148,8 @@ fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
 	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
 }

-#[test]
-fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
+#[xtest(feature = "_test_utils")]
+pub fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -4183,8 +4183,8 @@ fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
 	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
 }

-#[test]
-fn test_drop_messages_peer_disconnect_dual_htlc() {
+#[xtest(feature = "_test_utils")]
+pub fn test_drop_messages_peer_disconnect_dual_htlc() {
 	// Test that we can handle reconnecting when both sides of a channel have pending
 	// commitment_updates when we disconnect.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -4397,8 +4397,8 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
 	expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
 }

-#[test]
-fn test_htlc_timeout() {
+#[xtest(feature = "_test_utils")]
+pub fn test_htlc_timeout() {
 	do_test_htlc_timeout(true);
 	do_test_htlc_timeout(false);
 }
@@ -4461,8 +4461,8 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
 	}
 }

-#[test]
-fn test_holding_cell_htlc_add_timeouts() {
+#[xtest(feature = "_test_utils")]
+pub fn test_holding_cell_htlc_add_timeouts() {
 	do_test_holding_cell_htlc_add_timeouts(false);
 	do_test_holding_cell_htlc_add_timeouts(true);
 }
@@ -4495,8 +4495,8 @@ macro_rules! check_spendable_outputs {
 	}
 }

-#[test]
-fn test_claim_sizeable_push_msat() {
+#[xtest(feature = "_test_utils")]
+pub fn test_claim_sizeable_push_msat() {
 	// Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -4523,8 +4523,8 @@ fn test_claim_sizeable_push_msat() {
 	assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
 }

-#[test]
-fn test_claim_on_remote_sizeable_push_msat() {
+#[xtest(feature = "_test_utils")]
+pub fn test_claim_on_remote_sizeable_push_msat() {
 	// Same test as previous, just test on remote commitment tx, as per_commitment_point registration changes depending on whether you're funder/fundee and
 	// to_remote output is encumbered by a P2WPKH
 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -4554,8 +4554,8 @@ fn test_claim_on_remote_sizeable_push_msat() {
 	check_spends!(spend_txn[0], node_txn[0]);
 }

-#[test]
-fn test_claim_on_remote_revoked_sizeable_push_msat() {
+#[xtest(feature = "_test_utils")]
+pub fn test_claim_on_remote_revoked_sizeable_push_msat() {
 	// Same test as previous, just test on remote revoked commitment tx, as per_commitment_point registration changes depending on whether you're funder/fundee and
 	// to_remote output is encumbered by a P2WPKH

 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -4587,8 +4587,8 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
 	check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
 }

-#[test]
-fn test_static_spendable_outputs_preimage_tx() {
+#[xtest(feature = "_test_utils")]
+pub fn test_static_spendable_outputs_preimage_tx() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -4634,8 +4634,8 @@ fn test_static_spendable_outputs_preimage_tx() {
 	check_spends!(spend_txn[0], node_txn[0]);
 }

-#[test]
-fn test_static_spendable_outputs_timeout_tx() {
+#[xtest(feature = "_test_utils")]
+pub fn test_static_spendable_outputs_timeout_tx() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -4681,8 +4681,8 @@ fn test_static_spendable_outputs_timeout_tx() {
 	check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs
 }

-#[test]
-fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
+#[xtest(feature = "_test_utils")]
+pub fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -4716,8 +4716,8 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
 	check_spends!(spend_txn[0], node_txn[0]);
 }

-#[test]
-fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
+#[xtest(feature = "_test_utils")]
+pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
 	let mut chanmon_cfgs = create_chanmon_cfgs(2);
 	chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -4781,8 +4781,8 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
 	check_spends!(spend_txn[0], node_txn[1]);
 }

-#[test]
-fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
+#[xtest(feature = "_test_utils")]
+pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
 	let mut chanmon_cfgs = create_chanmon_cfgs(2);
 	chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -4858,8 +4858,8 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
 	check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
 }

-#[test]
-fn test_onchain_to_onchain_claim() {
+#[xtest(feature = "_test_utils")]
+pub fn test_onchain_to_onchain_claim() {
 	// Test that in case of channel closure, we detect the state of output and claim HTLC
 	// on downstream peer's remote commitment tx.
 	// First, have C claim an HTLC against its own latest commitment transaction.
@@ -4976,8 +4976,8 @@ fn test_onchain_to_onchain_claim() {
 	check_added_monitors!(nodes[1], 1);
 }

-#[test]
-fn test_duplicate_payment_hash_one_failure_one_success() {
+#[xtest(feature = "_test_utils")]
+pub fn test_duplicate_payment_hash_one_failure_one_success() {
 	// Topology : A --> B --> C --> D
 	// We route 2 payments with same hash between B and C, one will be timeout, the other successfully claim
 	// Note that because C will refuse to generate two payment secrets for the same payment hash,
@@ -5122,8 +5122,8 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 	expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true);
 }

-#[test]
-fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
+#[xtest(feature = "_test_utils")]
+pub fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -5464,25 +5464,25 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 	assert_eq!(bs_updates, if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
 }

-#[test]
-fn test_fail_backwards_latest_remote_announce_a() {
+#[xtest(feature = "_test_utils")]
+pub fn test_fail_backwards_latest_remote_announce_a() {
 	do_test_fail_backwards_unrevoked_remote_announce(false, true);
 }

-#[test]
-fn test_fail_backwards_latest_remote_announce_b() {
+#[xtest(feature = "_test_utils")]
+pub fn test_fail_backwards_latest_remote_announce_b() {
 	do_test_fail_backwards_unrevoked_remote_announce(true, true);
 }

-#[test]
-fn test_fail_backwards_previous_remote_announce() {
+#[xtest(feature = "_test_utils")]
+pub fn test_fail_backwards_previous_remote_announce() {
 	do_test_fail_backwards_unrevoked_remote_announce(false, false);
 	// Note that true, true doesn't make sense as it implies we announce a revoked state, which is
 	// tested for in test_commitment_revoked_fail_backward_exhaustive()
 }

-#[test]
-fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
+#[xtest(feature = "_test_utils")]
+pub fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -5529,8 +5529,8 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
 		spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
 }

-#[test]
-fn test_key_derivation_params() {
+#[xtest(feature = "_test_utils")]
+pub fn test_key_derivation_params() {
 	// This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key
 	// manager rotation to test that `channel_keys_id` returned in
 	// [`SpendableOutputDescriptor::DelayedPaymentOutput`] let us re-derive the channel key set to
@@ -5617,8 +5617,8 @@ fn test_key_derivation_params() {
 		spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
 }

-#[test]
-fn test_static_output_closing_tx() {
+#[xtest(feature = "_test_utils")]
+pub fn test_static_output_closing_tx() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -5774,8 +5774,8 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
 // Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
 // assume they are handled the same across all six cases, as both outbound and inbound failures are
 // tested for at least one of the cases in other tests.
-#[test]
-fn htlc_claim_single_commitment_only_a() {
+#[xtest(feature = "_test_utils")]
+pub fn htlc_claim_single_commitment_only_a() {
 	do_htlc_claim_local_commitment_only(true);
 	do_htlc_claim_local_commitment_only(false);

 	do_htlc_claim_current_remote_commitment_only(true);
 	do_htlc_claim_current_remote_commitment_only(false);
 }

-#[test]
-fn htlc_claim_single_commitment_only_b() {
+#[xtest(feature = "_test_utils")]
+pub fn htlc_claim_single_commitment_only_b() {
 	do_htlc_claim_previous_remote_commitment_only(true, false);
 	do_htlc_claim_previous_remote_commitment_only(false, false);
 	do_htlc_claim_previous_remote_commitment_only(true, true);
 	do_htlc_claim_previous_remote_commitment_only(false, true);
 }

-#[test]
+#[xtest(feature = "_test_utils")]
 #[should_panic]
-fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic
+pub fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -5816,8 +5816,8 @@ fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on i
 	assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
 }

-#[test]
-fn bolt2_open_channel_sending_node_checks_part2() {
+#[xtest(feature = "_test_utils")]
+pub fn bolt2_open_channel_sending_node_checks_part2() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -5861,8 +5861,8 @@ fn bolt2_open_channel_sending_node_checks_part2() {
 	assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok());
 }

-#[test]
-fn bolt2_open_channel_sane_dust_limit() {
+#[xtest(feature = "_test_utils")]
+pub fn bolt2_open_channel_sane_dust_limit() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -5890,8 +5890,8 @@ fn bolt2_open_channel_sane_dust_limit() {
 // originated from our node, its failure is surfaced to the user. We trigger this failure to
 // free the HTLC by increasing our fee while the HTLC is in the holding cell such that the HTLC
 // is no longer affordable once it's freed.
-#[test]
-fn test_fail_holding_cell_htlc_upon_free() {
+#[xtest(feature = "_test_utils")]
+pub fn test_fail_holding_cell_htlc_upon_free() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -5970,8 +5970,8 @@ fn test_fail_holding_cell_htlc_upon_free() {
 // Test that if multiple HTLCs are released from the holding cell and one is
 // valid but the other is no longer valid upon release, the valid HTLC can be
 // successfully completed while the other one fails as expected.
-#[test]
-fn test_free_and_fail_holding_cell_htlcs() {
+#[xtest(feature = "_test_utils")]
+pub fn test_free_and_fail_holding_cell_htlcs() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -6095,8 +6095,8 @@ fn test_free_and_fail_holding_cell_htlcs() {
 // HTLC is failed backwards. We trigger this failure to forward the freed HTLC by increasing
 // our fee while the HTLC is in the holding cell such that the HTLC is no longer affordable
 // once it's freed.
-#[test]
-fn test_fail_holding_cell_htlc_upon_free_multihop() {
+#[xtest(feature = "_test_utils")]
+pub fn test_fail_holding_cell_htlc_upon_free_multihop() {
 	let chanmon_cfgs = create_chanmon_cfgs(3);
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 	// Avoid having to include routing fees in calculations
@@ -6224,8 +6224,8 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() {
 	check_added_monitors!(nodes[0], 1);
 }

-#[test]
-fn test_payment_route_reaching_same_channel_twice() {
+#[xtest(feature = "_test_utils")]
+pub fn test_payment_route_reaching_same_channel_twice() {
 	//A route should not go through the same channel twice
 	//It is enforced when constructing a route.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -6252,8 +6252,8 @@ fn test_payment_route_reaching_same_channel_twice() {
 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
 //TODO: I don't believe this is explicitly enforced when sending an HTLC but as the Fee aspect of the BOLT specs is in flux leaving this as a TODO.
-#[test]
-fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
 	//BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -6270,8 +6270,8 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 }

-#[test]
-fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
 	//BOLT2 Requirement: MUST offer amount_msat greater than 0.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -6290,8 +6290,8 @@ fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
 	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
 }

-#[test]
-fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
 	//BOLT2 Requirement: MUST offer amount_msat greater than 0.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -6314,8 +6314,8 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
 		[nodes[0].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
 	//BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
 	//It is enforced when constructing a route.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -6334,8 +6334,8 @@ fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
 		assert_eq!(err, &"Channel CLTV overflowed?"));
 }

-#[test]
-fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
 	//BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
 	//BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
 	//BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
@@ -6379,8 +6379,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 }

-#[test]
-fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
 	//BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -6405,8 +6405,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
 }

 // BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
-#[test]
-fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
 	//BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -6435,8 +6435,8 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
 	//BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -6471,8 +6471,8 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
 	//BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
 	//BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -6517,8 +6517,8 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
 	//OR adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -6541,8 +6541,8 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000);
 }

-#[test]
-fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
 	//BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -6565,8 +6565,8 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
 	//BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
 	// We test this by first testing that repeated HTLCs pass commitment signature checks
 	// after disconnect and that non-sequential htlc_ids result in a channel failure.
@@ -6617,8 +6617,8 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
 	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.

 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -6649,8 +6649,8 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
 	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
 	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.

 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -6681,8 +6681,8 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
 	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
 	//BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.

 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -6713,8 +6713,8 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment()
 	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
 	//BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.

 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -6756,8 +6756,8 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
 	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
 	//BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -6799,8 +6799,8 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
 	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
 	//BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.

 	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -6846,8 +6846,8 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag
 	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
 }

-#[test]
-fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
+#[xtest(feature = "_test_utils")]
+pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
 	//BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
 	// * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.

@@ -6924,8 +6924,8 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda
 	check_added_monitors!(nodes[1], 1);
 }

-#[test]
-fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
+#[xtest(feature = "_test_utils")]
+pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
 	let chanmon_cfgs = create_chanmon_cfgs(3);
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
@@ -7094,8 +7094,8 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 	}
 }

-#[test]
-fn test_failure_delay_dust_htlc_local_commitment() {
+#[xtest(feature = "_test_utils")]
+pub fn test_failure_delay_dust_htlc_local_commitment() {
 	do_test_failure_delay_dust_htlc_local_commitment(true);
 	do_test_failure_delay_dust_htlc_local_commitment(false);
 }
@@ -7177,15 +7177,15 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
 	}
 }

-#[test]
-fn test_sweep_outbound_htlc_failure_update() {
+#[xtest(feature = "_test_utils")]
+pub fn test_sweep_outbound_htlc_failure_update() {
 	do_test_sweep_outbound_htlc_failure_update(false, true);
 	do_test_sweep_outbound_htlc_failure_update(false, false);
 	do_test_sweep_outbound_htlc_failure_update(true, false);
 }

-#[test]
-fn test_user_configurable_csv_delay() {
+#[xtest(feature = "_test_utils")]
+pub fn test_user_configurable_csv_delay() {
 	// We test our channel constructors yield errors when we pass them absurd csv delay

 	let mut low_our_to_self_config = UserConfig::default();
@@ -7256,8 +7256,8 @@ fn test_user_configurable_csv_delay() {
 	} else { assert!(false); }
 }

-#[test]
-fn test_check_htlc_underpaying() {
+#[xtest(feature = "_test_utils")]
+pub fn test_check_htlc_underpaying() {
 	// Send payment through A -> B but A is maliciously
 	// sending a probe payment (i.e less than expected value)
 	// to B, B should refuse payment.
@@ -7322,8 +7322,8 @@ fn test_check_htlc_underpaying() {
 	expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
 }

-#[test]
-fn test_announce_disable_channels() {
+#[xtest(feature = "_test_utils")]
+pub fn test_announce_disable_channels() {
 	// Create 2 channels between A and B. Disconnect B. Call timer_tick_occurred and check for generated
 	// ChannelUpdate. Reconnect B, reestablish and check there is non-generated ChannelUpdate.

@@ -7414,8 +7414,8 @@ fn test_announce_disable_channels() {
 	assert!(chans_disabled.is_empty());
 }

-#[test]
-fn test_bump_penalty_txn_on_revoked_commitment() {
+#[xtest(feature = "_test_utils")]
+pub fn test_bump_penalty_txn_on_revoked_commitment() {
 	// In case of penalty txn with too low feerates for getting into mempools, RBF-bump them to be sure
 	// we're able to claim outputs on revoked commitment transaction before timelocks expiration

@@ -7519,8 +7519,8 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
 	nodes[1].node.get_and_clear_pending_msg_events();
 }

-#[test]
-fn test_bump_penalty_txn_on_revoked_htlcs() {
+#[xtest(feature = "_test_utils")]
+pub fn test_bump_penalty_txn_on_revoked_htlcs() {
 	// In case of penalty txn with too low feerates for getting into mempools, RBF-bump them to be sure
 	// we're able to claim outputs on revoked HTLC transactions before timelocks expiration

@@ -7674,8 +7674,8 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 	check_added_monitors!(nodes[0], 1);
 }

-#[test]
-fn test_bump_penalty_txn_on_remote_commitment() {
+#[xtest(feature = "_test_utils")]
+pub fn test_bump_penalty_txn_on_remote_commitment() {
 	// In case of claim txn with too low feerates for getting into mempools, RBF-bump them to be sure
 	// we're able to claim outputs on remote commitment transaction before timelocks expiration

@@ -7777,8 +7777,8 @@ fn test_bump_penalty_txn_on_remote_commitment() {
 	nodes[1].node.get_and_clear_pending_msg_events();
 }

-#[test]
-fn test_counterparty_raa_skip_no_crash() {
+#[xtest(feature = "_test_utils")]
+pub fn test_counterparty_raa_skip_no_crash() {
 	// Previously, if our counterparty sent two RAAs in a row without us having provided a
 	// commitment transaction, we would have happily carried on and provided them the next
 	// commitment transaction based on one RAA forward. This would probably eventually have led to
@@ -7831,8 +7831,8 @@ fn test_counterparty_raa_skip_no_crash() {
 		, [nodes[0].node.get_our_node_id()], 100000);
 }

-#[test]
-fn test_bump_txn_sanitize_tracking_maps() {
+#[xtest(feature = "_test_utils")]
+pub fn test_bump_txn_sanitize_tracking_maps() {
 	// Sanitizing pending_claim_requests and claimable_outpoints used to be buggy,
 	// verify we clean them right after expiration of ANTI_REORG_DELAY.

@@ -7881,8 +7881,8 @@ fn test_bump_txn_sanitize_tracking_maps() {
 	}
 }

-#[test]
-fn test_channel_conf_timeout() {
+#[xtest(feature = "_test_utils")]
+pub fn test_channel_conf_timeout() {
 	// Tests that, for inbound channels, we give up on them if the funding transaction does not
 	// confirm within 2016 blocks, as recommended by BOLT 2.
let chanmon_cfgs = create_chanmon_cfgs(2); @@ -7917,8 +7917,8 @@ fn test_channel_conf_timeout() { } } -#[test] -fn test_override_channel_config() { +#[xtest(feature = "_test_utils")] +pub fn test_override_channel_config() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -7936,8 +7936,8 @@ fn test_override_channel_config() { assert_eq!(res.common_fields.to_self_delay, 200); } -#[test] -fn test_override_0msat_htlc_minimum() { +#[xtest(feature = "_test_utils")] +pub fn test_override_0msat_htlc_minimum() { let mut zero_config = UserConfig::default(); zero_config.channel_handshake_config.our_htlc_minimum_msat = 0; let chanmon_cfgs = create_chanmon_cfgs(2); @@ -7954,8 +7954,8 @@ fn test_override_0msat_htlc_minimum() { assert_eq!(res.common_fields.htlc_minimum_msat, 1); } -#[test] -fn test_channel_update_has_correct_htlc_maximum_msat() { +#[xtest(feature = "_test_utils")] +pub fn test_channel_update_has_correct_htlc_maximum_msat() { // Tests that the `ChannelUpdate` message has the correct values for `htlc_maximum_msat` set. // Bolt 7 specifies that if present `htlc_maximum_msat`: // 1. MUST be set to less than or equal to the channel capacity. In LDK, this is capped to @@ -8006,8 +8006,8 @@ fn test_channel_update_has_correct_htlc_maximum_msat() { assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat); } -#[test] -fn test_manually_accept_inbound_channel_request() { +#[xtest(feature = "_test_utils")] +pub fn test_manually_accept_inbound_channel_request() { let mut manually_accept_conf = UserConfig::default(); manually_accept_conf.manually_accept_inbound_channels = true; let chanmon_cfgs = create_chanmon_cfgs(2); @@ -8056,8 +8056,8 @@ fn test_manually_accept_inbound_channel_request() { } } -#[test] -fn test_manually_reject_inbound_channel_request() { +#[xtest(feature = "_test_utils")] +pub fn test_manually_reject_inbound_channel_request() { let mut manually_accept_conf = UserConfig::default(); manually_accept_conf.manually_accept_inbound_channels = true; let chanmon_cfgs = create_chanmon_cfgs(2); @@ -8096,8 +8096,8 @@ fn test_manually_reject_inbound_channel_request() { assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); } -#[test] -fn test_can_not_accept_inbound_channel_twice() { +#[xtest(feature = "_test_utils")] +pub fn test_can_not_accept_inbound_channel_twice() { let mut manually_accept_conf = UserConfig::default(); manually_accept_conf.manually_accept_inbound_channels = true; let chanmon_cfgs = create_chanmon_cfgs(2); @@ -8142,8 +8142,8 @@ fn test_can_not_accept_inbound_channel_twice() { } } -#[test] -fn test_can_not_accept_unknown_inbound_channel() { +#[xtest(feature = "_test_utils")] +pub fn test_can_not_accept_unknown_inbound_channel() { let chanmon_cfg = create_chanmon_cfgs(2); let node_cfg = create_node_cfgs(2, &chanmon_cfg); let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]); @@ -8160,8 +8160,8 @@ fn test_can_not_accept_unknown_inbound_channel() { } } -#[test] -fn test_onion_value_mpp_set_calculation() { +#[xtest(feature = "_test_utils")] +pub fn test_onion_value_mpp_set_calculation() { // Test that we use the onion value `amt_to_forward` when // calculating whether we've reached the `total_msat` of an MPP // by having a routing node forward more than `amt_to_forward` @@ -8328,14 +8328,14 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) { claim_payment_along_route(&nodes[src_idx], 
&expected_paths, false, our_payment_preimage); } -#[test] -fn test_overshoot_mpp() { +#[xtest(feature = "_test_utils")] +pub fn test_overshoot_mpp() { do_test_overshoot_mpp(&[100_000, 101_000], 200_000); do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000); } -#[test] -fn test_simple_mpp() { +#[xtest(feature = "_test_utils")] +pub fn test_simple_mpp() { // Simple test of sending a multi-path payment. let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); @@ -8360,8 +8360,8 @@ fn test_simple_mpp() { claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage); } -#[test] -fn test_preimage_storage() { +#[xtest(feature = "_test_utils")] +pub fn test_preimage_storage() { // Simple test of payment preimage storage allowing no client-side storage to claim payments let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); @@ -8399,8 +8399,8 @@ fn test_preimage_storage() { } } -#[test] -fn test_bad_secret_hash() { +#[xtest(feature = "_test_utils")] +pub fn test_bad_secret_hash() { // Simple test of unregistered payment hash/invalid payment secret handling let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); @@ -8465,8 +8465,8 @@ fn test_bad_secret_hash() { expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data); } -#[test] -fn test_update_err_monitor_lockdown() { +#[xtest(feature = "_test_utils")] +pub fn test_update_err_monitor_lockdown() { // Our monitor will lock updates of the local commitment transaction if a broadcast condition // has been fulfilled (either force-close from Channel or block height requiring an HTLC- // timeout). Trying to update monitor after lockdown should return a ChannelMonitorUpdateStatus @@ -8539,8 +8539,8 @@ fn test_update_err_monitor_lockdown() { assert_eq!(events.len(), 1); } -#[test] -fn test_concurrent_monitor_claim() { +#[xtest(feature = "_test_utils")] +pub fn test_concurrent_monitor_claim() { // Watchtower A receives block, broadcasts state N, then channel receives new state N+1, // sending it to both watchtowers, Bob accepts N+1, then receives block and broadcasts // the latest state N+1, Alice rejects state N+1, but Bob has already broadcast it, @@ -8666,8 +8666,8 @@ fn test_concurrent_monitor_claim() { } } -#[test] -fn test_pre_lockin_no_chan_closed_update() { +#[xtest(feature = "_test_utils")] +pub fn test_pre_lockin_no_chan_closed_update() { // Test that if a peer closes a channel in response to a funding_created message we don't // generate a channel update (as the channel cannot appear on chain without a funding_signed // message). @@ -8705,8 +8705,8 @@ fn test_pre_lockin_no_chan_closed_update() { [nodes[1].node.get_our_node_id()], 100000); } -#[test] -fn test_htlc_no_detection() { +#[xtest(feature = "_test_utils")] +pub fn test_htlc_no_detection() { // This test is a mutation to underscore the detection logic bug we had // before #653. HTLC value routed is above the remaining balance, thus // inverting HTLC and `to_remote` output.
HTLC will come second and @@ -8936,16 +8936,16 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain } } -#[test] -fn test_onchain_htlc_settlement_after_close() { +#[xtest(feature = "_test_utils")] +pub fn test_onchain_htlc_settlement_after_close() { do_test_onchain_htlc_settlement_after_close(true, true); do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well do_test_onchain_htlc_settlement_after_close(true, false); do_test_onchain_htlc_settlement_after_close(false, false); } -#[test] -fn test_duplicate_temporary_channel_id_from_different_peers() { +#[xtest(feature = "_test_utils")] +pub fn test_duplicate_temporary_channel_id_from_different_peers() { // Tests that we can accept two different `OpenChannel` requests with the same // `temporary_channel_id`, as long as they are from different peers. let chanmon_cfgs = create_chanmon_cfgs(3); @@ -8994,8 +8994,8 @@ fn test_duplicate_temporary_channel_id_from_different_peers() { } } -#[test] -fn test_peer_funding_sidechannel() { +#[xtest(feature = "_test_utils")] +pub fn test_peer_funding_sidechannel() { // Test that if a peer somehow learns which txid we'll use for our channel funding before we // receive `funding_transaction_generated` the peer cannot cause us to crash. We'd previously // assumed that LDK would receive `funding_transaction_generated` prior to our peer learning @@ -9049,8 +9049,8 @@ fn test_peer_funding_sidechannel() { get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()); } -#[test] -fn test_duplicate_conflicting_funding_from_second_peer() { +#[xtest(feature = "_test_utils")] +pub fn test_duplicate_conflicting_funding_from_second_peer() { // Test that if a user tries to fund a channel with a funding outpoint they'd previously used // we don't try to remove the previous ChannelMonitor. This is largely a test to ensure we // don't regress in the fuzzer, as such funding getting passed our outpoint-matches checks @@ -9089,8 +9089,8 @@ fn test_duplicate_conflicting_funding_from_second_peer() { check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_signed_msg.channel_id, true, err_reason)]); } -#[test] -fn test_duplicate_funding_err_in_funding() { +#[xtest(feature = "_test_utils")] +pub fn test_duplicate_funding_err_in_funding() { // Test that if we have a live channel with one peer, then another peer comes along and tries // to create a second channel with the same txid we'll fail and not overwrite the // outpoint_to_peer map in `ChannelManager`. @@ -9137,8 +9137,8 @@ fn test_duplicate_funding_err_in_funding() { ); } -#[test] -fn test_duplicate_chan_id() { +#[xtest(feature = "_test_utils")] +pub fn test_duplicate_chan_id() { // Test that if a given peer tries to open a channel with the same channel_id as one that is // already open we reject it and keep the old channel. // @@ -9291,8 +9291,8 @@ fn test_duplicate_chan_id() { send_payment(&nodes[0], &[&nodes[1]], 8000000); } -#[test] -fn test_error_chans_closed() { +#[xtest(feature = "_test_utils")] +pub fn test_error_chans_closed() { // Test that we properly handle error messages, closing appropriate channels. 
// // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different @@ -9357,8 +9357,8 @@ fn test_error_chans_closed() { assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2); } -#[test] -fn test_invalid_funding_tx() { +#[xtest(feature = "_test_utils")] +pub fn test_invalid_funding_tx() { // Test that we properly handle invalid funding transactions sent to us from a peer. // // Previously, all other major lightning implementations had failed to properly sanitize @@ -9445,8 +9445,8 @@ fn test_invalid_funding_tx() { mine_transaction(&nodes[1], &spend_tx); } -#[test] -fn test_coinbase_funding_tx() { +#[xtest(feature = "_test_utils")] +pub fn test_coinbase_funding_tx() { // Miners are able to fund channels directly from coinbase transactions, however // by consensus rules, outputs of a coinbase transaction are encumbered by a 100 // block maturity timelock. To ensure that a (non-0conf) channel like this is enforceable @@ -9587,8 +9587,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t } } -#[test] -fn test_tx_confirmed_skipping_blocks_immediate_broadcast() { +#[xtest(feature = "_test_utils")] +pub fn test_tx_confirmed_skipping_blocks_immediate_broadcast() { do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false); do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true); } @@ -9680,8 +9680,8 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { } } -#[test] -fn test_dup_htlc_second_fail_panic() { +#[xtest(feature = "_test_utils")] +pub fn test_dup_htlc_second_fail_panic() { // Previously, if we received two HTLCs back-to-back, where the second overran the expected // value for the payment, we'd fail back both HTLCs after generating a `PaymentClaimable` event. // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed @@ -9689,15 +9689,15 @@ fn test_dup_htlc_second_fail_panic() { do_test_dup_htlc_second_rejected(true); } -#[test] -fn test_dup_htlc_second_rejected() { +#[xtest(feature = "_test_utils")] +pub fn test_dup_htlc_second_rejected() { // Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we // simply reject the second HTLC but are still able to claim the first HTLC. do_test_dup_htlc_second_rejected(false); } -#[test] -fn test_inconsistent_mpp_params() { +#[xtest(feature = "_test_utils")] +pub fn test_inconsistent_mpp_params() { // Test that if we receive two HTLCs with different payment parameters we fail back the first // such HTLC and allow the second to stay.
let chanmon_cfgs = create_chanmon_cfgs(4); @@ -9805,8 +9805,8 @@ fn test_inconsistent_mpp_params() { expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true); } -#[test] -fn test_double_partial_claim() { +#[xtest(feature = "_test_utils")] +pub fn test_double_partial_claim() { // Test what happens if a node receives a payment, generates a PaymentClaimable event, the HTLCs // time out, the sender resends only some of the MPP parts, then the user processes the // PaymentClaimable event, ensuring they don't inadvertently claim only part of the full payment @@ -10070,14 +10070,14 @@ fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool) do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit); } -#[test] -fn test_max_dust_htlc_exposure() { +#[xtest(feature = "_test_utils")] +pub fn test_max_dust_htlc_exposure() { do_test_max_dust_htlc_exposure_by_threshold_type(false); do_test_max_dust_htlc_exposure_by_threshold_type(true); } -#[test] -fn test_non_final_funding_tx() { +#[xtest(feature = "_test_utils")] +pub fn test_non_final_funding_tx() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -10121,8 +10121,8 @@ fn test_non_final_funding_tx() { } } -#[test] -fn test_non_final_funding_tx_within_headroom() { +#[xtest(feature = "_test_utils")] +pub fn test_non_final_funding_tx_within_headroom() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -10155,8 +10155,8 @@ fn test_non_final_funding_tx_within_headroom() { get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); } -#[test] -fn accept_busted_but_better_fee() { +#[xtest(feature = "_test_utils")] +pub fn accept_busted_but_better_fee() { // If a peer sends us a fee update that is too low, but higher than our previous channel // feerate, we should accept it. In the future we may want to consider closing the channel // later, but for now we only accept the update. @@ -10284,16 +10284,16 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash } } -#[test] -fn test_payment_with_custom_min_cltv_expiry_delta() { +#[xtest(feature = "_test_utils")] +pub fn test_payment_with_custom_min_cltv_expiry_delta() { do_payment_with_custom_min_final_cltv_expiry(false, false); do_payment_with_custom_min_final_cltv_expiry(false, true); do_payment_with_custom_min_final_cltv_expiry(true, false); do_payment_with_custom_min_final_cltv_expiry(true, true); } -#[test] -fn test_disconnects_peer_awaiting_response_ticks() { +#[xtest(feature = "_test_utils")] +pub fn test_disconnects_peer_awaiting_response_ticks() { // Tests that nodes which are awaiting a response critical for channel responsiveness // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
let mut chanmon_cfgs = create_chanmon_cfgs(2); @@ -10421,8 +10421,8 @@ fn test_disconnects_peer_awaiting_response_ticks() { } } -#[test] -fn test_remove_expired_outbound_unfunded_channels() { +#[xtest(feature = "_test_utils")] +pub fn test_remove_expired_outbound_unfunded_channels() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -10472,8 +10472,8 @@ fn test_remove_expired_outbound_unfunded_channels() { check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000); } -#[test] -fn test_remove_expired_inbound_unfunded_channels() { +#[xtest(feature = "_test_utils")] +pub fn test_remove_expired_inbound_unfunded_channels() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -10523,8 +10523,8 @@ fn test_remove_expired_inbound_unfunded_channels() { check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000); } -#[test] -fn test_channel_close_when_not_timely_accepted() { +#[xtest(feature = "_test_utils")] +pub fn test_channel_close_when_not_timely_accepted() { // Create network of two nodes let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); @@ -10566,8 +10566,8 @@ fn test_channel_close_when_not_timely_accepted() { } } -#[test] -fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() { +#[xtest(feature = "_test_utils")] +pub fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() { // Create network of two nodes let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); @@ -10688,14 +10688,14 @@ fn do_test_multi_post_event_actions(do_reload: bool) { check_added_monitors(&nodes[0], 3); } -#[test] -fn test_multi_post_event_actions() { +#[xtest(feature = "_test_utils")] +pub fn test_multi_post_event_actions() { do_test_multi_post_event_actions(true); do_test_multi_post_event_actions(false); } -#[test] -fn test_batch_channel_open() { +#[xtest(feature = "_test_utils")] +pub fn test_batch_channel_open() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); @@ -10762,8 +10762,8 @@ fn test_batch_channel_open() { ))); } -#[test] -fn test_close_in_funding_batch() { +#[xtest(feature = "_test_utils")] +pub fn test_close_in_funding_batch() { // This test ensures that if one of the channels // in the batch closes, the complete batch will close. 
let chanmon_cfgs = create_chanmon_cfgs(3); @@ -10843,8 +10843,8 @@ fn test_close_in_funding_batch() { assert!(nodes[0].node.list_channels().is_empty()); } -#[test] -fn test_batch_funding_close_after_funding_signed() { +#[xtest(feature = "_test_utils")] +pub fn test_batch_funding_close_after_funding_signed() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); @@ -10982,14 +10982,14 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen assert!(nodes[1].node.list_channels().is_empty()); } -#[test] -fn test_funding_and_commitment_tx_confirm_same_block() { +#[xtest(feature = "_test_utils")] +pub fn test_funding_and_commitment_tx_confirm_same_block() { do_test_funding_and_commitment_tx_confirm_same_block(false); do_test_funding_and_commitment_tx_confirm_same_block(true); } -#[test] -fn test_accept_inbound_channel_errors_queued() { +#[xtest(feature = "_test_utils")] +pub fn test_accept_inbound_channel_errors_queued() { // For manually accepted inbound channels, tests that a close error is correctly handled // and the channel fails for the initiator. let mut config0 = test_default_channel_config(); diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs index 15a39bbe0d4..b8ad80258a4 100644 --- a/lightning/src/util/test_channel_signer.rs +++ b/lightning/src/util/test_channel_signer.rs @@ -19,7 +19,7 @@ use crate::prelude::*; use core::cmp; use crate::sync::{Mutex, Arc}; -#[cfg(test)] use crate::sync::MutexGuard; +#[cfg(any(test, feature = "_test_utils"))] use crate::sync::MutexGuard; use bitcoin::blockdata::transaction::Transaction; use bitcoin::hashes::Hash; @@ -111,7 +111,7 @@ impl TestChannelSigner { pub fn channel_type_features(&self) -> &ChannelTypeFeatures { self.inner.channel_type_features().unwrap() } - #[cfg(test)] + #[cfg(any(test, feature = "_test_utils"))] pub fn get_enforcement_state(&self) -> MutexGuard { self.state.lock().unwrap() } diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index fefd78879e9..ee5222d4823 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -732,7 +732,7 @@ impl TestChannelMessageHandler { } } - #[cfg(any(test, feature = "_test_utils"))] + #[cfg(test)] pub(crate) fn expect_receive_msg(&self, ev: wire::Message<()>) { let mut expected_msgs = self.expected_recv_msgs.lock().unwrap(); if expected_msgs.is_none() { *expected_msgs = Some(Vec::new()); }
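Note on the `xtest` pattern used throughout the functional-test hunks above: each `#[test] fn ...` becomes `#[xtest(feature = "_test_utils")] pub fn ...`, so every test remains an ordinary unit test inside the crate while also being exported, behind the off-by-default `_test_utils` feature, for an external harness to call. The real `xtest` macro is defined outside this diff; what follows is only a minimal sketch of the pattern, assuming a `proc-macro` crate built on `syn` and `quote` (the names and the exact expansion are illustrative assumptions, not LDK's actual implementation):

use proc_macro::TokenStream;
use quote::quote;
use syn::{parse_macro_input, ItemFn};

// Sketch: keep the annotated function runnable as a unit test inside the
// defining crate (`cfg(test)` is true under `cargo test`), while leaving it
// a plain `pub fn` when the crate is consumed as a dependency, where an
// external runner built with the gating feature can invoke it directly.
// The `feature = "_test_utils"` argument is accepted but unused here.
#[proc_macro_attribute]
pub fn xtest(_attr: TokenStream, item: TokenStream) -> TokenStream {
    let func = parse_macro_input!(item as ItemFn);
    quote! {
        #[cfg_attr(test, test)]
        #func
    }
    .into()
}

Under that reading, a downstream crate built with the feature enabled could drive one of these now-`pub` functions itself, e.g. `lightning::ln::functional_tests::test_simple_mpp();` (module path assumed), which is consistent with the companion hunks widening `#[cfg(test)]` items to `#[cfg(any(test, feature = "_test_utils"))]`.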