Add basic OutputSweeper test in BP
We simply check that the `OutputSweeper` generates a spending tx and that the
`TrackedSpendableOutput` is pruned once the spending tx reaches
`ANTI_REORG_DELAY` confirmations.
tnull committed Apr 16, 2024
1 parent 1d48b01 commit 986bfcd
Showing 1 changed file with 103 additions and 4 deletions.
107 changes: 103 additions & 4 deletions lightning-background-processor/src/lib.rs
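
For orientation, the new test boils down to the following lifecycle, sketched here with the `OutputSweeper` calls that appear in the diff (the actual node and sweeper setup lives in the test helpers below):

// Sketch only; `sweeper`, `outputs`, `channel_id`, `header`, and `height` stand in for
// the state the test sets up further down.
// 1. Hand the descriptors surfaced by the `SpendableOutputs` event to the sweeper.
sweeper.track_spendable_outputs(outputs, channel_id, false);
// 2. Feed chain updates; while the spend is unconfirmed, the sweeper regenerates and
//    rebroadcasts a spending tx on each new block.
sweeper.best_block_updated(&header, height);
// 3. Once a spending tx reaches `ANTI_REORG_DELAY` confirmations, the corresponding
//    `TrackedSpendableOutput` is pruned.
assert!(sweeper.tracked_spendable_outputs().is_empty());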
@@ -919,14 +919,16 @@ impl Drop for BackgroundProcessor {

#[cfg(all(feature = "std", test))]
mod tests {
use bitcoin::ScriptBuf;
use bitcoin::blockdata::constants::{genesis_block, ChainHash};
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::network::constants::Network;
use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
use lightning::chain::{BestBlock, Confirm, chainmonitor};
use lightning::chain::{BestBlock, Confirm, chainmonitor, Filter};
use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
use lightning::sign::{InMemorySigner, KeysManager};
use lightning::chain::chaininterface::{ConfirmationTarget, FeeEstimator};
use lightning::sign::{InMemorySigner, KeysManager, SpendableOutputDescriptor};
use lightning::chain::transaction::OutPoint;
use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
use lightning::{get_event_msg, get_event};
@@ -947,6 +949,7 @@ mod tests {
CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY,
NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY,
SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY};
use lightning::util::sweep::{OutputSweeper, OutputSpendStatus};
use lightning_persister::fs_store::FilesystemStore;
use std::collections::VecDeque;
use std::{fs, env};
@@ -1009,6 +1012,7 @@ mod tests {
logger: Arc<test_utils::TestLogger>,
best_block: BestBlock,
scorer: Arc<LockingWrapper<TestScorer>>,
sweeper: Arc<OutputSweeper<Arc<test_utils::TestBroadcaster>, Arc<dyn Filter + Sync + Send>, Arc<FilesystemStore>, Arc<test_utils::TestLogger>>>,
}

impl Node {
@@ -1271,10 +1275,30 @@ mod tests {
let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), Arc::clone(&keys_manager), scorer.clone(), Default::default()));
let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
let now = Duration::from_secs(genesis_block.header.time as u64);
let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
let best_block = BestBlock::from_network(network);
let params = ChainParameters { network, best_block };
let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));

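// Callback the sweeper uses to build a spending tx for the tracked descriptors via
// `KeysManager::spend_spendable_outputs`, at the current `NonAnchorChannelFee` estimate.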
let spend_fee_estimator = Arc::clone(&fee_estimator);
let spend_keys_manager = Arc::clone(&keys_manager);
let spend_outputs_callback = move |output_descriptors: &[&SpendableOutputDescriptor]| {
let fee_rate = spend_fee_estimator
.get_est_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
spend_keys_manager.spend_spendable_outputs(
output_descriptors,
Vec::new(),
ScriptBuf::new(),
fee_rate,
None,
&Secp256k1::new(),
)
};
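// The sweeper shares the node's `FilesystemStore` and gets no chain `Filter` (`None`);
// the test instead drives it directly via `best_block_updated`/`transactions_confirmed`.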
let sweeper = Arc::new(OutputSweeper::new(Arc::clone(&tx_broadcaster),
Arc::clone(&kv_store), best_block, None::<Arc<dyn Filter + Sync + Send>>,
Arc::clone(&logger), spend_outputs_callback));
let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
let msg_handler = MessageHandler {
@@ -1283,7 +1307,7 @@ mod tests {
onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
};
let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer, sweeper };
nodes.push(node);
}

@@ -1352,15 +1376,32 @@ mod tests {
1 => {
node.node.transactions_confirmed(&header, &txdata, height);
node.chain_monitor.transactions_confirmed(&header, &txdata, height);
node.sweeper.transactions_confirmed(&header, &txdata, height);
},
x if x == depth => {
node.node.best_block_updated(&header, height);
node.chain_monitor.best_block_updated(&header, height);
node.sweeper.best_block_updated(&header, height);
},
_ => {},
}
}
}

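// Connects `num_blocks` dummy blocks on top of the node's current best block, notifying
// the channel manager, chain monitor, and sweeper only of the final one.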
fn advance_chain(node: &mut Node, num_blocks: u32) {
for i in 1..=num_blocks {
let prev_blockhash = node.best_block.block_hash;
let height = node.best_block.height + 1;
let header = create_dummy_header(prev_blockhash, height);
node.best_block = BestBlock::new(header.block_hash(), height);
if i == num_blocks {
node.node.best_block_updated(&header, height);
node.chain_monitor.best_block_updated(&header, height);
node.sweeper.best_block_updated(&header, height);
}
}
}

fn confirm_transaction(node: &mut Node, tx: &Transaction) {
confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
}
@@ -1592,6 +1633,9 @@ mod tests {
let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
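// The funding tx should have been broadcast by now; drain the broadcaster so the sweep
// txs below are the only entries left to assert on.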
let broadcast_funding = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
assert_eq!(broadcast_funding.txid(), funding_tx.txid());
assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());

if !std::thread::panicking() {
bg_processor.stop().unwrap();
@@ -1617,10 +1661,65 @@ mod tests {
.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
.expect("Events not handled within deadline");
match event {
Event::SpendableOutputs { .. } => {},
Event::SpendableOutputs { outputs, channel_id } => {
nodes[0].sweeper.track_spendable_outputs(outputs, channel_id, false);
},
_ => panic!("Unexpected event: {:?}", event),
}

// Check we generate an initial sweeping tx.
assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
let sweep_tx_0 = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
match tracked_output.status {
OutputSpendStatus::PendingFirstConfirmation { latest_spending_tx, .. } => {
assert_eq!(sweep_tx_0.txid(), latest_spending_tx.txid());
}
_ => panic!("Unexpected status"),
}

// Check we regenerate and rebroadcast the sweeping tx each block.
advance_chain(&mut nodes[0], 1);
assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
let sweep_tx_1 = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
match tracked_output.status {
OutputSpendStatus::PendingFirstConfirmation { latest_spending_tx, .. } => {
assert_eq!(sweep_tx_1.txid(), latest_spending_tx.txid());
}
_ => panic!("Unexpected status"),
}
assert_ne!(sweep_tx_0, sweep_tx_1);

advance_chain(&mut nodes[0], 1);
assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
let sweep_tx_2 = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
match tracked_output.status {
OutputSpendStatus::PendingFirstConfirmation { latest_spending_tx, .. } => {
assert_eq!(sweep_tx_2.txid(), latest_spending_tx.txid());
}
_ => panic!("Unexpected status"),
}
assert_ne!(sweep_tx_0, sweep_tx_2);
assert_ne!(sweep_tx_1, sweep_tx_2);

// Check we still track the spendable outputs up to ANTI_REORG_DELAY confirmations.
confirm_transaction_depth(&mut nodes[0], &sweep_tx_2, 5);
assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
match tracked_output.status {
OutputSpendStatus::PendingThresholdConfirmations { latest_spending_tx, .. } => {
assert_eq!(sweep_tx_2.txid(), latest_spending_tx.txid());
}
_ => panic!("Unexpected status"),
}

// Check we stop tracking the spendable outputs when one of the txs reaches
// ANTI_REORG_DELAY confirmations.
confirm_transaction_depth(&mut nodes[0], &sweep_tx_0, ANTI_REORG_DELAY);
assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 0);

if !std::thread::panicking() {
bg_processor.stop().unwrap();
}