From 1ece5a9aa0de2dfb901183109ea4006863f36d90 Mon Sep 17 00:00:00 2001 From: Arik Sosman Date: Wed, 15 Jan 2025 09:13:06 -0800 Subject: [PATCH 01/16] Consider dust threshold for fee rate determination Previously, the `feerate_bump` method did not enforce the dust threshold, which could result in us thinking we had raised the fee rate without actually having done so. Instead, `compute_package_output` blindly accepted the updated fee rate while enforcing a non-dust output value, resulting in repeated broadcast attempts of an identical transaction. Conflicts due to removal of a preceding commit resolved in: * lightning/src/chain/package.rs --- lightning/src/chain/onchaintx.rs | 1 + lightning/src/chain/package.rs | 44 ++++++++++++++++++++------------ 2 files changed, 28 insertions(+), 17 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 759668cfa9c..2a43b006920 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -215,6 +215,7 @@ pub(crate) enum OnchainClaim { } /// Represents the different feerate strategies a pending request can use when generating a claim. +#[derive(Debug)] pub(crate) enum FeerateStrategy { /// We must reuse the most recently used feerate, if any. RetryPrevious, diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 53bba3a754b..3698b041eb7 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -1117,10 +1117,10 @@ impl PackageTemplate { // If old feerate is 0, first iteration of this claim, use normal fee calculation if self.feerate_previous != 0 { if let Some((new_fee, feerate)) = feerate_bump( - predicted_weight, input_amounts, self.feerate_previous, feerate_strategy, - conf_target, fee_estimator, logger, + predicted_weight, input_amounts, dust_limit_sats, self.feerate_previous, + feerate_strategy, conf_target, fee_estimator, logger, ) { - return Some((cmp::max(input_amounts as i64 - new_fee as i64, dust_limit_sats as i64) as u64, feerate)); + return Some((input_amounts.saturating_sub(new_fee), feerate)); } } else { if let Some((new_fee, feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, conf_target, fee_estimator, logger) { @@ -1270,16 +1270,20 @@ fn compute_fee_from_spent_amounts( /// respect BIP125 rules 3) and 4) and if required adjust the new fee to meet the RBF policy /// requirement. fn feerate_bump( - predicted_weight: u64, input_amounts: u64, previous_feerate: u64, feerate_strategy: &FeerateStrategy, - conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + predicted_weight: u64, input_amounts: u64, dust_limit_sats: u64, previous_feerate: u64, + feerate_strategy: &FeerateStrategy, conf_target: ConfirmationTarget, + fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Option<(u64, u64)> where F::Target: FeeEstimator, { + let previous_fee = previous_feerate * predicted_weight / 1000; + // If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee... 
let (new_fee, new_feerate) = if let Some((new_fee, new_feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, conf_target, fee_estimator, logger) { + log_debug!(logger, "Initiating fee rate bump from {} s/kWU ({} s) to {} s/kWU ({} s) using {:?} strategy", previous_feerate, previous_fee, new_feerate, new_fee, feerate_strategy); match feerate_strategy { FeerateStrategy::RetryPrevious => { let previous_fee = previous_feerate * predicted_weight / 1000; @@ -1297,15 +1301,12 @@ where // ...else just increase the previous feerate by 25% (because that's a nice number) let bumped_feerate = previous_feerate + (previous_feerate / 4); let bumped_fee = bumped_feerate * predicted_weight / 1000; - if input_amounts <= bumped_fee { - log_warn!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts); - return None; - } + (bumped_fee, bumped_feerate) }, } } else { - log_warn!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts); + log_warn!(logger, "Can't bump new claiming tx, input amount {} is too small", input_amounts); return None; }; @@ -1316,17 +1317,26 @@ where return Some((new_fee, new_feerate)); } - let previous_fee = previous_feerate * predicted_weight / 1000; let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * predicted_weight / 1000; // BIP 125 Opt-in Full Replace-by-Fee Signaling // * 3. The replacement transaction pays an absolute fee of at least the sum paid by the original transactions. // * 4. The replacement transaction must also pay for its own bandwidth at or above the rate set by the node's minimum relay fee setting. - let new_fee = if new_fee < previous_fee + min_relay_fee { - new_fee + previous_fee + min_relay_fee - new_fee - } else { - new_fee - }; - Some((new_fee, new_fee * 1000 / predicted_weight)) + let naive_new_fee = new_fee; + let new_fee = cmp::max(new_fee, previous_fee + min_relay_fee); + + if new_fee > naive_new_fee { + log_debug!(logger, "Naive fee bump of {}s does not meet min relay fee requirements of {}s", naive_new_fee - previous_fee, min_relay_fee); + } + + let remaining_output_amount = input_amounts.saturating_sub(new_fee); + if remaining_output_amount < dust_limit_sats { + log_warn!(logger, "Can't bump new claiming tx, output amount {} would end up below dust threshold {}", remaining_output_amount, dust_limit_sats); + return None; + } + + let new_feerate = new_fee * 1000 / predicted_weight; + log_debug!(logger, "Fee rate bumped by {}s from {} s/KWU ({} s) to {} s/KWU ({} s)", new_fee - previous_fee, previous_feerate, previous_fee, new_feerate, new_fee); + Some((new_fee, new_feerate)) } #[cfg(test)] From 6d8e18b230eeb1774293d2a0ea0ee1839dcf7c6b Mon Sep 17 00:00:00 2001 From: Arik Sosman Date: Tue, 21 Jan 2025 06:11:58 -0800 Subject: [PATCH 02/16] Fix incremental relay fee to be 1s/vB Bitcoin Core relay policy does not require 16s/vB, which it was previously set to. Trivial conflicts due to removal of a preceding commit resolved in: * lightning/src/chain/chaininterface.rs --- lightning/src/chain/chaininterface.rs | 2 +- lightning/src/ln/functional_tests.rs | 62 ++++++++++++++++----------- 2 files changed, 37 insertions(+), 27 deletions(-) diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 84281df1d7b..b9c7e88420d 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -176,7 +176,7 @@ pub trait FeeEstimator { } /// Minimum relay fee as required by bitcoin network mempool policy. 
-pub const MIN_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 4000; +pub const MIN_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 253; /// Minimum feerate that takes a sane approach to bitcoind weight-to-vbytes rounding. /// See the following Core Lightning commit for an explanation: /// diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index e52870cf19d..123b394f374 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1313,18 +1313,22 @@ fn test_duplicate_htlc_different_direction_onchain() { let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + // post-bump fee (288 satoshis) + dust threshold for output type (294 satoshis) = 582 + let payment_value_sats = 582; + let payment_value_msats = payment_value_sats * 1000; + // balancing send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000); - let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_value_msats); let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap(); - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret); + send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], payment_value_msats, payment_hash, node_a_payment_secret); // Provide preimage to node 0 by claiming payment nodes[0].node.claim_funds(payment_preimage); - expect_payment_claimed!(nodes[0], payment_hash, 800_000); + expect_payment_claimed!(nodes[0], payment_hash, payment_value_msats); check_added_monitors!(nodes[0], 1); // Broadcast node 1 commitment txn @@ -1333,7 +1337,7 @@ fn test_duplicate_htlc_different_direction_onchain() { assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound let mut has_both_htlcs = 0; // check htlcs match ones committed for outp in remote_txn[0].output.iter() { - if outp.value.to_sat() == 800_000 / 1000 { + if outp.value.to_sat() == payment_value_sats { has_both_htlcs += 1; } else if outp.value.to_sat() == 900_000 / 1000 { has_both_htlcs += 1; @@ -1353,18 +1357,15 @@ fn test_duplicate_htlc_different_direction_onchain() { check_spends!(claim_txn[1], remote_txn[0]); check_spends!(claim_txn[2], remote_txn[0]); let preimage_tx = &claim_txn[0]; - let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output { - (&claim_txn[1], &claim_txn[2]) - } else { - (&claim_txn[2], &claim_txn[1]) - }; + let timeout_tx = claim_txn.iter().skip(1).find(|t| t.input[0].previous_output != preimage_tx.input[0].previous_output).unwrap(); + let preimage_bump_tx = claim_txn.iter().skip(1).find(|t| t.input[0].previous_output == preimage_tx.input[0].previous_output).unwrap(); assert_eq!(preimage_tx.input.len(), 1); assert_eq!(preimage_bump_tx.input.len(), 1); assert_eq!(preimage_tx.input.len(), 1); assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx - assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), 800); + assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), payment_value_sats); assert_eq!(timeout_tx.input.len(), 1); assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // 
HTLC 0 <--> 1, timeout tx @@ -7935,22 +7936,31 @@ fn test_bump_penalty_txn_on_remote_commitment() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000); - route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0; - - // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC - let remote_txn = get_local_commitment_txn!(nodes[0], chan.2); - assert_eq!(remote_txn[0].output.len(), 4); - assert_eq!(remote_txn[0].input.len(), 1); - assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.compute_txid()); - - // Claim a HTLC without revocation (provide B monitor with preimage) - nodes[1].node.claim_funds(payment_preimage); - expect_payment_claimed!(nodes[1], payment_hash, 3_000_000); - mine_transaction(&nodes[1], &remote_txn[0]); - check_added_monitors!(nodes[1], 2); - connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires + let remote_txn = { + // post-bump fee (288 satoshis) + dust threshold for output type (294 satoshis) = 582 + let htlc_value_a_msats = 582_000; + let htlc_value_b_msats = 583_000; + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); + let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value_a_msats); + route_payment(&nodes[1], &vec!(&nodes[0])[..], htlc_value_b_msats); + + // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC + let remote_txn = get_local_commitment_txn!(nodes[0], chan.2); + assert_eq!(remote_txn[0].output.len(), 4); + assert_eq!(remote_txn[0].input.len(), 1); + assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.compute_txid()); + + // Claim a HTLC without revocation (provide B monitor with preimage) + nodes[1].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[1], payment_hash, htlc_value_a_msats); + mine_transaction(&nodes[1], &remote_txn[0]); + check_added_monitors!(nodes[1], 2); + connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires + // depending on the block connection style, node 1 may have broadcast either 3 or 10 txs + + remote_txn + }; // One or more claim tx should have been broadcast, check it let timeout; From bc6ae06f409b077f57b5c2ef56e4fd15442a82bc Mon Sep 17 00:00:00 2001 From: Arik Sosman Date: Fri, 17 Jan 2025 10:53:44 -0800 Subject: [PATCH 03/16] Test fee rate bumping Create some tests for various `feerate_bump` scenarios and ensure among other things that there are no underflows.
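To make the arithmetic these tests pin down concrete, here is a self-contained sketch of the bumped-fee and dust check. The function name `bump_check` and the reduction to the 25%-bump branch are simplifications rather than LDK's actual `feerate_bump`, which consults the fee estimator first (at the feerate floor used in these tests, the 25% bump branch is the one taken):

const MIN_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 253; // ~1 s/vB: 253 sats per 1000 WU, at 4 WU per vbyte

fn bump_check(weight: u64, input_sats: u64, dust_limit_sats: u64, prev_feerate: u64) -> Option<(u64, u64)> {
    let prev_fee = prev_feerate * weight / 1000;
    // 25% feerate bump, as in feerate_bump's fallback branch
    let bumped_feerate = prev_feerate + prev_feerate / 4;
    let bumped_fee = bumped_feerate * weight / 1000;
    // BIP 125 rules 3 and 4: pay at least the previous fee plus the min relay fee
    let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * weight / 1000;
    let new_fee = bumped_fee.max(prev_fee + min_relay_fee);
    // The check under test: reject bumps that would push the output below dust
    let remaining = input_sats.saturating_sub(new_fee);
    if remaining < dust_limit_sats {
        return None;
    }
    Some((new_fee, new_fee * 1000 / weight))
}

fn main() {
    // 1052 sats in over 1000 WU at 253 s/kWU: the naive 25% bump (316 sats total)
    // fails BIP 125, so the fee becomes 253 + 253 = 506 sats, leaving exactly the
    // 546-sat dust limit -- the smallest passing input in the tests below.
    assert_eq!(bump_check(1000, 1052, 546, 253), Some((506, 506)));
    // One satoshi less and the remaining output (545) would be dust.
    assert_eq!(bump_check(1000, 1051, 546, 253), None);
}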
--- lightning/src/chain/package.rs | 85 +++++++++++++++++++++++++++++++++- 1 file changed, 84 insertions(+), 1 deletion(-) diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 3698b041eb7..55214006d4c 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -1341,7 +1341,7 @@ where #[cfg(test)] mod tests { - use crate::chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageTemplate, PackageSolvingData, RevokedHTLCOutput, RevokedOutput, WEIGHT_REVOKED_OUTPUT, weight_offered_htlc, weight_received_htlc}; + use crate::chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageTemplate, PackageSolvingData, RevokedHTLCOutput, RevokedOutput, WEIGHT_REVOKED_OUTPUT, weight_offered_htlc, weight_received_htlc, feerate_bump}; use crate::chain::Txid; use crate::ln::chan_utils::HTLCOutputInCommitment; use crate::types::payment::{PaymentPreimage, PaymentHash}; @@ -1359,7 +1359,10 @@ mod tests { use bitcoin::secp256k1::{PublicKey,SecretKey}; use bitcoin::secp256k1::Secp256k1; + use crate::chain::chaininterface::{ConfirmationTarget, FeeEstimator, FEERATE_FLOOR_SATS_PER_KW, LowerBoundedFeeEstimator}; + use crate::chain::onchaintx::FeerateStrategy; use crate::types::features::ChannelTypeFeatures; + use crate::util::test_utils::TestLogger; fn fake_txid(n: u64) -> Txid { Transaction { @@ -1669,4 +1672,84 @@ mod tests { } } } + + struct TestFeeEstimator { + sat_per_kw: u32, + } + + impl FeeEstimator for TestFeeEstimator { + fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 { + self.sat_per_kw + } + } + + #[test] + fn test_feerate_bump() { + let sat_per_kw = FEERATE_FLOOR_SATS_PER_KW; + let test_fee_estimator = &TestFeeEstimator { sat_per_kw }; + let fee_estimator = LowerBoundedFeeEstimator::new(test_fee_estimator); + let fee_rate_strategy = FeerateStrategy::ForceBump; + let confirmation_target = ConfirmationTarget::UrgentOnChainSweep; + + { + // Check underflow doesn't occur + let predicted_weight_units = 1000; + let input_satoshis = 505; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 253, &fee_rate_strategy, confirmation_target, &fee_estimator, &logger); + assert!(bumped_fee_rate.is_none()); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Can't bump new claiming tx, input amount 505 is too small").unwrap(), 1); + } + + { + // Check post-25%-bump-underflow scenario satisfying the following constraints: + // input - fee = 546 + // input - fee * 1.25 = -1 + + // We accomplish that scenario with the following values: + // input = 2734 + // fee = 2188 + + let predicted_weight_units = 1000; + let input_satoshis = 2734; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 2188, &fee_rate_strategy, confirmation_target, &fee_estimator, &logger); + assert!(bumped_fee_rate.is_none()); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Can't bump new claiming tx, output amount 0 would end up below dust threshold 546").unwrap(), 1); + } + + { + // Check that an output amount of 0 is caught + let predicted_weight_units = 1000; + let input_satoshis = 506; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 253, &fee_rate_strategy, confirmation_target, &fee_estimator, 
&logger); + assert!(bumped_fee_rate.is_none()); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Can't bump new claiming tx, output amount 0 would end up below dust threshold 546").unwrap(), 1); + } + + { + // Check that dust_threshold - 1 is blocked + let predicted_weight_units = 1000; + let input_satoshis = 1051; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 253, &fee_rate_strategy, confirmation_target, &fee_estimator, &logger); + assert!(bumped_fee_rate.is_none()); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Can't bump new claiming tx, output amount 545 would end up below dust threshold 546").unwrap(), 1); + } + + { + let predicted_weight_units = 1000; + let input_satoshis = 1052; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 253, &fee_rate_strategy, confirmation_target, &fee_estimator, &logger).unwrap(); + assert_eq!(bumped_fee_rate, (506, 506)); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Naive fee bump of 63s does not meet min relay fee requirements of 253s").unwrap(), 1); + } + } } From dd5bec95044b048597e11d8f4cb26a2f9d77ac71 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 15 Jan 2025 16:29:44 -0500 Subject: [PATCH 04/16] Outbound payments: pass session privs by reference We need to stop passing this Vec by value for the next commit so we can pass it to a different method. --- lightning/src/ln/outbound_payment.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index c0dea7df52d..4a4b4aaffb2 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -803,7 +803,7 @@ impl OutboundPayments { { let onion_session_privs = self.add_new_pending_payment(payment_hash, recipient_onion.clone(), payment_id, None, route, None, None, entropy_source, best_block_height)?; self.pay_route_internal(route, payment_hash, &recipient_onion, None, None, payment_id, None, - onion_session_privs, node_signer, best_block_height, &send_payment_along_path) + &onion_session_privs, node_signer, best_block_height, &send_payment_along_path) .map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e }) } @@ -983,7 +983,7 @@ impl OutboundPayments { let result = self.pay_route_internal( &route, payment_hash, &recipient_onion, keysend_preimage, invoice_request, payment_id, - Some(route_params.final_value_msat), onion_session_privs, node_signer, best_block_height, + Some(route_params.final_value_msat), &onion_session_privs, node_signer, best_block_height, &send_payment_along_path ); log_info!( @@ -1269,7 +1269,7 @@ impl OutboundPayments { })?; let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, - keysend_preimage, None, payment_id, None, onion_session_privs, node_signer, + keysend_preimage, None, payment_id, None, &onion_session_privs, node_signer, best_block_height, &send_payment_along_path); log_info!(logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, res); @@ -1426,7 +1426,7 @@ impl OutboundPayments { } }; let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, keysend_preimage, - invoice_request.as_ref(), payment_id, Some(total_msat), onion_session_privs, node_signer, + invoice_request.as_ref(), payment_id, Some(total_msat), 
&onion_session_privs, node_signer, best_block_height, &send_payment_along_path); log_info!(logger, "Result retrying payment id {}: {:?}", &payment_id, res); if let Err(e) = res { @@ -1542,7 +1542,7 @@ impl OutboundPayments { let recipient_onion_fields = RecipientOnionFields::spontaneous_empty(); match self.pay_route_internal(&route, payment_hash, &recipient_onion_fields, - None, None, payment_id, None, onion_session_privs, node_signer, best_block_height, + None, None, payment_id, None, &onion_session_privs, node_signer, best_block_height, &send_payment_along_path ) { Ok(()) => Ok((payment_hash, payment_id)), @@ -1733,7 +1733,7 @@ impl OutboundPayments { fn pay_route_internal( &self, route: &Route, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, - payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>, + payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: &Vec<[u8; 32]>, node_signer: &NS, best_block_height: u32, send_payment_along_path: &F ) -> Result<(), PaymentSendFailure> where @@ -1788,7 +1788,7 @@ impl OutboundPayments { let mut path_res = send_payment_along_path(SendAlongPathArgs { path: &path, payment_hash: &payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage: &keysend_preimage, invoice_request, - session_priv_bytes + session_priv_bytes: *session_priv_bytes }); match path_res { Ok(_) => {}, @@ -1872,7 +1872,7 @@ impl OutboundPayments { F: Fn(SendAlongPathArgs) -> Result<(), APIError>, { self.pay_route_internal(route, payment_hash, &recipient_onion, - keysend_preimage, None, payment_id, recv_value_msat, onion_session_privs, + keysend_preimage, None, payment_id, recv_value_msat, &onion_session_privs, node_signer, best_block_height, &send_payment_along_path) .map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e }) } From 152f3578222bc33edeb7d0af831e5d7bdd33b22c Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 15 Jan 2025 16:43:36 -0500 Subject: [PATCH 05/16] Fix outbound payments memory leak on buggy router MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prior to this patch, if we attempted to send a payment or probe to a buggy route, we would error but continue storing the pending outbound payment forever. Attempts to retry would result in a “duplicate payment” error. In the case of ChannelManager::send_payment, we would also fail to generate a PaymentFailed event, even if the user manually called abandon_payment. This bug is unlikely to have ever been hit in the wild as most users use LDK’s router. Discovered in the course of adding a new send_to_route API. Now, we’ll properly generate events and remove the outbound from storage. 
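The shape of the leak is easier to see in miniature. The following is a deliberately simplified model (toy types, not LDK's real `PendingOutboundPayment` state machine) of why clearing the per-path session privs is what unblocks `abandon_payment`:

use std::collections::{HashMap, HashSet};

type PaymentId = u8;
type SessionPriv = [u8; 32];

// Toy stand-in for the pending-outbound map; the real bookkeeping in
// lightning::ln::outbound_payment tracks much more state per payment.
#[derive(Default)]
struct PendingOutbounds {
    payments: HashMap<PaymentId, HashSet<SessionPriv>>,
}

impl PendingOutbounds {
    // One session priv is registered per path when the payment is added.
    fn add(&mut self, id: PaymentId, session_privs: &[SessionPriv]) {
        self.payments.insert(id, session_privs.iter().copied().collect());
    }
    // Mirrors the new remove_session_privs helper: clear entries for paths
    // that failed before any HTLC was locked in.
    fn remove_session_privs(&mut self, id: PaymentId, failed: &[SessionPriv]) {
        if let Some(payment) = self.payments.get_mut(&id) {
            for sp in failed {
                payment.remove(sp);
            }
        }
    }
    // Abandoning only frees the slot (and would emit PaymentFailed) once no
    // path is still considered in flight.
    fn abandon(&mut self, id: PaymentId) -> bool {
        let no_paths_in_flight = self.payments.get(&id).map_or(false, |p| p.is_empty());
        if no_paths_in_flight {
            self.payments.remove(&id);
        }
        no_paths_in_flight
    }
}

fn main() {
    let mut outbounds = PendingOutbounds::default();
    let privs = [[1u8; 32], [2u8; 32]];
    outbounds.add(42, &privs);
    // Pre-fix: the buggy route failed up front, but the session privs stayed
    // behind, so the payment could neither be abandoned nor retried.
    assert!(!outbounds.abandon(42));
    // Post-fix: handle_pay_route_err clears the failed paths first.
    outbounds.remove_session_privs(42, &privs);
    assert!(outbounds.abandon(42));
}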
--- lightning/src/ln/channelmanager.rs | 1 + lightning/src/ln/functional_tests.rs | 1 + lightning/src/ln/outbound_payment.rs | 57 ++++++++++++--- lightning/src/ln/payment_tests.rs | 77 +++++++++++++++++++- pending_changelog/3531-buggy-router-leak.txt | 4 + 5 files changed, 127 insertions(+), 13 deletions(-) create mode 100644 pending_changelog/3531-buggy-router-leak.txt diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a5ae07eab7f..d58fbaab1af 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -14797,6 +14797,7 @@ mod tests { }, _ => panic!("unexpected error") } + assert!(nodes[0].node.list_recent_payments().is_empty()); } #[test] diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 123b394f374..853985311fe 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -6495,6 +6495,7 @@ fn test_payment_route_reaching_same_channel_twice() { RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), false, APIError::InvalidRoute { ref err }, assert_eq!(err, &"Path went through the same channel twice")); + assert!(nodes[0].node.list_recent_payments().is_empty()); } // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message. diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 4a4b4aaffb2..8517d599c12 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -992,9 +992,9 @@ impl OutboundPayments { ); if let Err(e) = result { self.handle_pay_route_err( - e, payment_id, payment_hash, route, route_params, router, first_hops, - &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, - pending_events, &send_payment_along_path + e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, + &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, + &send_payment_along_path ); } Ok(()) @@ -1274,7 +1274,11 @@ impl OutboundPayments { log_info!(logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, res); if let Err(e) = res { - self.handle_pay_route_err(e, payment_id, payment_hash, route, route_params, router, first_hops, &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path); + self.handle_pay_route_err( + e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, + &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, + &send_payment_along_path + ); } Ok(()) } @@ -1430,15 +1434,21 @@ impl OutboundPayments { best_block_height, &send_payment_along_path); log_info!(logger, "Result retrying payment id {}: {:?}", &payment_id, res); if let Err(e) = res { - self.handle_pay_route_err(e, payment_id, payment_hash, route, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path); + self.handle_pay_route_err( + e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, + inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, + send_payment_along_path + ); } } fn handle_pay_route_err( &self, err: PaymentSendFailure, payment_id: PaymentId, payment_hash: PaymentHash, route: Route, - mut route_params: RouteParameters, router: &R, 
first_hops: Vec, - inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, logger: &L, - pending_events: &Mutex)>>, send_payment_along_path: &SP, + mut route_params: RouteParameters, onion_session_privs: Vec<[u8; 32]>, router: &R, + first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, + best_block_height: u32, logger: &L, + pending_events: &Mutex)>>, + send_payment_along_path: &SP, ) where R::Target: Router, @@ -1467,11 +1477,13 @@ impl OutboundPayments { }, PaymentSendFailure::PathParameterError(results) => { log_error!(logger, "Failed to send to route due to parameter error in a single path. Your router is buggy"); + self.remove_session_privs(payment_id, &route, onion_session_privs); Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, results.into_iter(), logger, pending_events); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, PaymentSendFailure::ParameterError(e) => { log_error!(logger, "Failed to send to route due to parameter error: {:?}. Your router is buggy", e); + self.remove_session_privs(payment_id, &route, onion_session_privs); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, PaymentSendFailure::DuplicatePayment => debug_assert!(false), // unreachable @@ -1511,6 +1523,21 @@ impl OutboundPayments { } } + // If a payment fails after adding the pending payment but before any HTLCs are locked into + // channels, we need to clear the session_privs in order for abandoning the payment to succeed. + fn remove_session_privs( + &self, payment_id: PaymentId, route: &Route, onion_session_privs: Vec<[u8; 32]> + ) { + if let Some(payment) = self.pending_outbound_payments.lock().unwrap().get_mut(&payment_id) { + for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.into_iter()) { + let removed = payment.remove(&session_priv_bytes, Some(path)); + debug_assert!(removed, "This can't happen as the payment has an entry for this path added by callers"); + } + } else { + debug_assert!(false, "This can't happen as the payment was added by callers"); + } + } + pub(super) fn send_probe( &self, path: Path, probing_cookie_secret: [u8; 32], entropy_source: &ES, node_signer: &NS, best_block_height: u32, send_payment_along_path: F @@ -1784,7 +1811,7 @@ impl OutboundPayments { let cur_height = best_block_height + 1; let mut results = Vec::new(); debug_assert_eq!(route.paths.len(), onion_session_privs.len()); - for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.into_iter()) { + for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) { let mut path_res = send_payment_along_path(SendAlongPathArgs { path: &path, payment_hash: &payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage: &keysend_preimage, invoice_request, @@ -1880,9 +1907,15 @@ impl OutboundPayments { // If we failed to send any paths, remove the new PaymentId from the `pending_outbound_payments` // map as the payment is free to be resent. 
fn remove_outbound_if_all_failed(&self, payment_id: PaymentId, err: &PaymentSendFailure) { - if let &PaymentSendFailure::AllFailedResendSafe(_) = err { - let removed = self.pending_outbound_payments.lock().unwrap().remove(&payment_id).is_some(); - debug_assert!(removed, "We should always have a pending payment to remove here"); + match err { + PaymentSendFailure::AllFailedResendSafe(_) + | PaymentSendFailure::ParameterError(_) + | PaymentSendFailure::PathParameterError(_) => + { + let removed = self.pending_outbound_payments.lock().unwrap().remove(&payment_id).is_some(); + debug_assert!(removed, "We should always have a pending payment to remove here"); + }, + PaymentSendFailure::DuplicatePayment | PaymentSendFailure::PartialFailure { .. } => {} } } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 0c9c5d0e920..0b06a18eae7 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -24,7 +24,7 @@ use crate::types::payment::{PaymentHash, PaymentSecret, PaymentPreimage}; use crate::ln::chan_utils; use crate::ln::msgs::ChannelMessageHandler; use crate::ln::onion_utils; -use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, Retry, RetryableSendFailure}; +use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, ProbeSendFailure, Retry, RetryableSendFailure}; use crate::routing::gossip::{EffectiveCapacity, RoutingFees}; use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters}; use crate::routing::scoring::ChannelUsage; @@ -1249,6 +1249,7 @@ fn sent_probe_is_probe_of_sending_node() { // First check we refuse to build a single-hop probe let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000); assert!(nodes[0].node.send_probe(route.paths[0].clone()).is_err()); + assert!(nodes[0].node.list_recent_payments().is_empty()); // Then build an actual two-hop probing path let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000); @@ -4375,3 +4376,77 @@ fn test_non_strict_forwarding() { let events = nodes[0].node.get_and_clear_pending_events(); expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().blamed_scid(routed_scid)); } + +#[test] +fn remove_pending_outbounds_on_buggy_router() { + // Ensure that if a payment errors due to a bogus route, we'll abandon the payment and remove the + // pending outbound from storage. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let amt_msat = 10_000; + let payment_id = PaymentId([42; 32]); + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + + // Extend the path by itself, essentially simulating route going through same channel twice + let cloned_hops = route.paths[0].hops.clone(); + route.paths[0].hops.extend_from_slice(&cloned_hops); + let route_params = route.route_params.clone().unwrap(); + nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); + + nodes[0].node.send_payment( + payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id, route_params, + Retry::Attempts(1) // Even though another attempt is allowed, the payment should fail + ).unwrap(); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match &events[0] { + Event::PaymentPathFailed { failure, payment_failed_permanently, .. } => { + assert_eq!(failure, &PathFailure::InitialSend { + err: APIError::InvalidRoute { err: "Path went through the same channel twice".to_string() } + }); + assert!(!payment_failed_permanently); + }, + _ => panic!() + } + match events[1] { + Event::PaymentFailed { reason, .. } => { + assert_eq!(reason.unwrap(), PaymentFailureReason::UnexpectedError); + }, + _ => panic!() + } + assert!(nodes[0].node.list_recent_payments().is_empty()); +} + +#[test] +fn remove_pending_outbound_probe_on_buggy_path() { + // Ensure that if a probe errors due to a bogus route, we'll return an error and remove the + // pending outbound from storage. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let amt_msat = 10_000; + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let (mut route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + + // Extend the path by itself, essentially simulating route going through same channel twice + let cloned_hops = route.paths[0].hops.clone(); + route.paths[0].hops.extend_from_slice(&cloned_hops); + + assert_eq!( + nodes[0].node.send_probe(route.paths.pop().unwrap()).unwrap_err(), + ProbeSendFailure::ParameterError( + APIError::InvalidRoute { err: "Path went through the same channel twice".to_string() } + ) + ); + assert!(nodes[0].node.list_recent_payments().is_empty()); +} diff --git a/pending_changelog/3531-buggy-router-leak.txt b/pending_changelog/3531-buggy-router-leak.txt new file mode 100644 index 00000000000..72714aa8a8b --- /dev/null +++ b/pending_changelog/3531-buggy-router-leak.txt @@ -0,0 +1,4 @@ +## Bug Fixes + +* Fixed a rare case where a custom router returning a buggy route could result in holding onto a + pending payment forever and in some cases failing to generate a PaymentFailed event (#3531). 
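The next commit unifies this cleanup in `handle_pay_route_err`, taking care to keep paths whose send returned `MonitorUpdateInProgress` registered, since those are still in flight. A minimal sketch of that filtering idiom, with stand-in types in place of `Path` and `APIError`:

enum PathResult { Sent, MonitorUpdateInProgress, Failed }

// The real code zips route.paths with onion_session_privs and filters on
// Ok(_) | Err(APIError::MonitorUpdateInProgress); this mirrors that shape.
fn failed_paths<'a>(
    results: &'a [PathResult], paths_and_privs: &'a [(u8, [u8; 32])],
) -> impl Iterator<Item = &'a (u8, [u8; 32])> {
    results.iter().zip(paths_and_privs.iter()).filter_map(|(res, pp)| match res {
        // Paths pending a monitor update are still "in flight" and keep their
        // session privs; only definitively failed paths are cleaned up.
        PathResult::Sent | PathResult::MonitorUpdateInProgress => None,
        PathResult::Failed => Some(pp),
    })
}

fn main() {
    let results = [PathResult::Sent, PathResult::MonitorUpdateInProgress, PathResult::Failed];
    let paths = [(0u8, [0u8; 32]), (1, [1u8; 32]), (2, [2u8; 32])];
    let failed: Vec<_> = failed_paths(&results, &paths).collect();
    assert_eq!(failed.len(), 1);
    assert_eq!(failed[0].0, 2);
}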
From 440a8cc69caf3e970a7593602a46f154ad77ebba Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 17 Jan 2025 12:20:50 -0500 Subject: [PATCH 06/16] Unify session_priv removal on PaymentSendFailure When an outbound payment fails while paying to a route, we need to remove the session_privs for each failed path in the outbound payment. Previously we were sometimes removing in pay_route_internal and sometimes in handle_pay_route_err, so refactor this so we always remove in handle_pay_route_err. --- lightning/src/ln/outbound_payment.rs | 46 +++++++++++++--------------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 8517d599c12..996df77a69b 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -1460,10 +1460,24 @@ impl OutboundPayments { { match err { PaymentSendFailure::AllFailedResendSafe(errs) => { + self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, errs.into_iter().map(|e| Err(e)), logger, pending_events); self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path); }, PaymentSendFailure::PartialFailure { failed_paths_retry: Some(mut retry), results, .. } => { + debug_assert_eq!(results.len(), route.paths.len()); + debug_assert_eq!(results.len(), onion_session_privs.len()); + let failed_paths = results.iter().zip(route.paths.iter().zip(onion_session_privs.iter())) + .filter_map(|(path_res, (path, session_priv))| { + match path_res { + // While a MonitorUpdateInProgress is an Err(_), the payment is still + // considered "in flight" and we shouldn't remove it from the + // PendingOutboundPayment set. + Ok(_) | Err(APIError::MonitorUpdateInProgress) => None, + _ => Some((path, session_priv)) + } + }); + self.remove_session_privs(payment_id, failed_paths); Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut retry, route.paths, results.into_iter(), logger, pending_events); // Some paths were sent, even if we failed to send the full MPP value our recipient may // misbehave and claim the funds, at which point we have to consider the payment sent, so @@ -1477,13 +1491,13 @@ impl OutboundPayments { }, PaymentSendFailure::PathParameterError(results) => { log_error!(logger, "Failed to send to route due to parameter error in a single path. Your router is buggy"); - self.remove_session_privs(payment_id, &route, onion_session_privs); + self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, results.into_iter(), logger, pending_events); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, PaymentSendFailure::ParameterError(e) => { log_error!(logger, "Failed to send to route due to parameter error: {:?}. 
Your router is buggy", e); - self.remove_session_privs(payment_id, &route, onion_session_privs); + self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, PaymentSendFailure::DuplicatePayment => debug_assert!(false), // unreachable @@ -1525,12 +1539,12 @@ impl OutboundPayments { // If a payment fails after adding the pending payment but before any HTLCs are locked into // channels, we need to clear the session_privs in order for abandoning the payment to succeed. - fn remove_session_privs( - &self, payment_id: PaymentId, route: &Route, onion_session_privs: Vec<[u8; 32]> + fn remove_session_privs<'a, I: Iterator>( + &self, payment_id: PaymentId, path_session_priv: I ) { if let Some(payment) = self.pending_outbound_payments.lock().unwrap().get_mut(&payment_id) { - for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.into_iter()) { - let removed = payment.remove(&session_priv_bytes, Some(path)); + for (path, session_priv_bytes) in path_session_priv { + let removed = payment.remove(session_priv_bytes, Some(path)); debug_assert!(removed, "This can't happen as the payment has an entry for this path added by callers"); } } else { @@ -1812,29 +1826,11 @@ impl OutboundPayments { let mut results = Vec::new(); debug_assert_eq!(route.paths.len(), onion_session_privs.len()); for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) { - let mut path_res = send_payment_along_path(SendAlongPathArgs { + let path_res = send_payment_along_path(SendAlongPathArgs { path: &path, payment_hash: &payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage: &keysend_preimage, invoice_request, session_priv_bytes: *session_priv_bytes }); - match path_res { - Ok(_) => {}, - Err(APIError::MonitorUpdateInProgress) => { - // While a MonitorUpdateInProgress is an Err(_), the payment is still - // considered "in flight" and we shouldn't remove it from the - // PendingOutboundPayment set. - }, - Err(_) => { - let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); - if let Some(payment) = pending_outbounds.get_mut(&payment_id) { - let removed = payment.remove(&session_priv_bytes, Some(path)); - debug_assert!(removed, "This can't happen as the payment has an entry for this path added by callers"); - } else { - debug_assert!(false, "This can't happen as the payment was added by callers"); - path_res = Err(APIError::APIMisuseError { err: "Internal error: payment disappeared during processing. Please report this bug!".to_owned() }); - } - } - } results.push(path_res); } let mut has_ok = false; From f09d33b3eb7ebe2269306d8356ec968bffd13f38 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 14 Jan 2025 14:27:02 -0500 Subject: [PATCH 07/16] Reinstate ChannelManager::send_payment_with_route API Support sending payments to specific routes more ergonomically. We removed the original version of this API because it was hard to work with, but the concept of sending a payment to a specific route is still useful. Previously, users were able to do this via manually matching the payment id in their router, but that's cumbersome when we could just handle it internally.
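In test-harness terms, mirroring the call sites updated below, the reinstated API is used as follows (a usage sketch; `route`, `payment_hash` and `payment_secret` are assumed to come from the surrounding test or application):

// LDK will not retry this payment automatically; on failure a
// RetryableSendFailure is returned and, once the payment is abandoned,
// an Event::PaymentFailed is generated.
nodes[0].node.send_payment_with_route(
    route, payment_hash,
    RecipientOnionFields::secret_only(payment_secret),
    PaymentId(payment_hash.0),
).unwrap();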
Trivial `use` conflicts resolved in: * lightning/src/ln/chanmon_update_fail_tests.rs * lightning/src/ln/functional_tests.rs Silent rebase conflicts resolved in: * lightning/src/routing/router.rs --- fuzz/src/chanmon_consistency.rs | 31 +++------- lightning/src/chain/channelmonitor.rs | 11 ++-- lightning/src/ln/chanmon_update_fail_tests.rs | 33 ++++------ lightning/src/ln/channelmanager.rs | 61 +++++++++++-------- lightning/src/ln/functional_test_utils.rs | 41 ++++++------- lightning/src/ln/functional_tests.rs | 42 ++++++------- lightning/src/ln/outbound_payment.rs | 17 ------ lightning/src/ln/payment_tests.rs | 43 ++++++++++--- lightning/src/ln/shutdown_tests.rs | 6 +- lightning/src/routing/router.rs | 40 ++++++++++++ 10 files changed, 179 insertions(+), 146 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index c2a49b8ee24..73e4f88f1f3 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -48,7 +48,7 @@ use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; use lightning::ln::channel_state::ChannelDetails; use lightning::ln::channelmanager::{ ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, RecentPaymentDetails, - RecipientOnionFields, Retry, + RecipientOnionFields, }; use lightning::ln::functional_test_utils::*; use lightning::ln::inbound_payment::ExpandedKey; @@ -82,7 +82,6 @@ use bitcoin::secp256k1::{self, Message, PublicKey, Scalar, Secp256k1, SecretKey} use lightning::io::Cursor; use std::cmp::{self, Ordering}; -use std::collections::VecDeque; use std::mem; use std::sync::atomic; use std::sync::{Arc, Mutex}; @@ -113,22 +112,14 @@ impl FeeEstimator for FuzzEstimator { } } -struct FuzzRouter { - pub next_routes: Mutex>, -} +struct FuzzRouter {} impl Router for FuzzRouter { fn find_route( &self, _payer: &PublicKey, _params: &RouteParameters, _first_hops: Option<&[&ChannelDetails]>, _inflight_htlcs: InFlightHtlcs, ) -> Result { - if let Some(route) = self.next_routes.lock().unwrap().pop_front() { - return Ok(route); - } - Err(msgs::LightningError { - err: String::from("Not implemented"), - action: msgs::ErrorAction::IgnoreError, - }) + unreachable!() } fn create_blinded_payment_paths( @@ -518,7 +509,7 @@ fn send_payment( PaymentParameters::from_node_id(source.get_our_node_id(), TEST_FINAL_CLTV), amt, ); - source.router.next_routes.lock().unwrap().push_back(Route { + let route = Route { paths: vec![Path { hops: vec![RouteHop { pubkey: dest.get_our_node_id(), @@ -532,11 +523,10 @@ fn send_payment( blinded_tail: None, }], route_params: Some(route_params.clone()), - }); + }; let onion = RecipientOnionFields::secret_only(payment_secret); let payment_id = PaymentId(payment_id); - let res = - source.send_payment(payment_hash, onion, payment_id, route_params, Retry::Attempts(0)); + let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); match res { Err(err) => { panic!("Errored with {:?} on initial payment send", err); @@ -592,7 +582,7 @@ fn send_hop_payment( PaymentParameters::from_node_id(source.get_our_node_id(), TEST_FINAL_CLTV), amt, ); - source.router.next_routes.lock().unwrap().push_back(Route { + let route = Route { paths: vec![Path { hops: vec![ RouteHop { @@ -617,11 +607,10 @@ fn send_hop_payment( blinded_tail: None, }], route_params: Some(route_params.clone()), - }); + }; let onion = RecipientOnionFields::secret_only(payment_secret); let payment_id = PaymentId(payment_id); - let res = - source.send_payment(payment_hash, onion, payment_id, route_params, 
Retry::Attempts(0)); + let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); match res { Err(err) => { panic!("Errored with {:?} on initial payment send", err); @@ -640,7 +629,7 @@ fn send_hop_payment( pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let out = SearchingOutput::new(underlying_out); let broadcast = Arc::new(TestBroadcaster {}); - let router = FuzzRouter { next_routes: Mutex::new(VecDeque::new()) }; + let router = FuzzRouter {}; macro_rules! make_node { ($node_id: expr, $fee_estimator: expr) => {{ diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 3f6bdc3f256..62207eeafbb 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -5092,7 +5092,7 @@ mod tests { use crate::chain::chaininterface::LowerBoundedFeeEstimator; use super::ChannelMonitorUpdateStep; - use crate::{check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash, unwrap_send_err}; + use crate::{check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash}; use crate::chain::{BestBlock, Confirm}; use crate::chain::channelmonitor::{ChannelMonitor, WithChannelMonitor}; use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT}; @@ -5102,10 +5102,9 @@ mod tests { use crate::types::payment::{PaymentPreimage, PaymentHash}; use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey}; use crate::ln::chan_utils::{self,HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters}; - use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields}; + use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; use crate::ln::functional_test_utils::*; use crate::ln::script::ShutdownScript; - use crate::util::errors::APIError; use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator}; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::logger::Logger; @@ -5166,9 +5165,9 @@ mod tests { // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass // the update through to the ChannelMonitor which will refuse it (as the channel is closed). 
let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 100_000); - unwrap_send_err!(nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ), false, APIError::MonitorUpdateInProgress, {}); + nodes[1].node.send_payment_with_route(route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) + ).unwrap(); check_added_monitors!(nodes[1], 1); // Build a new ChannelMonitorUpdate which contains both the failing commitment tx update diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index fcc1f8f5a64..2d01ece1158 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -19,13 +19,12 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor}; use crate::chain::transaction::OutPoint; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination}; -use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields}; +use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; use crate::ln::channel::{AnnouncementSigsState, ChannelPhase}; use crate::ln::msgs; use crate::ln::types::ChannelId; use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler}; use crate::util::test_channel_signer::TestChannelSigner; -use crate::util::errors::APIError; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::test_utils::TestBroadcaster; @@ -133,9 +132,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); { - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) - ), false, APIError::MonitorUpdateInProgress, {}); + nodes[0].node.send_payment_with_route(route, payment_hash_1, + RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) + ).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -190,9 +189,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) - ), false, APIError::MonitorUpdateInProgress, {}); + nodes[0].node.send_payment_with_route(route, payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) + ).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -257,9 +256,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) - ), false, APIError::MonitorUpdateInProgress, {}); + 
nodes[0].node.send_payment_with_route(route, payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) + ).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -2004,16 +2003,10 @@ fn test_path_paused_mpp() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - // Now check that we get the right return value, indicating that the first path succeeded but - // the second got a MonitorUpdateInProgress err. This implies - // PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry. - if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment_with_route( + // The first path should have succeeded with the second getting a MonitorUpdateInProgress err. + nodes[0].node.send_payment_with_route( route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ) { - assert_eq!(results.len(), 2); - if let Ok(()) = results[0] {} else { panic!(); } - if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); } - } else { panic!(); } + ).unwrap(); check_added_monitors!(nodes[0], 2); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d58fbaab1af..a5978013c3b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -55,9 +55,7 @@ use crate::ln::channel_state::ChannelDetails; use crate::types::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] use crate::types::features::Bolt11InvoiceFeatures; -use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, RouteParameters, Router}; -#[cfg(test)] -use crate::routing::router::Route; +use crate::routing::router::{BlindedTail, FixedRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router}; use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails}; use crate::ln::msgs; use crate::ln::onion_utils; @@ -2397,9 +2395,6 @@ where fee_estimator: LowerBoundedFeeEstimator, chain_monitor: M, tx_broadcaster: T, - #[cfg(fuzzing)] - pub router: R, - #[cfg(not(fuzzing))] router: R, message_router: MR, @@ -4622,21 +4617,31 @@ where } } - // Deprecated send method, for testing use [`Self::send_payment`] and - // [`TestRouter::expect_find_route`] instead. - // - // [`TestRouter::expect_find_route`]: crate::util::test_utils::TestRouter::expect_find_route - #[cfg(test)] - pub(crate) fn send_payment_with_route( - &self, route: Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, + /// Sends a payment along a given route. See [`Self::send_payment`] for more info. + /// + /// LDK will not automatically retry this payment, though it may be manually re-sent after an + /// [`Event::PaymentFailed`] is generated. 
+ pub fn send_payment_with_route( + &self, mut route: Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId - ) -> Result<(), PaymentSendFailure> { + ) -> Result<(), RetryableSendFailure> { let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let route_params = route.route_params.clone().unwrap_or_else(|| { + // Create a dummy route params since they're a required parameter but unused in this case + let (payee_node_id, cltv_delta) = route.paths.first() + .and_then(|path| path.hops.last().map(|hop| (hop.pubkey, hop.cltv_expiry_delta as u32))) + .unwrap_or_else(|| (PublicKey::from_slice(&[2; 32]).unwrap(), MIN_FINAL_CLTV_EXPIRY_DELTA as u32)); + let dummy_payment_params = PaymentParameters::from_node_id(payee_node_id, cltv_delta); + RouteParameters::from_payment_params_and_value(dummy_payment_params, route.get_total_amount()) + }); + if route.route_params.is_none() { route.route_params = Some(route_params.clone()); } + let router = FixedRouter::new(route); self.pending_outbound_payments - .send_payment_with_route(&route, payment_hash, recipient_onion, payment_id, - &self.entropy_source, &self.node_signer, best_block_height, - |args| self.send_payment_along_path(args)) + .send_payment(payment_hash, recipient_onion, payment_id, Retry::Attempts(0), + route_params, &&router, self.list_usable_channels(), || self.compute_inflight_htlcs(), + &self.entropy_source, &self.node_signer, best_block_height, &self.logger, + &self.pending_events, |args| self.send_payment_along_path(args)) } /// Sends a payment to the route found using the provided [`RouteParameters`], retrying failed @@ -4665,7 +4670,8 @@ where /// [`ChannelManager::list_recent_payments`] for more information. /// /// Routes are automatically found using the [`Router] provided on startup. To fix a route for a - /// particular payment, match the [`PaymentId`] passed to [`Router::find_route_with_id`]. + /// particular payment, use [`Self::send_payment_with_route`] or match the [`PaymentId`] passed to + /// [`Router::find_route_with_id`]. 
/// /// [`Event::PaymentSent`]: events::Event::PaymentSent /// [`Event::PaymentFailed`]: events::Event::PaymentFailed @@ -14363,7 +14369,7 @@ mod tests { use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; - use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId}; + use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, RecipientOnionFields, InterceptId}; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{self, ErrorAction}; use crate::ln::msgs::ChannelMessageHandler; @@ -14789,14 +14795,17 @@ mod tests { route.paths[1].hops[0].short_channel_id = chan_2_id; route.paths[1].hops[1].short_channel_id = chan_4_id; - match nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)) - .unwrap_err() { - PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => { - assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) - }, - _ => panic!("unexpected error") + nodes[0].node.send_payment_with_route(route, payment_hash, + RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { reason, .. } => { + assert_eq!(reason.unwrap(), crate::events::PaymentFailureReason::UnexpectedError); + } + _ => panic!() } + nodes[0].logger.assert_log_contains("lightning::ln::outbound_payment", "Payment secret is required for multi-path payments", 2); assert!(nodes[0].node.list_recent_payments().is_empty()); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index b4f172b4a27..63341969326 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1064,30 +1064,27 @@ macro_rules! get_local_commitment_txn { /// Check the error from attempting a payment. #[macro_export] macro_rules! unwrap_send_err { - ($res: expr, $all_failed: expr, $type: pat, $check: expr) => { - match &$res { - &Err(PaymentSendFailure::AllFailedResendSafe(ref fails)) if $all_failed => { - assert_eq!(fails.len(), 1); - match fails[0] { - $type => { $check }, - _ => panic!(), - } - }, - &Err(PaymentSendFailure::PartialFailure { ref results, .. }) if !$all_failed => { - assert_eq!(results.len(), 1); - match results[0] { - Err($type) => { $check }, - _ => panic!(), - } - }, - &Err(PaymentSendFailure::PathParameterError(ref result)) if !$all_failed => { - assert_eq!(result.len(), 1); - match result[0] { - Err($type) => { $check }, - _ => panic!(), + ($node: expr, $res: expr, $all_failed: expr, $type: pat, $check: expr) => { + assert!($res.is_ok()); + let events = $node.node.get_and_clear_pending_events(); + assert!(events.len() == 2); + match &events[0] { + crate::events::Event::PaymentPathFailed { failure, .. } => { + match failure { + crate::events::PathFailure::InitialSend { err } => { + match err { + $type => { $check }, + _ => panic!() + } + }, + _ => panic!() } }, - _ => {panic!()}, + _ => panic!() + } + match &events[1] { + crate::events::Event::PaymentFailed { .. 
} => {}, + _ => panic!() } } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 853985311fe..9b7a6c83f18 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -23,7 +23,7 @@ use crate::events::{Event, FundingInfo, MessageSendEvent, MessageSendEventsProvi use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase}; -use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; +use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError}; use crate::ln::{chan_utils, onion_utils}; use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment}; @@ -1187,7 +1187,7 @@ fn holding_cell_htlc_counting() { // the holding cell waiting on B's RAA to send. At this point we should not be able to add // another HTLC. { - unwrap_send_err!(nodes[1].node.send_payment_with_route(route, payment_hash_1, + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, payment_hash_1, RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1411,14 +1411,8 @@ fn test_basic_channel_reserve() { get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); route.paths[0].hops.last_mut().unwrap().fee_msat += 1; let err = nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap(); - match err { - PaymentSendFailure::AllFailedResendSafe(ref fails) => { - if let &APIError::ChannelUnavailable { .. } = &fails[0] {} - else { panic!("Unexpected error variant"); } - }, - _ => panic!("Unexpected error variant"), - } + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)); + unwrap_send_err!(nodes[0], err, true, APIError::ChannelUnavailable { .. }, {} ); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); send_payment(&nodes[0], &vec![&nodes[1]], max_can_send); @@ -1604,7 +1598,7 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { } // However one more HTLC should be significantly over the reserve amount and fail. - unwrap_send_err!(nodes[1].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1704,7 +1698,7 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt); route.paths[0].hops[0].fee_msat += 1; - unwrap_send_err!(nodes[1].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); } @@ -1916,7 +1910,7 @@ fn test_channel_reserve_holding_cell_htlcs() { route.paths[0].hops.last_mut().unwrap().fee_msat += 1; assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1988,7 +1982,7 @@ fn test_channel_reserve_holding_cell_htlcs() { let mut route = route_1.clone(); route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1; let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -2018,7 +2012,7 @@ fn test_channel_reserve_holding_cell_htlcs() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22); route.paths[0].hops.last_mut().unwrap().fee_msat += 1; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -6491,7 +6485,7 @@ fn test_payment_route_reaching_same_channel_twice() { let cloned_hops = route.paths[0].hops.clone(); route.paths[0].hops.extend_from_slice(&cloned_hops); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), false, APIError::InvalidRoute { ref err }, assert_eq!(err, &"Path went through the same channel twice")); @@ -6514,7 +6508,7 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); route.paths[0].hops[0].fee_msat = 100; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -6531,13 +6525,13 @@ fn test_update_add_htlc_bolt2_sender_zero_value_msat() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); route.paths[0].hops[0].fee_msat = 0; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Cannot send 0-msat HTLC")); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1); + nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 2); } #[test] @@ -6578,7 +6572,7 @@ fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() { .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::InvalidRoute { ref err }, assert_eq!(err, &"Channel CLTV overflowed?")); @@ -6622,7 +6616,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000); } - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. 
}, {}); @@ -6646,7 +6640,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { // Manually create a route over our max in flight (which our router normally automatically // limits us to. route.paths[0].hops[0].fee_msat = max_in_flight + 1; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -10305,11 +10299,11 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 }; // With default dust exposure: 5000 sats if on_holder_tx { - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); } else { - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); } diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 996df77a69b..7b579b7a261 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -790,23 +790,6 @@ impl OutboundPayments { best_block_height, logger, pending_events, &send_payment_along_path) } - #[cfg(test)] - pub(super) fn send_payment_with_route( - &self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, - payment_id: PaymentId, entropy_source: &ES, node_signer: &NS, best_block_height: u32, - send_payment_along_path: F - ) -> Result<(), PaymentSendFailure> - where - ES::Target: EntropySource, - NS::Target: NodeSigner, - F: Fn(SendAlongPathArgs) -> Result<(), APIError> - { - let onion_session_privs = self.add_new_pending_payment(payment_hash, recipient_onion.clone(), payment_id, None, route, None, None, entropy_source, best_block_height)?; - self.pay_route_internal(route, payment_hash, &recipient_onion, None, None, payment_id, None, - &onion_session_privs, node_signer, best_block_height, &send_payment_along_path) - .map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e }) - } - pub(super) fn send_spontaneous_payment( &self, payment_preimage: Option, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 0b06a18eae7..0963ed0aa4f 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -16,7 +16,7 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATE use crate::sign::EntropySource; use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentFailureReason, PaymentPurpose}; use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI}; -use 
crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo}; +use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo}; use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; use crate::ln::msgs; use crate::ln::types::ChannelId; @@ -599,7 +599,7 @@ fn no_pending_leak_on_initial_send_failure() { nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Peer for first hop currently disconnected")); @@ -946,7 +946,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // confirming, we will fail as it's considered still-pending... let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 }); match nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected error") } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -985,7 +985,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); match nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected error") } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1004,7 +1004,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); match nodes[0].node.send_payment_with_route(new_route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected error") } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1548,7 +1548,7 @@ fn claimed_send_payment_idempotent() { let send_result = nodes[0].node.send_payment_with_route(route.clone(), second_payment_hash, RecipientOnionFields::secret_only(second_payment_secret), payment_id); match send_result { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", send_result), } @@ -1627,7 +1627,7 @@ fn abandoned_send_payment_idempotent() { let send_result = nodes[0].node.send_payment_with_route(route.clone(), second_payment_hash, RecipientOnionFields::secret_only(second_payment_secret), payment_id); match send_result { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", 
send_result), } @@ -4450,3 +4450,32 @@ fn remove_pending_outbound_probe_on_buggy_path() { ); assert!(nodes[0].node.list_recent_payments().is_empty()); } + +#[test] +fn pay_route_without_params() { + // Make sure we can use ChannelManager::send_payment_with_route to pay a route where + // Route::route_parameters is None. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let amt_msat = 10_000; + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + route.route_params.take(); + nodes[0].node.send_payment_with_route( + route, payment_hash, RecipientOnionFields::secret_only(payment_secret), + PaymentId(payment_hash.0) + ).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, payment_hash, Some(payment_secret), node_1_msgs, true, None); + claim_payment_along_route( + ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], payment_preimage) + ); +} diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 9fd428329af..960dc441ae5 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -15,7 +15,7 @@ use crate::chain::ChannelMonitorUpdateStatus; use crate::chain::transaction::OutPoint; use crate::events::{Event, MessageSendEvent, HTLCDestination, MessageSendEventsProvider, ClosureReason}; use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState}; -use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, Retry}; +use crate::ln::channelmanager::{self, PaymentId, RecipientOnionFields, Retry}; use crate::routing::router::{PaymentParameters, get_route, RouteParameters}; use crate::ln::msgs; use crate::ln::types::ChannelId; @@ -364,10 +364,10 @@ fn updates_shutdown_wait() { let route_params = RouteParameters::from_payment_params_and_value(payment_params_2, 100_000); let route_2 = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None, &logger, &scorer, &Default::default(), &random_seed_bytes).unwrap(); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route_1, payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route_1, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable {..}, {}); - unwrap_send_err!(nodes[1].node.send_payment_with_route(route_2, payment_hash, + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route_2, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable {..}, {}); diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 78a93aa0d39..04f55837267 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -25,6 +25,7 @@ use 
crate::offers::invoice::Bolt12Invoice;
 use crate::routing::gossip::{DirectedChannelInfo, EffectiveCapacity, ReadOnlyNetworkGraph, NetworkGraph, NodeId};
 use crate::routing::scoring::{ChannelUsage, LockableScore, ScoreLookUp};
 use crate::sign::EntropySource;
+use crate::sync::Mutex;
 use crate::util::ser::{Writeable, Readable, ReadableArgs, Writer};
 use crate::util::logger::{Level, Logger};
 use crate::crypto::chacha20::ChaCha20;
@@ -185,6 +186,45 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, ES: Deref, S: Deref, SP: Size
 }
 }

+/// A `Router` that returns a fixed route one time, erroring otherwise. Useful for
+/// `ChannelManager::send_payment_with_route` to support sending to specific routes without
+/// requiring a custom `Router` implementation.
+pub(crate) struct FixedRouter {
+ // Use an `Option` to avoid needing to clone the route when `find_route` is called.
+ route: Mutex<Option<Route>>,
+}
+
+impl FixedRouter {
+ pub(crate) fn new(route: Route) -> Self {
+ Self { route: Mutex::new(Some(route)) }
+ }
+}
+
+impl Router for FixedRouter {
+ fn find_route(
+ &self, _payer: &PublicKey, _route_params: &RouteParameters,
+ _first_hops: Option<&[&ChannelDetails]>, _inflight_htlcs: InFlightHtlcs
+ ) -> Result<Route, LightningError> {
+ self.route.lock().unwrap().take().ok_or_else(|| {
+ LightningError {
+ err: "Can't use this router to return multiple routes".to_owned(),
+ action: ErrorAction::IgnoreError,
+ }
+ })
+ }
+
+ fn create_blinded_payment_paths<
+ T: secp256k1::Signing + secp256k1::Verification
+ > (
+ &self, _recipient: PublicKey, _first_hops: Vec<ChannelDetails>, _tlvs: ReceiveTlvs,
+ _amount_msats: u64, _secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<BlindedPaymentPath>, ()> {
+ // Should be unreachable as this router is only intended to provide a one-time payment route.
+ debug_assert!(false);
+ Err(())
+ }
+}
+
 /// A trait defining behavior for routing a payment.
 pub trait Router {
 /// Finds a [`Route`] for a payment between the given `payer` and a payee.

From a31d70d5793be6de38602f8a8eae721d5d6af8b9 Mon Sep 17 00:00:00 2001
From: Devrandom
Date: Fri, 17 Jan 2025 10:52:08 +0100
Subject: [PATCH 08/16] RawBolt11Invoice to/from ascii utilities for remote
 signing of invoices

---
 lightning-invoice/src/lib.rs | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs
index 07c8342b5ea..17cc41f9502 100644
--- a/lightning-invoice/src/lib.rs
+++ b/lightning-invoice/src/lib.rs
@@ -48,6 +48,7 @@ use core::iter::FilterMap;
 use core::num::ParseIntError;
 use core::ops::Deref;
 use core::slice::Iter;
+use core::str::FromStr;
 use core::time::Duration;

 #[cfg(feature = "serde")]
@@ -78,8 +79,12 @@ use crate::prelude::*;
 /// Re-export serialization traits
 #[cfg(fuzzing)]
 pub use crate::de::FromBase32;
+#[cfg(not(fuzzing))]
+use crate::de::FromBase32;
 #[cfg(fuzzing)]
 pub use crate::ser::Base32Iterable;
+#[cfg(not(fuzzing))]
+use crate::ser::Base32Iterable;

 /// Errors that indicate what is wrong with the invoice. They have some granularity for debug
 /// reasons, but should generally result in an "invalid BOLT11 invoice" message for the user.
@@ -1086,9 +1091,6 @@ impl RawBolt11Invoice {

 /// Calculate the hash of the encoded `RawBolt11Invoice` which should be signed.
 pub fn signable_hash(&self) -> [u8; 32] {
- #[cfg(not(fuzzing))]
- use crate::ser::Base32Iterable;
-
 Self::hash_from_parts(self.hrp.to_string().as_bytes(), self.data.fe_iter())
 }

@@ -1189,6 +1191,21 @@ impl RawBolt11Invoice {
 pub fn currency(&self) -> Currency {
 self.hrp.currency.clone()
 }
+
+ /// Convert to HRP prefix and Fe32 encoded data part.
+ /// Can be used to transmit unsigned invoices for remote signing.
+ pub fn to_raw(&self) -> (String, Vec<Fe32>) {
+ (self.hrp.to_string(), self.data.fe_iter().collect())
+ }
+
+ /// Convert from HRP prefix and Fe32 encoded data part.
+ /// Can be used to receive unsigned invoices for remote signing.
+ pub fn from_raw(hrp: &str, data: &[Fe32]) -> Result<Self, Bolt11ParseError> {
+ let raw_hrp: RawHrp = RawHrp::from_str(hrp)?;
+ let data_part = RawDataPart::from_base32(data)?;
+
+ Ok(Self { hrp: raw_hrp, data: data_part })
+ }
 }

 impl PositiveTimestamp {

From 710598df99a31b95369c84a929d487d3884b73ee Mon Sep 17 00:00:00 2001
From: Alec Chen
Date: Sun, 19 Jan 2025 23:57:13 +0000
Subject: [PATCH 09/16] Add cltv expiry to PendingHTLCRouting::Forward

In a coming commit we'll expire HTLCs backwards even if we haven't yet
claimed them on-chain based on their inbound edge being close to causing
a channel force-closure. Here we track the incoming edge's CLTV expiry
in the pending-routing state so that we can include it in the
`HTLCSource` in the next commit.

Co-authored-by: Matt Corallo

---
 lightning/src/ln/channelmanager.rs | 15 +++++++++++++--
 lightning/src/ln/onion_payment.rs  |  1 +
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index a5978013c3b..90c2ffd704f 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -164,6 +164,8 @@ pub enum PendingHTLCRouting {
 short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
 /// Set if this HTLC is being forwarded within a blinded path.
 blinded: Option<BlindedForward>,
+ /// The absolute CLTV of the inbound HTLC
+ incoming_cltv_expiry: Option<u32>,
 },
 /// The onion indicates that this is a payment for an invoice (supposedly) generated by us.
 ///
@@ -264,6 +266,14 @@ impl PendingHTLCRouting {
 _ => None,
 }
 }
+
+ fn incoming_cltv_expiry(&self) -> Option<u32> {
+ match self {
+ Self::Forward { incoming_cltv_expiry, .. } => *incoming_cltv_expiry,
+ Self::Receive { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
+ Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
+ }
+ }
 }

 /// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
@@ -5541,9 +5551,9 @@ where
 })?;

 let routing = match payment.forward_info.routing {
- PendingHTLCRouting::Forward { onion_packet, blinded, ..
} => {
 PendingHTLCRouting::Forward {
- onion_packet, blinded, short_channel_id: next_hop_scid
+ onion_packet, blinded, incoming_cltv_expiry, short_channel_id: next_hop_scid,
 }
 },
 _ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted
@@ -12378,6 +12388,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting,
 (0, onion_packet, required),
 (1, blinded, option),
 (2, short_channel_id, required),
+ (3, incoming_cltv_expiry, option),
 },
 (1, Receive) => {
 (0, payment_data, required),
diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs
index 193cdd1582a..f9d4f371227 100644
--- a/lightning/src/ln/onion_payment.rs
+++ b/lightning/src/ln/onion_payment.rs
@@ -110,6 +110,7 @@ pub(super) fn create_fwd_pending_htlc_info(
 routing: PendingHTLCRouting::Forward {
 onion_packet: outgoing_packet,
 short_channel_id,
+ incoming_cltv_expiry: Some(msg.cltv_expiry),
 blinded: intro_node_blinding_point.or(msg.blinding_point)
 .map(|bp| BlindedForward {
 inbound_blinding_point: bp,

From 9723b2eb18c4b8d4f504889c114a28bcef3d39d7 Mon Sep 17 00:00:00 2001
From: Alec Chen
Date: Mon, 20 Jan 2025 00:09:00 +0000
Subject: [PATCH 10/16] Add cltv expiry to HTLCPreviousHopData

In a coming commit we'll expire HTLCs backwards even if we haven't yet
claimed them on-chain based on their inbound edge being close to causing
a channel force-closure. Here we track and expose the incoming edge's
CLTV expiry in the `HTLCSource`, giving `ChannelMonitor` access to it.

Co-authored-by: Matt Corallo

---
 lightning/src/ln/channelmanager.rs | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 90c2ffd704f..b4003a7a50e 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -394,6 +394,9 @@ pub(crate) struct HTLCPreviousHopData {
 // channel with a preimage provided by the forward channel.
 outpoint: OutPoint,
 counterparty_node_id: Option<PublicKey>,
+ /// Used to preserve our backwards channel by failing back in case an HTLC claim in the forward
+ /// channel remains unconfirmed for too long.
+ cltv_expiry: Option<u32>,
 }

 #[derive(PartialEq, Eq)]
@@ -696,6 +699,15 @@ impl HTLCSource {
 true
 }
 }
+
+ /// Returns the CLTV expiry of the inbound HTLC (i.e. the source referred to by this object),
+ /// if the source was a forwarded HTLC and the HTLC was first forwarded on LDK 0.1.1 or later.
+ pub(crate) fn inbound_htlc_expiry(&self) -> Option<u32> {
+ match self {
+ Self::PreviousHopData(HTLCPreviousHopData { cltv_expiry, .. }) => *cltv_expiry,
+ _ => None,
+ }
+ }
 }

 /// This enum is used to specify which error data to send to peers when failing back an HTLC
@@ -5592,7 +5604,7 @@ where
 err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
 })?;

- if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing {
+ if let PendingHTLCRouting::Forward { short_channel_id, incoming_cltv_expiry, ..
} = payment.forward_info.routing { let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: payment.prev_short_channel_id, user_channel_id: Some(payment.prev_user_channel_id), @@ -5603,6 +5615,7 @@ where incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret, phantom_shared_secret: None, blinded_failure: payment.forward_info.routing.blinded_failure(), + cltv_expiry: incoming_cltv_expiry, }); let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10); @@ -5777,6 +5790,7 @@ where outgoing_cltv_value, .. } }) => { + let cltv_expiry = routing.incoming_cltv_expiry(); macro_rules! failure_handler { ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash)); @@ -5792,6 +5806,7 @@ where incoming_packet_shared_secret: incoming_shared_secret, phantom_shared_secret: $phantom_ss, blinded_failure: routing.blinded_failure(), + cltv_expiry, }); let reason = if $next_hop_unknown { @@ -5901,7 +5916,7 @@ where prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo { incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, routing: PendingHTLCRouting::Forward { - ref onion_packet, blinded, .. + ref onion_packet, blinded, incoming_cltv_expiry, .. }, skimmed_fee_msat, .. }, }) => { @@ -5916,6 +5931,7 @@ where // Phantom payments are only PendingHTLCRouting::Receive. phantom_shared_secret: None, blinded_failure: blinded.map(|b| b.failure), + cltv_expiry: incoming_cltv_expiry, }); let next_blinding_point = blinded.and_then(|b| { b.next_blinding_override.or_else(|| { @@ -6090,6 +6106,7 @@ where incoming_packet_shared_secret: incoming_shared_secret, phantom_shared_secret, blinded_failure, + cltv_expiry: Some(cltv_expiry), }, // We differentiate the received value from the sender intended value // if possible so that we don't prematurely mark MPP payments complete @@ -6123,6 +6140,7 @@ where incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret, phantom_shared_secret, blinded_failure, + cltv_expiry: Some(cltv_expiry), }), payment_hash, HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data), HTLCDestination::FailedPayment { payment_hash: $payment_hash }, @@ -8998,6 +9016,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ incoming_packet_shared_secret: forward_info.incoming_shared_secret, phantom_shared_secret: None, blinded_failure: forward_info.routing.blinded_failure(), + cltv_expiry: forward_info.routing.incoming_cltv_expiry(), }); failed_intercept_forwards.push((htlc_source, forward_info.payment_hash, @@ -11161,6 +11180,7 @@ where outpoint: htlc.prev_funding_outpoint, channel_id: htlc.prev_channel_id, blinded_failure: htlc.forward_info.routing.blinded_failure(), + cltv_expiry: htlc.forward_info.routing.incoming_cltv_expiry(), }); let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing { @@ -12504,6 +12524,7 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, { (2, outpoint, required), (3, blinded_failure, option), (4, htlc_id, required), + (5, cltv_expiry, option), (6, incoming_packet_shared_secret, required), (7, user_channel_id, option), // Note that by the time we get past the required read for type 2 above, outpoint will be From c90c394f7088a8166ec0f9823f9ad1287e9112a9 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 21 Jan 2025 22:00:51 +0000 Subject: [PATCH 11/16] Drop `Channel::historical_inbound_htlc_fulfills` This field was used to test that any HTLC failures didn't come in after an HTLC was fulfilled (indicating, somewhat dubiously, that there may be a bug causing us to fail when we shouldn't have). In the next commit, we'll be failing HTLCs based on on-chain HTLC expiry, but may ultimately receive the preimage thereafter. This would make the `historical_inbound_htlc_fulfills` checks potentially-brittle, so we just remove them as they have dubious value. --- lightning/src/ln/channel.rs | 86 ++++-------------------------- lightning/src/ln/channelmanager.rs | 1 - 2 files changed, 9 insertions(+), 78 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 5e3d72a6241..c15a4bee643 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1453,15 +1453,6 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { /// [`msgs::RevokeAndACK`] message from the counterparty. sent_message_awaiting_response: Option, - #[cfg(any(test, fuzzing))] - // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the - // corresponding HTLC on the inbound path. If, then, the outbound path channel is - // disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack - // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This - // is fine, but as a sanity check in our failure to generate the second claim, we check here - // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC. 
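The `inbound_htlc_expiry` accessor introduced just above is what the monitor-side logic of PATCH 12 below consumes. A hedged sketch of that decision, purely for intuition (the `should_fail_back` helper is hypothetical and not part of this series; the real check lives in the `ChannelMonitorImpl` changes in PATCH 12):

    // Hypothetical helper mirroring PATCH 12's check: fail a forwarded HTLC
    // back once its inbound edge is within LATENCY_GRACE_PERIOD_BLOCKS of
    // expiring, rather than letting the upstream channel force-close.
    fn should_fail_back(source: &HTLCSource, best_block_height: u32) -> bool {
        match source.inbound_htlc_expiry() {
            // None for locally-originated HTLCs, or forwards first committed
            // before LDK 0.1.1 (no inbound expiry was recorded back then).
            None => false,
            Some(expiry) =>
                expiry <= best_block_height.saturating_add(LATENCY_GRACE_PERIOD_BLOCKS),
        }
    }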
- historical_inbound_htlc_fulfills: HashSet<u64>,
-
 /// This channel's type, as negotiated during channel open
 channel_type: ChannelTypeFeatures,
@@ -2210,9 +2201,6 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 funding_tx_broadcast_safe_event_emitted: false,
 channel_ready_event_emitted: false,

- #[cfg(any(test, fuzzing))]
- historical_inbound_htlc_fulfills: new_hash_set(),
-
 channel_type,
 channel_keys_id,

@@ -2443,9 +2431,6 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 funding_tx_broadcast_safe_event_emitted: false,
 channel_ready_event_emitted: false,

- #[cfg(any(test, fuzzing))]
- historical_inbound_htlc_fulfills: new_hash_set(),
-
 channel_type,
 channel_keys_id,

@@ -4472,10 +4457,6 @@ impl<SP: Deref> Channel<SP> where
 }
 }
 if pending_idx == core::usize::MAX {
- #[cfg(any(test, fuzzing))]
- // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
- // this is simply a duplicate claim, not previously failed and we lost funds.
- debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
 return UpdateFulfillFetch::DuplicateClaim {};
 }

@@ -4505,8 +4486,6 @@ impl<SP: Deref> Channel<SP> where
 if htlc_id_arg == htlc_id {
 // Make sure we don't leave latest_monitor_update_id incremented here:
 self.context.latest_monitor_update_id -= 1;
- #[cfg(any(test, fuzzing))]
- debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
 return UpdateFulfillFetch::DuplicateClaim {};
 }
 },
@@ -4528,12 +4507,8 @@ impl<SP: Deref> Channel<SP> where
 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
 });
- #[cfg(any(test, fuzzing))]
- self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
 }
- #[cfg(any(test, fuzzing))]
- self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);

 {
 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
@@ -4598,14 +4573,8 @@ impl<SP: Deref> Channel<SP> where
 }
 }

- /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
- /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
- /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
- /// before we fail backwards.
- ///
- /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
- /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
- /// [`ChannelError::Ignore`].
+ /// Returns `Err` (always with [`ChannelError::Ignore`]) if the HTLC could not be failed (e.g.
+ /// if it was already resolved). Otherwise returns `Ok`.
 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
 -> Result<(), ChannelError> where L::Target: Logger {
 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
@@ -4623,14 +4592,8 @@ impl<SP: Deref> Channel<SP> where
 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
 }

- /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
- /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
- /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
- /// before we fail backwards.
- ///
- /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
- /// return `Ok(_)` if preconditions are met.
In any case, `Err`s will only be - /// [`ChannelError::Ignore`]. + /// Returns `Err` (always with [`ChannelError::Ignore`]) if the HTLC could not be failed (e.g. + /// if it was already resolved). Otherwise returns `Ok`. fn fail_htlc( &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool, logger: &L @@ -4648,12 +4611,8 @@ impl Channel where if htlc.htlc_id == htlc_id_arg { match htlc.state { InboundHTLCState::Committed => {}, - InboundHTLCState::LocalRemoved(ref reason) => { - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { - } else { - debug_assert!(false, "Tried to fail an HTLC that was already failed"); - } - return Ok(None); + InboundHTLCState::LocalRemoved(_) => { + return Err(ChannelError::Ignore(format!("HTLC {} was already resolved", htlc.htlc_id))); }, _ => { debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); @@ -4664,11 +4623,7 @@ impl Channel where } } if pending_idx == core::usize::MAX { - #[cfg(any(test, fuzzing))] - // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this - // is simply a duplicate fail, not previously failed and we failed-back too early. - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); - return Ok(None); + return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc_id_arg))); } if !self.context.channel_state.can_generate_new_commitment() { @@ -4682,17 +4637,14 @@ impl Channel where match pending_update { &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => { if htlc_id_arg == htlc_id { - #[cfg(any(test, fuzzing))] - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); - return Ok(None); + return Err(ChannelError::Ignore(format!("HTLC {} was already claimed!", htlc_id))); } }, &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } | &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => { if htlc_id_arg == htlc_id { - debug_assert!(false, "Tried to fail an HTLC that was already failed"); - return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned())); + return Err(ChannelError::Ignore(format!("HTLC {} was already pending failure", htlc_id))); } }, _ => {} @@ -9543,13 +9495,6 @@ impl Writeable for Channel where SP::Target: SignerProvider { self.context.channel_update_status.write(writer)?; - #[cfg(any(test, fuzzing))] - (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?; - #[cfg(any(test, fuzzing))] - for htlc in self.context.historical_inbound_htlc_fulfills.iter() { - htlc.write(writer)?; - } - // If the channel type is something other than only-static-remote-key, then we need to have // older clients fail to deserialize this channel at all. 
If the type is // only-static-remote-key, we simply consider it "default" and don't write the channel type @@ -9883,16 +9828,6 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let channel_update_status = Readable::read(reader)?; - #[cfg(any(test, fuzzing))] - let mut historical_inbound_htlc_fulfills = new_hash_set(); - #[cfg(any(test, fuzzing))] - { - let htlc_fulfills_len: u64 = Readable::read(reader)?; - for _ in 0..htlc_fulfills_len { - assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?)); - } - } - let pending_update_fee = if let Some(feerate) = pending_update_fee_value { Some((feerate, if channel_parameters.is_outbound_from_holder { FeeUpdateState::Outbound @@ -10233,9 +10168,6 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true), channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true), - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills, - channel_type: channel_type.unwrap(), channel_keys_id, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b4003a7a50e..6c8f3139c6a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6047,7 +6047,6 @@ where // fail-backs are best-effort, we probably already have one // pending, and if not that's OK, if not, the channel is on // the chain and sending the HTLC-Timeout is their problem. - continue; } } } From d5fed87bd9e8f31afbe89207830f55fcd1d662c3 Mon Sep 17 00:00:00 2001 From: Alec Chen Date: Tue, 21 Jan 2025 22:03:01 +0000 Subject: [PATCH 12/16] Fail HTLC backwards before upstream claims on-chain Fail inbound HTLCs if they expire within a certain number of blocks from the current height. If we haven't seen the preimage for an HTLC by the time the previous hop's timeout expires, we've lost that HTLC, so we might as well fail it back instead of having our counterparty force-close the channel. Co-authored-by: Matt Corallo --- lightning/src/chain/channelmonitor.rs | 74 ++++++++++++ lightning/src/ln/functional_tests.rs | 158 ++++++++++++++++++++++++-- 2 files changed, 224 insertions(+), 8 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 62207eeafbb..7cf3a320508 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1023,6 +1023,12 @@ pub(crate) struct ChannelMonitorImpl { /// The first block height at which we had no remaining claimable balances. balances_empty_height: Option, + + /// In-memory only HTLC ids used to track upstream HTLCs that have been failed backwards due to + /// a downstream channel force-close remaining unconfirmed by the time the upstream timeout + /// expires. This is used to tell us we already generated an event to fail this HTLC back + /// during a previous block scan. + failed_back_htlc_ids: HashSet, } /// Transaction outputs to watch for on-chain spends. @@ -1445,6 +1451,8 @@ impl ChannelMonitor { counterparty_node_id: Some(counterparty_node_id), initial_counterparty_commitment_info: None, balances_empty_height: None, + + failed_back_htlc_ids: new_hash_set(), }) } @@ -4221,6 +4229,71 @@ impl ChannelMonitorImpl { } } + if self.lockdown_from_offchain || self.funding_spend_seen || self.holder_tx_signed { + // Fail back HTLCs on backwards channels if they expire within + // `LATENCY_GRACE_PERIOD_BLOCKS` blocks and the channel is closed (i.e. 
we're at a + // point where no further off-chain updates will be accepted). If we haven't seen the + // preimage for an HTLC by the time the previous hop's timeout expires, we've lost that + // HTLC, so we might as well fail it back instead of having our counterparty force-close + // the inbound channel. + let current_holder_htlcs = self.current_holder_commitment_tx.htlc_outputs.iter() + .map(|&(ref a, _, ref b)| (a, b.as_ref())); + + let current_counterparty_htlcs = if let Some(txid) = self.current_counterparty_commitment_txid { + if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) { + Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed)))) + } else { None } + } else { None }.into_iter().flatten(); + + let prev_counterparty_htlcs = if let Some(txid) = self.prev_counterparty_commitment_txid { + if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) { + Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed)))) + } else { None } + } else { None }.into_iter().flatten(); + + let htlcs = current_holder_htlcs + .chain(current_counterparty_htlcs) + .chain(prev_counterparty_htlcs); + + let height = self.best_block.height; + for (htlc, source_opt) in htlcs { + // Only check forwarded HTLCs' previous hops + let source = match source_opt { + Some(source) => source, + None => continue, + }; + let inbound_htlc_expiry = match source.inbound_htlc_expiry() { + Some(cltv_expiry) => cltv_expiry, + None => continue, + }; + let max_expiry_height = height.saturating_add(LATENCY_GRACE_PERIOD_BLOCKS); + if inbound_htlc_expiry > max_expiry_height { + continue; + } + let duplicate_event = self.pending_monitor_events.iter().any( + |update| if let &MonitorEvent::HTLCEvent(ref upd) = update { + upd.source == *source + } else { false }); + if duplicate_event { + continue; + } + if !self.failed_back_htlc_ids.insert(SentHTLCId::from_source(source)) { + continue; + } + if !duplicate_event { + log_error!(logger, "Failing back HTLC {} upstream to preserve the \ + channel as the forward HTLC hasn't resolved and our backward HTLC \ + expires soon at {}", log_bytes!(htlc.payment_hash.0), inbound_htlc_expiry); + self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate { + source: source.clone(), + payment_preimage: None, + payment_hash: htlc.payment_hash, + htlc_value_satoshis: Some(htlc.amount_msat / 1000), + })); + } + } + } + let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); @@ -5066,6 +5139,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP counterparty_node_id, initial_counterparty_commitment_info, balances_empty_height, + failed_back_htlc_ids: new_hash_set(), }))) } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 9b7a6c83f18..b29ee99e077 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -2277,6 +2277,138 @@ fn channel_reserve_in_flight_removes() { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); } +enum PostFailBackAction { + TimeoutOnChain, + ClaimOnChain, + FailOffChain, + ClaimOffChain, +} + +#[test] +fn 
test_fail_back_before_backwards_timeout() { + do_test_fail_back_before_backwards_timeout(PostFailBackAction::TimeoutOnChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOnChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::FailOffChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOffChain); +} + +fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBackAction) { + // Test that we fail an HTLC upstream if we are still waiting for confirmation downstream + // just before the upstream timeout expires + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + for node in nodes.iter() { + *node.fee_estimator.sat_per_kw.lock().unwrap() = 2000; + } + + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + // Start every node on the same block height to make reasoning about timeouts easier + connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + + let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + + // Force close the B<->C channel by timing out the HTLC + let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1; + connect_blocks(&nodes[1], timeout_blocks); + let node_1_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT); + check_closed_event(&nodes[1], 1, ClosureReason::HTLCsTimedOut, false, &[node_c_id], 100_000); + check_closed_broadcast(&nodes[1], 1, true); + check_added_monitors(&nodes[1], 1); + + // After the A<->B HTLC gets within LATENCY_GRACE_PERIOD_BLOCKS we will fail the HTLC to avoid + // the channel force-closing. Note that we already connected `TEST_FINAL_CLTV + + // LATENCY_GRACE_PERIOD_BLOCKS` blocks above, so we subtract that from the HTLC expiry (which + // is `TEST_FINAL_CLTV` + `MIN_CLTV_EXPIRY_DELTA`). + let upstream_timeout_blocks = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS * 2; + connect_blocks(&nodes[1], upstream_timeout_blocks); + + // Connect blocks for nodes[0] to make sure they don't go on-chain + connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks); + + // Check that nodes[1] fails the HTLC upstream + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], + vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2 + }]); + check_added_monitors!(nodes[1], 1); + let htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. 
} = htlc_updates;
+
+	nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+	expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+		PaymentFailedConditions::new().blamed_chan_closed(true));
+
+	// Make sure we handle possible duplicate fails or extra messages after failing back
+	match post_fail_back_action {
+		PostFailBackAction::TimeoutOnChain => {
+			// Confirm nodes[1]'s claim with timeout, make sure we don't fail upstream again
+			mine_transaction(&nodes[1], &node_1_txn[0]); // Commitment
+			mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout
+			connect_blocks(&nodes[1], ANTI_REORG_DELAY);
+			// Expect handling another fail back event, but the HTLC is already gone
+			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+				vec![HTLCDestination::NextHopChannel {
+					node_id: Some(nodes[2].node.get_our_node_id()),
+					channel_id: chan_2.2
+				}]);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		},
+		PostFailBackAction::ClaimOnChain => {
+			nodes[2].node.claim_funds(payment_preimage);
+			expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
+			check_added_monitors!(nodes[2], 1);
+			get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+
+			connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
+			let node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS);
+			check_closed_broadcast!(nodes[2], true);
+			check_closed_event(&nodes[2], 1, ClosureReason::HTLCsTimedOut, false, &[node_b_id], 100_000);
+			check_added_monitors!(nodes[2], 1);
+
+			mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment
+			mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success
+			connect_blocks(&nodes[1], ANTI_REORG_DELAY);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		},
+		PostFailBackAction::FailOffChain => {
+			nodes[2].node.fail_htlc_backwards(&payment_hash);
+			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2],
+				vec![HTLCDestination::FailedPayment { payment_hash }]);
+			check_added_monitors!(nodes[2], 1);
+			let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+			let update_fail = commitment_update.update_fail_htlcs[0].clone();
+
+			nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &update_fail);
+			let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
+			assert_eq!(err_msg.channel_id, chan_2.2);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		},
+		PostFailBackAction::ClaimOffChain => {
+			nodes[2].node.claim_funds(payment_preimage);
+			expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
+			check_added_monitors!(nodes[2], 1);
+			let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+			let update_fulfill = commitment_update.update_fulfill_htlcs[0].clone();
+
+			nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &update_fulfill);
+			let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
+			assert_eq!(err_msg.channel_id, chan_2.2);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		},
+	};
+}
+
 #[test]
 fn channel_monitor_network_test() {
 	// Simple test which builds a network of ChannelManagers, connects them to each other, and
@@ -2381,7 +2513,7 @@ fn channel_monitor_network_test() {
 	let node2_commitment_txid;
 	{
 		let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
-		connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
+		connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
 		test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);

 		node2_commitment_txid = node_txn[0].compute_txid();
@@ -3319,8 +3451,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 	// Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
 	// Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
 	mine_transaction(&nodes[1], &commitment_tx[0]);
-	check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false
-		, [nodes[2].node.get_our_node_id()], 100000);
+	check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
+		&[nodes[2].node.get_our_node_id()], 100000);
 	let htlc_expiry = get_monitor!(nodes[1], chan_2.2).get_claimable_balances().iter().filter_map(|bal|
 		if let Balance::MaybeTimeoutClaimableHTLC { claimable_height, .. } = bal {
 			Some(*claimable_height)
@@ -9792,6 +9924,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 	*nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;

+	let node_c_id = nodes[2].node.get_our_node_id();
+
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
 	let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
@@ -9807,7 +9941,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 	let conf_height = nodes[1].best_block_info().1;
 	if !test_height_before_timelock {
-		connect_blocks(&nodes[1], 24 * 6);
+		connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
 	}
 	nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
 		&nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
@@ -9826,10 +9960,6 @@
 			&spending_txn[0]
 		};
 		check_spends!(htlc_tx, node_txn[0]);
-		// We should also generate a SpendableOutputs event with the to_self output (as its
-		// timelock is up).
-		let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
-		assert_eq!(descriptor_spend_txn.len(), 1);

 		// If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
 		// should immediately fail-backwards the HTLC to the previous hop, without waiting for an
@@ -9848,6 +9978,18 @@
 		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
 		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
 		expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);
+
+		// We should also generate a SpendableOutputs event with the to_self output (once the
+		// timelock is up).
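+		// Note (an assumption about the test setup rather than something this patch
+		// changes): the default test config uses BREAKDOWN_TIMEOUT as the nodes'
+		// to_self_delay, so the to_self output only unlocks BREAKDOWN_TIMEOUT blocks
+		// after the commitment confirmed; the wait below nets out the
+		// TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS blocks already connected
+		// earlier in the test.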
+		connect_blocks(&nodes[1], (BREAKDOWN_TIMEOUT as u32) - TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - 1);
+		let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
+		assert_eq!(descriptor_spend_txn.len(), 1);
+
+		// When the HTLC times out on the A<->B edge, the B<->C channel will fail the HTLC back to
+		// avoid the A<->B channel closing (even though it already has). This will generate a
+		// spurious HTLCHandlingFailed event.
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+			vec![HTLCDestination::NextHopChannel { node_id: Some(node_c_id), channel_id }]);
 	}
 }

From c65d12d30739f72f57a025d97a8c98997fb124ff Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 21 Jan 2025 21:53:01 +0000
Subject: [PATCH 13/16] Fail all `ChannelMonitorUpdate`s after `holder_tx_signed`

If we've signed the latest holder tx (i.e., we've force-closed and
broadcasted our state), there's not much reason to accept
counterparty-transaction-updating `ChannelMonitorUpdate`s; instead, we
should make sure the `ChannelManager` fails the channel as soon as
possible.

This also standardizes the failure cases to match those added in the
previous commit, which makes things a bit more readable.
---
 lightning/src/chain/channelmonitor.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 7cf3a320508..e1f47060fbe 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -3282,7 +3282,7 @@ impl ChannelMonitorImpl {
 			}
 		}

-		if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain) && is_pre_close_update {
+		if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain || self.holder_tx_signed) && is_pre_close_update {
 			log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent");
 			Err(())
 		} else { ret }

From 15a567d5754ca438d3966878fea7d4ed10a340ed Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Fri, 24 Jan 2025 09:44:40 +0100
Subject: [PATCH 14/16] Introduce `SpendableOutputDescriptor::outpoint` accessor

---
 lightning/src/sign/mod.rs   | 9 +++++++++
 lightning/src/util/sweep.rs | 8 +-------
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs
index 2be0cb39f4f..7aa41531ce2 100644
--- a/lightning/src/sign/mod.rs
+++ b/lightning/src/sign/mod.rs
@@ -538,6 +538,15 @@ impl SpendableOutputDescriptor {
 		};
 		Ok((psbt, expected_max_weight))
 	}
+
+	/// Returns the outpoint of the spendable output.
+	pub fn outpoint(&self) -> OutPoint {
+		match self {
+			Self::StaticOutput { outpoint, .. } => *outpoint,
+			Self::StaticPaymentOutput(descriptor) => descriptor.outpoint,
+			Self::DelayedPaymentOutput(descriptor) => descriptor.outpoint,
+		}
+	}
 }

 /// The parameters required to derive a channel signer via [`SignerProvider`].

diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs
index b61306194df..78acef9d727 100644
--- a/lightning/src/util/sweep.rs
+++ b/lightning/src/util/sweep.rs
@@ -71,13 +71,7 @@ impl TrackedSpendableOutput {

 	/// Returns whether the output is spent in the given transaction.
 	pub fn is_spent_in(&self, tx: &Transaction) -> bool {
-		let prev_outpoint = match &self.descriptor {
-			SpendableOutputDescriptor::StaticOutput { outpoint, .. } => *outpoint,
-			SpendableOutputDescriptor::DelayedPaymentOutput(output) => output.outpoint,
-			SpendableOutputDescriptor::StaticPaymentOutput(output) => output.outpoint,
-		}
-		.into_bitcoin_outpoint();
-
+		let prev_outpoint = self.descriptor.outpoint().into_bitcoin_outpoint();
 		tx.input.iter().any(|input| input.previous_output == prev_outpoint)
 	}
 }

From 6712b4e24ed6df03fb91b16b51c6a08d9543422e Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Fri, 24 Jan 2025 10:57:28 +0100
Subject: [PATCH 15/16] Prefactor: Make monitor archival delay a `pub const`

.. previously we just used the 4032 magic number; here we put it in a
`pub const` that is reusable elsewhere.
---
 lightning/src/chain/channelmonitor.rs | 16 ++++++++++------
 lightning/src/ln/monitor_tests.rs     | 18 +++++++++---------
 2 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index e1f47060fbe..8788fbb894d 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -256,6 +256,10 @@ pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
 // solved by a previous claim tx. What we want to avoid is reorg evicting our claim tx and us not
 // keep bumping another claim tx to solve the outpoint.
 pub const ANTI_REORG_DELAY: u32 = 6;
+/// Number of blocks we wait before assuming a [`ChannelMonitor`] to be fully resolved and
+/// considering it safe to archive.
+// 4032 blocks are roughly four weeks
+pub const ARCHIVAL_DELAY_BLOCKS: u32 = 4032;
 /// Number of blocks before confirmation at which we fail back an un-relayed HTLC or at which we
 /// refuse to accept a new HTLC.
 ///
@@ -2023,10 +2027,11 @@ impl ChannelMonitor {
 	///
 	/// This function returns a tuple of two booleans, the first indicating whether the monitor is
 	/// fully resolved, and the second whether the monitor needs persistence to ensure it is
-	/// reliably marked as resolved within 4032 blocks.
+	/// reliably marked as resolved within [`ARCHIVAL_DELAY_BLOCKS`] blocks.
 	///
-	/// The first boolean is true only if [`Self::get_claimable_balances`] has been empty for at least
-	/// 4032 blocks as an additional protection against any bugs resulting in spuriously empty balance sets.
+	/// The first boolean is true only if [`Self::get_claimable_balances`] has been empty for at
+	/// least [`ARCHIVAL_DELAY_BLOCKS`] blocks as an additional protection against any bugs
+	/// resulting in spuriously empty balance sets.
 	pub fn check_and_update_full_resolution_status(&self, logger: &L) -> (bool, bool) {
 		let mut is_all_funds_claimed = self.get_claimable_balances().is_empty();
 		let current_height = self.current_best_block().height;
@@ -2042,11 +2047,10 @@ impl ChannelMonitor {
 		// once processed, implies the preimage exists in the corresponding inbound channel.
 		let preimages_not_needed_elsewhere = inner.pending_monitor_events.is_empty();

-		const BLOCKS_THRESHOLD: u32 = 4032; // ~four weeks
 		match (inner.balances_empty_height, is_all_funds_claimed, preimages_not_needed_elsewhere) {
 			(Some(balances_empty_height), true, true) => {
 				// Claimed all funds, check if reached the blocks threshold.
-				(current_height >= balances_empty_height + BLOCKS_THRESHOLD, false)
+				(current_height >= balances_empty_height + ARCHIVAL_DELAY_BLOCKS, false)
 			},
 			(Some(_), false, _)|(Some(_), _, false) => {
 				// previously assumed we claimed all funds, but we have new funds to claim or
@@ -2066,7 +2070,7 @@ impl ChannelMonitor {
 				// None. It is set to the current block height.
 				log_debug!(logger, "ChannelMonitor funded at {} is now fully resolved. It will become archivable in {} blocks",
-					inner.get_funding_txo().0, BLOCKS_THRESHOLD);
+					inner.get_funding_txo().0, ARCHIVAL_DELAY_BLOCKS);
 				inner.balances_empty_height = Some(current_height);
 				(false, true)
 			},
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index 9556e988b4e..e2c76643348 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -10,7 +10,7 @@
 //! Further functional tests which test blockchain reorganizations.

 use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SpendableOutputDescriptor};
-use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS, Balance, BalanceSource, ChannelMonitorUpdateStep};
+use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS, LATENCY_GRACE_PERIOD_BLOCKS, Balance, BalanceSource, ChannelMonitorUpdateStep};
 use crate::chain::transaction::OutPoint;
 use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight};
 use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource};
@@ -246,19 +246,19 @@ fn archive_fully_resolved_monitors() {

 	// At this point, both nodes have no more `Balance`s, but nodes[0]'s `ChannelMonitor` still
 	// hasn't had the `MonitorEvent` that contains the preimage claimed by the `ChannelManager`.
-	// Thus, calling `archive_fully_resolved_channel_monitors` and waiting 4032 blocks will not
-	// result in the `ChannelMonitor` being archived.
+	// Thus, calling `archive_fully_resolved_channel_monitors` and waiting `ARCHIVAL_DELAY_BLOCKS`
+	// blocks will not result in the `ChannelMonitor` being archived.
 	nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
-	connect_blocks(&nodes[0], 4032);
+	connect_blocks(&nodes[0], ARCHIVAL_DELAY_BLOCKS);
 	nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);

-	// ...however, nodes[1]'s `ChannelMonitor` is ready to be archived, and will be in exactly 4032
-	// blocks.
+	// ...however, nodes[1]'s `ChannelMonitor` is ready to be archived, and will be in exactly
+	// `ARCHIVAL_DELAY_BLOCKS` blocks.
 	nodes[1].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[1].chain_monitor.chain_monitor.list_monitors().len(), 1);
-	connect_blocks(&nodes[1], 4031);
+	connect_blocks(&nodes[1], ARCHIVAL_DELAY_BLOCKS - 1);
 	nodes[1].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[1].chain_monitor.chain_monitor.list_monitors().len(), 1);
 	connect_blocks(&nodes[1], 1);
@@ -266,11 +266,11 @@
 	assert_eq!(nodes[1].chain_monitor.chain_monitor.list_monitors().len(), 0);

 	// Finally, we process the pending `MonitorEvent` from nodes[0], allowing the `ChannelMonitor`
-	// to be archived 4032 blocks later.
+	// to be archived `ARCHIVAL_DELAY_BLOCKS` blocks later.
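+	// An illustrative aside, not part of the original patch, assuming Bitcoin's
+	// nominal 144 blocks/day: four weeks is 144 * 28 = 4032 blocks, which is
+	// exactly `ARCHIVAL_DELAY_BLOCKS`.
+	const _BLOCKS_PER_DAY: u32 = 144;
+	const _: () = assert!(_BLOCKS_PER_DAY * 28 == 4032);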
 	expect_payment_sent(&nodes[0], payment_preimage, None, true, false);
 	nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
-	connect_blocks(&nodes[0], 4031);
+	connect_blocks(&nodes[0], ARCHIVAL_DELAY_BLOCKS - 1);
 	nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
 	connect_blocks(&nodes[0], 1);

From 8c49359ef5a067e6f9f9d31de09d2a7d5cc0f80b Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Fri, 24 Jan 2025 11:14:04 +0100
Subject: [PATCH 16/16] `OutputSweeper`: Delay pruning until monitors have likely been archived

Previously, we would prune tracked descriptors once we saw a spend hit
`ANTI_REORG_DELAY = 6` confirmations. However, this could lead to a
scenario where lingering `ChannelMonitor`s waiting to be archived would
still regenerate and replay `Event::SpendableOutputs`, i.e., we would
re-add the same (now unspendable, as they were actually already spent)
outputs again after having initially pruned them.

Here, we therefore keep the tracked descriptors around for longer, in
particular for at least `ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY = 4038`
confirmations, at which point we assume the lingering monitors to have
likely been archived and it is 'safe' for us to also forget about the
descriptors.
---
 lightning-background-processor/src/lib.rs |  6 +++---
 lightning/src/util/sweep.rs               | 15 ++++++++++++---
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index af7c7ffb003..adcae564f56 100644
--- a/lightning-background-processor/src/lib.rs
+++ b/lightning-background-processor/src/lib.rs
@@ -1099,7 +1099,7 @@ mod tests {
 		SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
 	};
 	use lightning::util::ser::Writeable;
-	use lightning::util::sweep::{OutputSpendStatus, OutputSweeper};
+	use lightning::util::sweep::{OutputSpendStatus, OutputSweeper, PRUNE_DELAY_BLOCKS};
 	use lightning::util::test_utils;
 	use lightning::{get_event, get_event_msg};
 	use lightning_persister::fs_store::FilesystemStore;
@@ -2282,8 +2282,8 @@ mod tests {
 		}

 		// Check we stop tracking the spendable outputs when one of the txs reaches
-		// ANTI_REORG_DELAY confirmations.
-		confirm_transaction_depth(&mut nodes[0], &sweep_tx_0, ANTI_REORG_DELAY);
+		// PRUNE_DELAY_BLOCKS confirmations.
+		confirm_transaction_depth(&mut nodes[0], &sweep_tx_0, PRUNE_DELAY_BLOCKS);
 		assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 0);

 		if !std::thread::panicking() {

diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs
index 78acef9d727..0022e5286d2 100644
--- a/lightning/src/util/sweep.rs
+++ b/lightning/src/util/sweep.rs
@@ -9,7 +9,7 @@
 //! sweeping them.

 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
-use crate::chain::channelmonitor::ANTI_REORG_DELAY;
+use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS};
 use crate::chain::{self, BestBlock, Confirm, Filter, Listen, WatchedOutput};
 use crate::io;
 use crate::ln::msgs::DecodeError;
@@ -32,6 +32,9 @@
 use bitcoin::{BlockHash, Transaction, Txid};

 use core::ops::Deref;

+/// The number of blocks we wait before we prune the tracked spendable outputs.
+pub const PRUNE_DELAY_BLOCKS: u32 = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY;
+
 /// The state of a spendable output currently tracked by an [`OutputSweeper`].
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TrackedSpendableOutput {
@@ -101,7 +104,11 @@ pub enum OutputSpendStatus {
 		latest_spending_tx: Transaction,
 	},
 	/// A transaction spending the output has been confirmed on-chain but will be tracked until it
-	/// reaches [`ANTI_REORG_DELAY`] confirmations.
+	/// reaches at least [`PRUNE_DELAY_BLOCKS`] confirmations to ensure [`Event::SpendableOutputs`]
+	/// stemming from lingering [`ChannelMonitor`]s can safely be replayed.
+	///
+	/// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs
+	/// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
 	PendingThresholdConfirmations {
 		/// The hash of the chain tip when we first broadcast a transaction spending this output.
 		first_broadcast_hash: BlockHash,
@@ -524,7 +531,9 @@
 				// Prune all outputs that have sufficient depth by now.
 				sweeper_state.outputs.retain(|o| {
 					if let Some(confirmation_height) = o.status.confirmation_height() {
-						if cur_height >= confirmation_height + ANTI_REORG_DELAY - 1 {
+						// We wait at least `PRUNE_DELAY_BLOCKS` as, before that,
+						// `Event::SpendableOutputs` from lingering monitors might get replayed.
+						if cur_height >= confirmation_height + PRUNE_DELAY_BLOCKS - 1 {
 							log_debug!(self.logger, "Pruning swept output as sufficiently confirmed via spend in transaction {:?}. Pruned descriptor: {:?}", o.status.latest_spending_tx().map(|t| t.compute_txid()), o.descriptor
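
// A minimal, self-contained sketch of the new pruning horizon (an illustrative
// aside rather than part of the patch; `should_prune` is a hypothetical helper
// mirroring the `retain` predicate above): a swept output is kept for
// PRUNE_DELAY_BLOCKS = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY = 4032 + 6 = 4038
// confirmations before the sweeper forgets its descriptor.
const ANTI_REORG_DELAY: u32 = 6;
const ARCHIVAL_DELAY_BLOCKS: u32 = 4032;
const PRUNE_DELAY_BLOCKS: u32 = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY;

fn should_prune(cur_height: u32, confirmation_height: u32) -> bool {
	// A spend confirmed at `confirmation_height` has
	// `cur_height - confirmation_height + 1` confirmations at `cur_height`, so this
	// passes exactly once the spend reaches PRUNE_DELAY_BLOCKS confirmations.
	cur_height >= confirmation_height + PRUNE_DELAY_BLOCKS - 1
}

fn main() {
	// A spend confirmed at height 800_000 has 4038 confirmations at height
	// 804_037, at which point its descriptor may be pruned.
	assert!(!should_prune(804_036, 800_000));
	assert!(should_prune(804_037, 800_000));
}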