diff --git a/Cargo.toml b/Cargo.toml
index dc3eb92c7e2..2f73851d114 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -21,6 +21,7 @@ members = [
 exclude = [
 	"lightning-transaction-sync",
+	"lightning-tests",
 	"no-std-check",
 	"msrv-no-dev-deps-check",
 	"bench",
diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh
index 1d11a5fd624..3be6afde89a 100755
--- a/ci/ci-tests.sh
+++ b/ci/ci-tests.sh
@@ -57,6 +57,12 @@ for DIR in "${WORKSPACE_MEMBERS[@]}"; do
 	cargo doc -p "$DIR" --document-private-items
 done
 
+echo -e "\n\nTesting upgrade from prior versions of LDK"
+pushd lightning-tests
+[ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose
+cargo test
+popd
+
 echo -e "\n\nChecking and testing Block Sync Clients with features"
 
 cargo test -p lightning-block-sync --verbose --color always --features rest-client
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 73e4f88f1f3..1f4e7c24152 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -382,7 +382,7 @@ impl SignerProvider for KeyProvider {
 			channel_keys_id,
 		);
 		let revoked_commitment = self.make_enforcement_state_cell(keys.commitment_seed);
-		TestChannelSigner::new_with_revoked(keys, revoked_commitment, false)
+		TestChannelSigner::new_with_revoked(keys, revoked_commitment, false, false)
 	}
 
 	fn read_chan_signer(&self, buffer: &[u8]) -> Result<TestChannelSigner, DecodeError> {
@@ -391,7 +391,7 @@ impl SignerProvider for KeyProvider {
 		let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
 		let state = self.make_enforcement_state_cell(inner.commitment_seed);
 
-		Ok(TestChannelSigner::new_with_revoked(inner, state, false))
+		Ok(TestChannelSigner::new_with_revoked(inner, state, false, false))
 	}
 
 	fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index c1f2dd11b1e..143f69f160e 100644
--- a/fuzz/src/full_stack.rs
+++ b/fuzz/src/full_stack.rs
@@ -522,6 +522,7 @@ impl SignerProvider for KeyProvider {
 			},
 			state,
 			false,
+			false,
 		)
 	}
 
@@ -529,7 +530,7 @@ impl SignerProvider for KeyProvider {
 		let inner: InMemorySigner = ReadableArgs::read(&mut data, self)?;
 		let state = Arc::new(Mutex::new(EnforcementState::new()));
 
-		Ok(TestChannelSigner::new_with_revoked(inner, state, false))
+		Ok(TestChannelSigner::new_with_revoked(inner, state, false, false))
 	}
 
 	fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml
new file mode 100644
index 00000000000..23c81fae4a3
--- /dev/null
+++ b/lightning-tests/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "lightning-tests"
+version = "0.0.1"
+authors = ["Matt Corallo"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/lightningdevkit/rust-lightning/"
+description = "Tests for LDK crates"
+edition = "2021"
+
+[features]
+
+[dependencies]
+lightning-types = { path = "../lightning-types", features = ["_test_utils"] }
+lightning-invoice = { path = "../lightning-invoice", default-features = false }
+lightning-macros = { path = "../lightning-macros" }
+lightning = { path = "../lightning", features = ["_test_utils"] }
+lightning_0_1 = { package = "lightning", version = "0.1.1", features = ["_test_utils"] }
+lightning_0_0_125 = { package = "lightning", version = "0.0.125", features = ["_test_utils"] }
+
+bitcoin = { version = "0.32.2", default-features = false }
+
+[dev-dependencies]
diff --git a/lightning-tests/src/lib.rs b/lightning-tests/src/lib.rs
new file mode 100644
index 00000000000..c028193d692
--- /dev/null
+++ b/lightning-tests/src/lib.rs
@@ -0,0 +1,5 @@
+#[cfg_attr(test, macro_use)]
+extern crate lightning;
+
+#[cfg(all(test, not(taproot)))]
+pub mod upgrade_downgrade_tests;
diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs
new file mode 100644
index 00000000000..2b57cd23a9a
--- /dev/null
+++ b/lightning-tests/src/upgrade_downgrade_tests.rs
@@ -0,0 +1,215 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Tests for upgrading from previous versions of LDK or downgrading to previous versions of
+//! LDK.
+
+use lightning_0_1::get_monitor as get_monitor_0_1;
+use lightning_0_1::ln::functional_test_utils as lightning_0_1_utils;
+use lightning_0_1::util::ser::Writeable as _;
+
+use lightning_0_0_125::chain::ChannelMonitorUpdateStatus as ChannelMonitorUpdateStatus_0_0_125;
+use lightning_0_0_125::check_added_monitors as check_added_monitors_0_0_125;
+use lightning_0_0_125::events::ClosureReason as ClosureReason_0_0_125;
+use lightning_0_0_125::expect_payment_claimed as expect_payment_claimed_0_0_125;
+use lightning_0_0_125::get_htlc_update_msgs as get_htlc_update_msgs_0_0_125;
+use lightning_0_0_125::get_monitor as get_monitor_0_0_125;
+use lightning_0_0_125::get_revoke_commit_msgs as get_revoke_commit_msgs_0_0_125;
+use lightning_0_0_125::ln::channelmanager::PaymentId as PaymentId_0_0_125;
+use lightning_0_0_125::ln::channelmanager::RecipientOnionFields as RecipientOnionFields_0_0_125;
+use lightning_0_0_125::ln::functional_test_utils as lightning_0_0_125_utils;
+use lightning_0_0_125::ln::msgs::ChannelMessageHandler as _;
+use lightning_0_0_125::routing::router as router_0_0_125;
+use lightning_0_0_125::util::ser::Writeable as _;
+
+use lightning::ln::functional_test_utils::*;
+
+use lightning_types::payment::PaymentPreimage;
+
+#[test]
+fn simple_upgrade() {
+	// Tests a simple case of upgrading from LDK 0.1 with a pending payment
+	let (node_a_ser, node_b_ser, mon_a_ser, mon_b_ser, preimage);
+	{
+		let chanmon_cfgs = lightning_0_1_utils::create_chanmon_cfgs(2);
+		let node_cfgs = lightning_0_1_utils::create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = lightning_0_1_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = lightning_0_1_utils::create_network(2, &node_cfgs, &node_chanmgrs);
+
+		let chan_id = lightning_0_1_utils::create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+		let payment_preimage =
+			lightning_0_1_utils::route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+		preimage = PaymentPreimage(payment_preimage.0 .0);
+
+		node_a_ser = nodes[0].node.encode();
+		node_b_ser = nodes[1].node.encode();
+		mon_a_ser = get_monitor_0_1!(nodes[0], chan_id).encode();
+		mon_b_ser = get_monitor_0_1!(nodes[1], chan_id).encode();
+	}
+
+	// Create a dummy node to reload over with the 0.1 state
+
+	let mut chanmon_cfgs = create_chanmon_cfgs(2);
+
+	// Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks
+	chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true;
+	chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true;
+
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let (persister_a, persister_b, chain_mon_a, chain_mon_b);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let (node_a, node_b);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let config = test_default_channel_config();
+	let a_mons = &[&mon_a_ser[..]];
+	reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a);
+	reload_node!(nodes[1], config, &node_b_ser, &[&mon_b_ser], persister_b, chain_mon_b, node_b);
+
+	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+
+	claim_payment(&nodes[0], &[&nodes[1]], preimage);
+}
+
+#[test]
+fn test_125_dangling_post_update_actions() {
+	// Tests a failure of upgrading from 0.0.125 to 0.1 when there's a dangling
+	// `MonitorUpdateCompletionAction` due to the bug fixed in
+	// 93b4479e472e6767af5df90fecdcdfb79074e260.
+	let (node_d_ser, mon_ser);
+	{
+		// First, we get RAA-source monitor updates held by using async persistence (note that this
+		// issue was first identified as a consequence of the bug fixed in
+		// 93b4479e472e6767af5df90fecdcdfb79074e260 but in order to replicate that bug we need a
+		// complicated multi-threaded race that is not deterministic, thus we "cheat" here by using
+		// async persistence). We do this by simply claiming an MPP payment and not completing the
+		// second channel's `ChannelMonitorUpdate`, blocking RAA `ChannelMonitorUpdate`s from the
+		// first (which is ultimately a very similar bug to the one fixed in 93b4479e472e6767af5df).
+		//
+		// Then, we claim a second payment on the channel, which ultimately doesn't have its
+		// `ChannelMonitorUpdate` completion handled due to the presence of the blocked
+		// `ChannelMonitorUpdate`. The claim also generates a post-update completion action, but
+		// the `ChannelMonitorUpdate` isn't queued due to the RAA-update block.
+		let chanmon_cfgs = lightning_0_0_125_utils::create_chanmon_cfgs(4);
+		let node_cfgs = lightning_0_0_125_utils::create_node_cfgs(4, &chanmon_cfgs);
+		let node_chanmgrs =
+			lightning_0_0_125_utils::create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+		let nodes = lightning_0_0_125_utils::create_network(4, &node_cfgs, &node_chanmgrs);
+
+		let node_b_id = nodes[1].node.get_our_node_id();
+		let node_d_id = nodes[3].node.get_our_node_id();
+
+		lightning_0_0_125_utils::create_announced_chan_between_nodes_with_value(
+			&nodes, 0, 1, 100_000, 0,
+		);
+		lightning_0_0_125_utils::create_announced_chan_between_nodes_with_value(
+			&nodes, 0, 2, 100_000, 0,
+		);
+		let chan_id_1_3 = lightning_0_0_125_utils::create_announced_chan_between_nodes_with_value(
+			&nodes, 1, 3, 100_000, 0,
+		)
+		.2;
+		let chan_id_2_3 = lightning_0_0_125_utils::create_announced_chan_between_nodes_with_value(
+			&nodes, 2, 3, 100_000, 0,
+		)
+		.2;
+
+		let (preimage, hash, secret) =
+			lightning_0_0_125_utils::get_payment_preimage_hash(&nodes[3], Some(15_000_000), None);
+
+		let pay_params = router_0_0_125::PaymentParameters::from_node_id(
+			node_d_id,
+			lightning_0_0_125_utils::TEST_FINAL_CLTV,
+		)
+		.with_bolt11_features(nodes[3].node.bolt11_invoice_features())
+		.unwrap();
+
+		let route_params =
+			router_0_0_125::RouteParameters::from_payment_params_and_value(pay_params, 15_000_000);
+		let route = lightning_0_0_125_utils::get_route(&nodes[0], &route_params).unwrap();
+
+		let onion = RecipientOnionFields_0_0_125::secret_only(secret);
+		let id = PaymentId_0_0_125(hash.0);
+		nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap();
+
+		check_added_monitors_0_0_125!(nodes[0], 2);
+		let paths = &[&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]]];
+		lightning_0_0_125_utils::pass_along_route(&nodes[0], paths, 15_000_000, hash, secret);
+
+		let preimage_2 = lightning_0_0_125_utils::route_payment(&nodes[1], &[&nodes[3]], 100_000).0;
+
+		chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus_0_0_125::InProgress);
+		chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus_0_0_125::InProgress);
+		nodes[3].node.claim_funds(preimage);
+		check_added_monitors_0_0_125!(nodes[3], 2);
+
+		let (outpoint, update_id, _) = {
+			let latest_monitors = nodes[3].chain_monitor.latest_monitor_update_id.lock().unwrap();
+			latest_monitors.get(&chan_id_1_3).unwrap().clone()
+		};
+		nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, update_id).unwrap();
+		expect_payment_claimed_0_0_125!(nodes[3], hash, 15_000_000);
+
+		let ds_fulfill = get_htlc_update_msgs_0_0_125!(nodes[3], node_b_id);
+		// Due to an unrelated test bug in 0.0.125, we have to leave the `ChannelMonitorUpdate` for
+		// the previous node un-completed or we will panic when dropping the `Node`.
+		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus_0_0_125::InProgress);
+		nodes[1].node.handle_update_fulfill_htlc(&node_d_id, &ds_fulfill.update_fulfill_htlcs[0]);
+		check_added_monitors_0_0_125!(nodes[1], 1);
+
+		nodes[1].node.handle_commitment_signed(&node_d_id, &ds_fulfill.commitment_signed);
+		check_added_monitors_0_0_125!(nodes[1], 1);
+
+		// The `ChannelMonitorUpdate` generated by the RAA from node B to node D will be blocked.
+		let (bs_raa, _) = get_revoke_commit_msgs_0_0_125!(nodes[1], node_d_id);
+		nodes[3].node.handle_revoke_and_ack(&node_b_id, &bs_raa);
+		check_added_monitors_0_0_125!(nodes[3], 0);
+
+		// Now that there is a blocked update in the B <-> D channel, we can claim the second
+		// payment across it, which, while it will generate a `ChannelMonitorUpdate`, will not
+		// complete its post-update actions.
+		nodes[3].node.claim_funds(preimage_2);
+		check_added_monitors_0_0_125!(nodes[3], 1);
+
+		// Finally, we set up the failure by force-closing the channel in question, ensuring that
+		// 0.1 will not create a per-peer state for node B.
+		let err = "Force Closing Channel".to_owned();
+		nodes[3].node.force_close_without_broadcasting_txn(&chan_id_1_3, &node_b_id, err).unwrap();
+		let reason =
+			ClosureReason_0_0_125::HolderForceClosed { broadcasted_latest_txn: Some(false) };
+		let peers = &[node_b_id];
+		lightning_0_0_125_utils::check_closed_event(&nodes[3], 1, reason, false, peers, 100_000);
+		lightning_0_0_125_utils::check_closed_broadcast(&nodes[3], 1, true);
+		check_added_monitors_0_0_125!(nodes[3], 1);
+
+		node_d_ser = nodes[3].node.encode();
+		mon_ser = get_monitor_0_0_125!(nodes[3], chan_id_2_3).encode();
+	}
+
+	// Create a dummy node to reload over with the 0.0.125 state
+
+	let mut chanmon_cfgs = create_chanmon_cfgs(4);
+
+	// Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks
+	chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true;
+	chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true;
+	chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true;
+	chanmon_cfgs[3].keys_manager.disable_all_state_policy_checks = true;
+
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let (persister, chain_mon);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+	let node;
+	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+	// Finally, reload the node in the latest LDK. This previously failed.
+	let config = test_default_channel_config();
+	reload_node!(nodes[3], config, &node_d_ser, &[&mon_ser], persister, chain_mon, node);
+}
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index c15a4bee643..96492ef97f2 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -5039,7 +5039,12 @@ impl<SP: Deref> Channel<SP> where
 		if update_fee {
 			debug_assert!(!self.context.is_outbound());
 			let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
-			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
+			let total_anchor_sats = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
+				ANCHOR_OUTPUT_VALUE_SATOSHI * 2
+			} else {
+				0
+			};
+			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + total_anchor_sats * 1000 + counterparty_reserve_we_require_msat {
 				return Err(ChannelError::close("Funding remote cannot afford proposed new fee".to_owned()));
 			}
 		}
@@ -5772,7 +5777,12 @@ impl<SP: Deref> Channel<SP> where
 		let commitment_stats = self.context.build_commitment_transaction(self.holder_commitment_point.transaction_number(), &keys, true, true, logger);
 		let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + htlc_stats.on_holder_tx_outbound_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
 		let holder_balance_msat = commitment_stats.local_balance_msat - htlc_stats.outbound_holding_cell_msat;
-		if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
+		let total_anchor_sats = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
+			ANCHOR_OUTPUT_VALUE_SATOSHI * 2
+		} else {
+			0
+		};
+		if holder_balance_msat < buffer_fee_msat + total_anchor_sats * 1000 + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
 			//TODO: auto-close after a number of failures?
 			log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
 			return None;
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 33ad59a4139..b973432056a 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -13582,9 +13582,17 @@ where
 			$monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr
 		) => { {
 			let mut max_in_flight_update_id = 0;
+			let starting_len = $chan_in_flight_upds.len();
 			$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
+			if $chan_in_flight_upds.len() < starting_len {
+				log_debug!(
+					$logger,
+					"{} ChannelMonitorUpdates completed after ChannelManager was last serialized",
+					starting_len - $chan_in_flight_upds.len()
+				);
+			}
 			for update in $chan_in_flight_upds.iter() {
-				log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
+				log_debug!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
 					update.update_id, $channel_info_log, &$monitor.channel_id());
 				max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
 				pending_background_events.push(
@@ -14148,11 +14156,31 @@ where
 							debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue");
 						}
 					}
+					// Note that we may have a post-update action for a channel that has no pending
+					// `ChannelMonitorUpdate`s, but unlike the no-peer-state case, it may simply be
+					// because we had a `ChannelMonitorUpdate` complete after the last time this
+					// `ChannelManager` was serialized. In that case, we'll run the post-update
+					// actions as soon as we get going.
 				}
 				peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
 			} else {
-				log_error!(WithContext::from(&args.logger, Some(node_id), None, None), "Got blocked actions without a per-peer-state for {}", node_id);
-				return Err(DecodeError::InvalidValue);
+				for actions in monitor_update_blocked_actions.values() {
+					for action in actions.iter() {
+						if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) {
+							// If there is no state for this channel but we have pending
+							// post-update actions, it's possible that one was left over from pre-0.1
+							// payment claims where MPP claims led to a channel blocked on itself
+							// and later `ChannelMonitorUpdate`s didn't get their post-update
+							// actions run.
+							// This should only have happened for `PaymentClaimed` post-update actions,
+							// which we ignore here.
+						} else {
+							let logger = WithContext::from(&args.logger, Some(node_id), None, None);
+							log_error!(logger, "Got blocked actions {:?} without a per-peer-state for {}", monitor_update_blocked_actions, node_id);
+							return Err(DecodeError::InvalidValue);
+						}
+					}
+				}
 			}
 		}
@@ -14237,6 +14265,22 @@ where
 				if payment_claim.mpp_parts.is_empty() {
 					return Err(DecodeError::InvalidValue);
 				}
+				{
+					let payments = channel_manager.claimable_payments.lock().unwrap();
+					if !payments.claimable_payments.contains_key(&payment_hash) {
+						if let Some(payment) = payments.pending_claiming_payments.get(&payment_hash) {
+							if payment.payment_id == payment_claim.claiming_payment.payment_id {
+								// If this payment already exists and was marked as
+								// being-claimed then the serialized state must contain all
+								// of the pending `ChannelMonitorUpdate`s required to get
+								// the preimage on disk in all MPP parts. Thus we can skip
+								// the replay below.
+								continue;
+							}
+						}
+					}
+				}
+
 				let mut channels_without_preimage = payment_claim.mpp_parts.iter()
 					.map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.funding_txo, htlc_info.channel_id))
 					.collect::<Vec<_>>();
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index be77547b79c..04295073861 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -1196,7 +1196,7 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User
 	node_deserialized
 }
 
-#[cfg(test)]
+#[macro_export]
 macro_rules! reload_node {
 	($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => {
 		let chanman_encoded = $chanman_encoded;
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index bdb1621771f..fc8bf38519d 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -24,7 +24,7 @@ use crate::ln::types::ChannelId;
 use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash};
 use crate::ln::channel::{CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
-use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
+use crate::ln::channel::{ANCHOR_OUTPUT_VALUE_SATOSHI, DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
 use crate::ln::{chan_utils, onion_utils};
 use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
 use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
@@ -673,28 +673,49 @@ fn test_update_fee_vanilla() {
 	check_added_monitors!(nodes[1], 1);
 }
 
-#[test]
-fn test_update_fee_that_funder_cannot_afford() {
+pub fn do_test_update_fee_that_funder_cannot_afford(channel_type_features: ChannelTypeFeatures) {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+
+	let mut default_config = test_default_channel_config();
+	if channel_type_features == ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() {
+		default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+		// This setting is also needed to create an anchor channel
+		default_config.manually_accept_inbound_channels = true;
+	}
+
+	let node_chanmgrs = create_node_chanmgrs(
+		2,
+		&node_cfgs,
+		&[Some(default_config.clone()), Some(default_config.clone())],
+	);
+
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 	let channel_value = 5000;
 	let push_sats = 700;
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000);
 	let channel_id = chan.2;
 	let secp_ctx = Secp256k1::new();
-	let default_config = UserConfig::default();
 	let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
 
-	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
+	let (anchor_outputs_value_sats, outputs_num_no_htlcs) =
+		if channel_type_features.supports_anchors_zero_fee_htlc_tx() {
+			(ANCHOR_OUTPUT_VALUE_SATOSHI * 2, 4)
+		} else {
+			(0, 2)
+		};
 
 	// Calculate the maximum feerate that A can afford. Note that we don't send an update_fee
 	// CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we
 	// calculate two different feerates here - the expected local limit as well as the expected
 	// remote limit.
-	let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
-	let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
+	let feerate =
+		((channel_value - bs_channel_reserve_sats - push_sats - anchor_outputs_value_sats) * 1000
+			/ (commitment_tx_base_weight(&channel_type_features)
+				+ CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
+	let non_buffer_feerate =
+		((channel_value - bs_channel_reserve_sats - push_sats - anchor_outputs_value_sats) * 1000
+			/ commitment_tx_base_weight(&channel_type_features)) as u32;
 	{
 		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
 		*feerate_lock = feerate;
@@ -711,8 +732,8 @@ fn test_update_fee_that_funder_cannot_afford() {
 	{
 		let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();
 
-		//We made sure neither party's funds are below the dust limit and there are no HTLCs here
-		assert_eq!(commitment_tx.output.len(), 2);
+		// We made sure neither party's funds are below the dust limit and there are no HTLCs here
+		assert_eq!(commitment_tx.output.len(), outputs_num_no_htlcs);
 		let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
 		let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value.to_sat());
 		actual_fee = channel_value - actual_fee;
@@ -771,7 +792,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 	let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
 		INITIAL_COMMITMENT_NUMBER - 1,
 		push_sats,
-		channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
+		channel_value - push_sats - anchor_outputs_value_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
 		local_funding, remote_funding,
 		commit_tx_keys.clone(),
 		non_buffer_feerate + 4,
@@ -808,6 +829,14 @@ fn test_update_fee_that_funder_cannot_afford() {
 		[nodes[0].node.get_our_node_id()], channel_value);
 }
 
+#[test]
+pub fn test_update_fee_that_funder_cannot_afford() {
+	do_test_update_fee_that_funder_cannot_afford(ChannelTypeFeatures::only_static_remote_key());
+	do_test_update_fee_that_funder_cannot_afford(
+		ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(),
+	);
+}
+
 #[test]
 fn test_update_fee_with_fundee_update_add_htlc() {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index eaeb3e7bac4..16904d85758 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -781,7 +781,7 @@ fn test_forwardable_regen() {
 	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
 }
 
-fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
+fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_restart: bool) {
 	// Test what happens if a node receives an MPP payment, claims it, but crashes before
 	// persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only
 	// updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still
@@ -797,11 +797,11 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 	// definitely claimed.
 	let chanmon_cfgs = create_chanmon_cfgs(4);
 	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
-	let persister;
-	let new_chain_monitor;
+	let (persist_d_1, persist_d_2);
+	let (chain_d_1, chain_d_2);
 
 	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
-	let nodes_3_deserialized;
+	let (node_d_1, node_d_2);
 
 	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
@@ -876,7 +876,14 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 	}
 
 	// Now restart nodes[3].
-	reload_node!(nodes[3], original_manager, &[&updated_monitor.0, &original_monitor.0], persister, new_chain_monitor, nodes_3_deserialized);
+	reload_node!(nodes[3], original_manager.clone(), &[&updated_monitor.0, &original_monitor.0], persist_d_1, chain_d_1, node_d_1);
+
+	if double_restart {
+		// Previously, we had a bug where we'd fail to reload if we re-persist the `ChannelManager`
+		// without updating any `ChannelMonitor`s, as we'd fail to double-initiate the claim replay.
+		// We test that here, ensuring that we can reload again.
+		reload_node!(nodes[3], node_d_1.encode(), &[&updated_monitor.0, &original_monitor.0], persist_d_2, chain_d_2, node_d_2);
+	}
 
 	// Until the startup background events are processed (in `get_and_clear_pending_events`,
 	// below), the preimage is not copied to the non-persisted monitor...
@@ -971,8 +978,10 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 
 #[test]
 fn test_partial_claim_before_restart() {
-	do_test_partial_claim_before_restart(false);
-	do_test_partial_claim_before_restart(true);
+	do_test_partial_claim_before_restart(false, false);
+	do_test_partial_claim_before_restart(false, true);
+	do_test_partial_claim_before_restart(true, false);
+	do_test_partial_claim_before_restart(true, true);
 }
 
 fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_htlc: bool, use_intercept: bool) {
diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs
index f076e6a9da4..c326cfca804 100644
--- a/lightning/src/onion_message/messenger.rs
+++ b/lightning/src/onion_message/messenger.rs
@@ -1172,8 +1172,8 @@ where
 		}
 	}
 
-	#[cfg(test)]
-	pub(crate) fn set_offers_handler(&mut self, offers_handler: OMH) {
+	#[cfg(any(test, feature = "_test_utils"))]
+	pub fn set_offers_handler(&mut self, offers_handler: OMH) {
 		self.offers_handler = offers_handler;
 	}
 
diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs
index f3ef4dc1557..2e1289b2eb0 100644
--- a/lightning/src/util/test_channel_signer.rs
+++ b/lightning/src/util/test_channel_signer.rs
@@ -71,6 +71,7 @@ pub struct TestChannelSigner {
 	/// Channel state used for policy enforcement
 	pub state: Arc<Mutex<EnforcementState>>,
 	pub disable_revocation_policy_check: bool,
+	pub disable_all_state_policy_checks: bool,
 }
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
@@ -124,6 +125,7 @@ impl TestChannelSigner {
 			inner,
 			state,
 			disable_revocation_policy_check: false,
+			disable_all_state_policy_checks: false,
 		}
 	}
 
@@ -132,12 +134,11 @@ impl TestChannelSigner {
 	/// Since there are multiple copies of this struct for each channel, some coordination is needed
 	/// so that all copies are aware of enforcement state. A pointer to this state is provided
 	/// here, usually by an implementation of KeysInterface.
-	pub fn new_with_revoked(inner: InMemorySigner, state: Arc<Mutex<EnforcementState>>, disable_revocation_policy_check: bool) -> Self {
-		Self {
-			inner,
-			state,
-			disable_revocation_policy_check,
-		}
+	pub fn new_with_revoked(
+		inner: InMemorySigner, state: Arc<Mutex<EnforcementState>>,
+		disable_revocation_policy_check: bool, disable_all_state_policy_checks: bool,
+	) -> Self {
+		Self { inner, state, disable_revocation_policy_check, disable_all_state_policy_checks }
 	}
 
 	pub fn channel_type_features(&self) -> &ChannelTypeFeatures { self.inner.channel_type_features().unwrap() }
@@ -177,19 +178,26 @@ impl ChannelSigner for TestChannelSigner {
 		if !self.is_signer_available(SignerOp::ReleaseCommitmentSecret) {
 			return Err(());
 		}
-		{
-			let mut state = self.state.lock().unwrap();
+		let mut state = self.state.lock().unwrap();
+		if !self.disable_all_state_policy_checks {
 			assert!(idx == state.last_holder_revoked_commitment || idx == state.last_holder_revoked_commitment - 1, "can only revoke the current or next unrevoked commitment - trying {}, last revoked {}", idx, state.last_holder_revoked_commitment);
 			assert!(idx > state.last_holder_commitment, "cannot revoke the last holder commitment - attempted to revoke {} last commitment {}", idx, state.last_holder_commitment);
-			state.last_holder_revoked_commitment = idx;
 		}
+		state.last_holder_revoked_commitment = idx;
 		self.inner.release_commitment_secret(idx)
 	}
 
 	fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction, _outbound_htlc_preimages: Vec<PaymentPreimage>) -> Result<(), ()> {
 		let mut state = self.state.lock().unwrap();
 		let idx = holder_tx.commitment_number();
-		assert!(idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, "expecting to validate the current or next holder commitment - trying {}, current {}", idx, state.last_holder_commitment);
+		if !self.disable_all_state_policy_checks {
+			assert!(
+				idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1,
+				"expecting to validate the current or next holder commitment - trying {}, current {}",
+				idx,
+				state.last_holder_commitment
+			);
+		}
 		state.last_holder_commitment = idx;
 		Ok(())
 	}
@@ -200,7 +208,9 @@ impl ChannelSigner for TestChannelSigner {
 			return Err(());
 		}
 		let mut state = self.state.lock().unwrap();
-		assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment);
+		if !self.disable_all_state_policy_checks {
+			assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment);
+		}
 		state.last_counterparty_revoked_commitment = idx;
 		Ok(())
 	}
@@ -218,22 +228,28 @@ impl EcdsaChannelSigner for TestChannelSigner {
 	fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, inbound_htlc_preimages: Vec<PaymentPreimage>, outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
 		self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx);
-		{
-			#[cfg(test)]
-			if !self.is_signer_available(SignerOp::SignCounterpartyCommitment) {
-				return Err(());
-			}
-			let mut state = self.state.lock().unwrap();
-			let actual_commitment_number = commitment_tx.commitment_number();
-			let last_commitment_number = state.last_counterparty_commitment;
+		#[cfg(test)]
+		if !self.is_signer_available(SignerOp::SignCounterpartyCommitment) {
+			return Err(());
+		}
+		let mut state = self.state.lock().unwrap();
+		let actual_commitment_number = commitment_tx.commitment_number();
+		let last_commitment_number = state.last_counterparty_commitment;
+		if !self.disable_all_state_policy_checks {
 			// These commitment numbers are backwards counting. We expect either the same as the previously encountered,
 			// or the next one.
 			assert!(last_commitment_number == actual_commitment_number || last_commitment_number - 1 == actual_commitment_number, "{} doesn't come after {}", actual_commitment_number, last_commitment_number);
 			// Ensure that the counterparty doesn't get more than two broadcastable commitments -
 			// the last and the one we are trying to sign
-			assert!(actual_commitment_number >= state.last_counterparty_revoked_commitment - 2, "cannot sign a commitment if second to last wasn't revoked - signing {} revoked {}", actual_commitment_number, state.last_counterparty_revoked_commitment);
-			state.last_counterparty_commitment = cmp::min(last_commitment_number, actual_commitment_number)
+			assert!(
+				actual_commitment_number >= state.last_counterparty_revoked_commitment - 2,
+				"cannot sign a commitment if second to last wasn't revoked - signing {} revoked {}",
+				actual_commitment_number,
+				state.last_counterparty_revoked_commitment
+			);
 		}
+		state.last_counterparty_commitment =
+			cmp::min(last_commitment_number, actual_commitment_number);
 
 		Ok(self.inner.sign_counterparty_commitment(commitment_tx, inbound_htlc_preimages, outbound_htlc_preimages, secp_ctx).unwrap())
 	}
@@ -244,12 +260,14 @@ impl EcdsaChannelSigner for TestChannelSigner {
 			return Err(());
 		}
 		let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx);
-		let state = self.state.lock().unwrap();
-		let commitment_number = trusted_tx.commitment_number();
-		if state.last_holder_revoked_commitment - 1 != commitment_number && state.last_holder_revoked_commitment - 2 != commitment_number {
-			if !self.disable_revocation_policy_check {
-				panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}",
-					state.last_holder_revoked_commitment, commitment_number, self.inner.commitment_seed[0])
+		if !self.disable_all_state_policy_checks {
+			let state = self.state.lock().unwrap();
+			let commitment_number = trusted_tx.commitment_number();
+			if state.last_holder_revoked_commitment - 1 != commitment_number && state.last_holder_revoked_commitment - 2 != commitment_number {
+				if !self.disable_revocation_policy_check {
+					panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}",
+						state.last_holder_revoked_commitment, commitment_number, self.inner.commitment_seed[0])
+				}
 			}
 		}
 		Ok(self.inner.sign_holder_commitment(commitment_tx, secp_ctx).unwrap())
@@ -284,13 +302,15 @@ impl EcdsaChannelSigner for TestChannelSigner {
 		if !self.is_signer_available(SignerOp::SignHolderHtlcTransaction) {
 			return Err(());
 		}
-		let state = self.state.lock().unwrap();
-		if state.last_holder_revoked_commitment - 1 != htlc_descriptor.per_commitment_number &&
-			state.last_holder_revoked_commitment - 2 != htlc_descriptor.per_commitment_number
-		{
-			if !self.disable_revocation_policy_check {
-				panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}",
-					state.last_holder_revoked_commitment, htlc_descriptor.per_commitment_number, self.inner.commitment_seed[0])
+		if !self.disable_all_state_policy_checks {
+			let state = self.state.lock().unwrap();
+			if state.last_holder_revoked_commitment - 1 != htlc_descriptor.per_commitment_number &&
+				state.last_holder_revoked_commitment - 2 != htlc_descriptor.per_commitment_number
+			{
+				if !self.disable_revocation_policy_check {
+					panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}",
+						state.last_holder_revoked_commitment, htlc_descriptor.per_commitment_number, self.inner.commitment_seed[0])
+				}
 			}
 		}
 		assert_eq!(htlc_tx.input[input], htlc_descriptor.unsigned_tx_input());
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 5bd5acaf176..317b46a02ef 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -327,7 +327,8 @@ impl SignerProvider for OnlyReadsKeysInterface {
 		Ok(TestChannelSigner::new_with_revoked(
 			inner,
 			state,
-			false
+			false,
+			false,
 		))
 	}
 
@@ -1263,7 +1264,8 @@ pub struct TestKeysInterface {
 	pub backing: sign::PhantomKeysManager,
 	pub override_random_bytes: Mutex<Option<[u8; 32]>>,
 	pub disable_revocation_policy_check: bool,
-	enforcement_states: Mutex<HashMap<[u8; 32], Arc<Mutex<EnforcementState>>>>,
+	pub disable_all_state_policy_checks: bool,
+	enforcement_states: Mutex<HashMap<[u8; 32], Arc<Mutex<EnforcementState>>>>,
 	expectations: Mutex<Option<VecDeque<OnGetShutdownScriptpubkey>>>,
 	pub unavailable_signers_ops: Mutex<HashMap<[u8; 32], HashSet<SignerOp>>>,
 	pub next_signer_disabled_ops: Mutex<HashSet<SignerOp>>,
@@ -1319,7 +1321,9 @@ impl SignerProvider for TestKeysInterface {
 	fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> TestChannelSigner {
 		let keys = self.backing.derive_channel_signer(channel_value_satoshis, channel_keys_id);
 		let state = self.make_enforcement_state_cell(keys.commitment_seed);
-		let signer = TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check);
+		let rev_checks = self.disable_revocation_policy_check;
+		let state_checks = self.disable_all_state_policy_checks;
+		let signer = TestChannelSigner::new_with_revoked(keys, state, rev_checks, state_checks);
 		#[cfg(test)]
 		if let Some(ops) = self.unavailable_signers_ops.lock().unwrap().get(&channel_keys_id) {
 			for &op in ops {
@@ -1342,7 +1346,8 @@ impl SignerProvider for TestKeysInterface {
 		Ok(TestChannelSigner::new_with_revoked(
 			inner,
 			state,
-			self.disable_revocation_policy_check
+			self.disable_revocation_policy_check,
+			self.disable_all_state_policy_checks,
 		))
 	}
 
@@ -1366,6 +1371,7 @@ impl TestKeysInterface {
 			backing: sign::PhantomKeysManager::new(seed, now.as_secs(), now.subsec_nanos(), seed),
 			override_random_bytes: Mutex::new(None),
 			disable_revocation_policy_check: false,
+			disable_all_state_policy_checks: false,
 			enforcement_states: Mutex::new(new_hash_map()),
 			expectations: Mutex::new(None),
 			unavailable_signers_ops: Mutex::new(new_hash_map()),