diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index 97a74871ea4..617050f2427 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -1608,14 +1608,7 @@ mod tests { .unwrap() .entry((record.module_path.to_string(), format!("{}", record.args))) .or_insert(0) += 1; - println!( - "{:<5} [{} : {}, {}] {}", - record.level.to_string(), - record.module_path, - record.file, - record.line, - record.args - ); + println!("{}", record); } } diff --git a/fuzz/src/onion_message.rs b/fuzz/src/onion_message.rs index 934d748d6a5..09634a1c373 100644 --- a/fuzz/src/onion_message.rs +++ b/fuzz/src/onion_message.rs @@ -102,7 +102,11 @@ impl MessageRouter for TestMessageRouter { fn find_path( &self, _sender: PublicKey, _peers: Vec, destination: Destination, ) -> Result { - Ok(OnionMessagePath { intermediate_nodes: vec![], destination, first_node_addresses: vec![] }) + Ok(OnionMessagePath { + intermediate_nodes: vec![], + destination, + first_node_addresses: vec![], + }) } fn create_blinded_paths( @@ -328,14 +332,7 @@ mod tests { let mut lines_lock = self.lines.lock().unwrap(); let key = (record.module_path.to_string(), format!("{}", record.args)); *lines_lock.entry(key).or_insert(0) += 1; - println!( - "{:<5} [{} : {}, {}] {}", - record.level.to_string(), - record.module_path, - record.file, - record.line, - record.args - ); + println!("{}", record); } } diff --git a/fuzz/src/utils/test_logger.rs b/fuzz/src/utils/test_logger.rs index 6d9de02e387..f8369879447 100644 --- a/fuzz/src/utils/test_logger.rs +++ b/fuzz/src/utils/test_logger.rs @@ -59,15 +59,6 @@ impl<'a, Out: Output> Write for LockedWriteAdapter<'a, Out> { impl Logger for TestLogger { fn log(&self, record: Record) { - write!( - LockedWriteAdapter(&self.out), - "{:<5} {} [{} : {}] {}\n", - record.level.to_string(), - self.id, - record.module_path, - record.line, - record.args - ) - .unwrap(); + write!(LockedWriteAdapter(&self.out), "{:<6} {}", self.id, record).unwrap(); } } diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index f5b1d53fc8a..5e069574af7 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -192,7 +192,7 @@ mod test { } impl Logger for TestLogger { fn log(&self, record: lightning::util::logger::Record) { - eprintln!("{}: {}", self.node, record.args); + eprintln!("{:<8} {}", self.node, record); } } impl Deref for TestLogger { diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 068f77a84bb..5d620ec4413 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -629,14 +629,7 @@ mod tests { pub struct TestLogger(); impl lightning::util::logger::Logger for TestLogger { fn log(&self, record: lightning::util::logger::Record) { - println!( - "{:<5} [{} : {}, {}] {}", - record.level.to_string(), - record.module_path, - record.file, - record.line, - record.args - ); + println!("{}", record); } } diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index 9df4a725e54..2d3d2a8d967 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -23,6 +23,7 @@ _externalize_tests = ["inventory", "_test_utils"] # Allow signing of local transactions that may have been revoked or will be revoked, for functional testing (e.g. justice tx handling). # This is unsafe to use in production because it may result in the counterparty publishing taking our funds. 
unsafe_revoked_tx_signing = [] +safe_channels = [] std = [] diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index d8d6c90921f..d5b1c5e3dcf 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -580,11 +580,7 @@ where let has_pending_claims = monitor_state.monitor.has_pending_claims(); if has_pending_claims || get_partition_key(channel_id) % partition_factor == 0 { - log_trace!( - logger, - "Syncing Channel Monitor for channel {}", - log_funding_info!(monitor) - ); + log_trace!(logger, "Syncing Channel Monitor"); // Even though we don't track monitor updates from chain-sync as pending, we still want // updates per-channel to be well-ordered so that users don't see a // `ChannelMonitorUpdate` after a channel persist for a channel with the same @@ -592,11 +588,9 @@ where let _pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); match self.persister.update_persisted_channel(monitor.persistence_key(), None, monitor) { - ChannelMonitorUpdateStatus::Completed => log_trace!( - logger, - "Finished syncing Channel Monitor for channel {} for block-data", - log_funding_info!(monitor) - ), + ChannelMonitorUpdateStatus::Completed => { + log_trace!(logger, "Finished syncing Channel Monitor for block-data") + }, ChannelMonitorUpdateStatus::InProgress => { log_trace!( logger, @@ -961,16 +955,12 @@ where } if have_monitors_to_prune { let mut monitors = self.monitors.write().unwrap(); - monitors.retain(|channel_id, monitor_holder| { + monitors.retain(|_channel_id, monitor_holder| { let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None); let (is_fully_resolved, _) = monitor_holder.monitor.check_and_update_full_resolution_status(&logger); if is_fully_resolved { - log_info!( - logger, - "Archiving fully resolved ChannelMonitor for channel ID {}", - channel_id - ); + log_info!(logger, "Archiving fully resolved ChannelMonitor"); self.persister .archive_persisted_channel(monitor_holder.monitor.persistence_key()); false @@ -1106,11 +1096,7 @@ where }, hash_map::Entry::Vacant(e) => e, }; - log_trace!( - logger, - "Loaded existing ChannelMonitor for channel {}", - log_funding_info!(monitor) - ); + log_trace!(logger, "Loaded existing ChannelMonitor"); if let Some(ref chain_source) = self.chain_source { monitor.load_outputs_to_watch(chain_source, &self.logger); } @@ -1366,25 +1352,17 @@ where }, hash_map::Entry::Vacant(e) => e, }; - log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor)); + log_trace!(logger, "Got new ChannelMonitor"); let update_id = monitor.get_latest_update_id(); let mut pending_monitor_updates = Vec::new(); let persist_res = self.persister.persist_new_channel(monitor.persistence_key(), &monitor); match persist_res { ChannelMonitorUpdateStatus::InProgress => { - log_info!( - logger, - "Persistence of new ChannelMonitor for channel {} in progress", - log_funding_info!(monitor) - ); + log_info!(logger, "Persistence of new ChannelMonitor in progress",); pending_monitor_updates.push(update_id); }, ChannelMonitorUpdateStatus::Completed => { - log_info!( - logger, - "Persistence of new ChannelMonitor for channel {} completed", - log_funding_info!(monitor) - ); + log_info!(logger, "Persistence of new ChannelMonitor completed",); }, ChannelMonitorUpdateStatus::UnrecoverableError => { let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. 
This indicates we cannot continue normal operation and must shut down."; @@ -1452,7 +1430,7 @@ where // We don't want to persist a `monitor_update` which results in a failure to apply later // while reading `channel_monitor` with updates from storage. Instead, we should persist // the entire `channel_monitor` here. - log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor)); + log_warn!(logger, "Failed to update ChannelMonitor. Going ahead and persisting the entire ChannelMonitor"); self.persister.update_persisted_channel( monitor.persistence_key(), None, @@ -1468,18 +1446,17 @@ where match persist_res { ChannelMonitorUpdateStatus::InProgress => { pending_monitor_updates.push(update_id); - log_debug!(logger, - "Persistence of ChannelMonitorUpdate id {:?} for channel {} in progress", + log_debug!( + logger, + "Persistence of ChannelMonitorUpdate id {:?} in progress", update_id, - log_funding_info!(monitor) ); }, ChannelMonitorUpdateStatus::Completed => { log_debug!( logger, - "Persistence of ChannelMonitorUpdate id {:?} for channel {} completed", + "Persistence of ChannelMonitorUpdate id {:?} completed", update_id, - log_funding_info!(monitor) ); }, ChannelMonitorUpdateStatus::UnrecoverableError => { diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 1d035b68650..0ce582379a4 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -50,7 +50,7 @@ use crate::ln::chan_utils::{ self, ChannelTransactionParameters, CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCClaim, HTLCOutputInCommitment, HolderCommitmentTransaction, }; -use crate::ln::channel::INITIAL_COMMITMENT_NUMBER; +use crate::ln::channel::{read_check_data, INITIAL_COMMITMENT_NUMBER}; use crate::ln::channel_keys::{ DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, HtlcKey, RevocationBasepoint, RevocationKey, @@ -111,6 +111,9 @@ pub struct ChannelMonitorUpdate { /// Will be `None` for `ChannelMonitorUpdate`s constructed on LDK versions prior to 0.0.121 and /// always `Some` otherwise. pub channel_id: Option, + + /// The encoded channel data associated with this ChannelMonitor, if any. + pub encoded_channel: Option>, } impl ChannelMonitorUpdate { @@ -156,6 +159,13 @@ impl Writeable for ChannelMonitorUpdate { for update_step in self.updates.iter() { update_step.write(w)?; } + #[cfg(feature = "safe_channels")] + write_tlv_fields!(w, { + // 1 was previously used to store `counterparty_node_id` + (3, self.channel_id, option), + (5, self.encoded_channel, option) + }); + #[cfg(not(feature = "safe_channels"))] write_tlv_fields!(w, { // 1 was previously used to store `counterparty_node_id` (3, self.channel_id, option), @@ -176,11 +186,13 @@ impl Readable for ChannelMonitorUpdate { } } let mut channel_id = None; + let mut encoded_channel = None; read_tlv_fields!(r, { // 1 was previously used to store `counterparty_node_id` (3, channel_id, option), + (5, encoded_channel, option) }); - Ok(Self { update_id, updates, channel_id }) + Ok(Self { update_id, updates, channel_id, encoded_channel }) } } @@ -1402,6 +1414,8 @@ pub(crate) struct ChannelMonitorImpl { /// make deciding whether to do so simple, here we track whether this monitor was last written /// prior to 0.1. 
written_by_0_1_or_later: bool, + + encoded_channel: Option>, } // Returns a `&FundingScope` for the one we are currently observing/handling commitment transactions @@ -1521,6 +1535,9 @@ const MIN_SERIALIZATION_VERSION: u8 = 1; pub(crate) fn write_chanmon_internal( channel_monitor: &ChannelMonitorImpl, _is_stub: bool, writer: &mut W, ) -> Result<(), Error> { + if let Some(ref encoded_channel) = channel_monitor.encoded_channel { + channel_monitor.check_encoded_channel_consistency(encoded_channel); + } write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); channel_monitor.latest_update_id.write(writer)?; @@ -1733,6 +1750,32 @@ pub(crate) fn write_chanmon_internal( _ => channel_monitor.pending_monitor_events.clone(), }; + #[cfg(feature = "safe_channels")] + write_tlv_fields!(writer, { + (1, channel_monitor.funding_spend_confirmed, option), + (3, channel_monitor.htlcs_resolved_on_chain, required_vec), + (5, pending_monitor_events, required_vec), + (7, channel_monitor.funding_spend_seen, required), + (9, channel_monitor.counterparty_node_id, required), + (11, channel_monitor.confirmed_commitment_tx_counterparty_output, option), + (13, channel_monitor.spendable_txids_confirmed, required_vec), + (15, channel_monitor.counterparty_fulfilled_htlcs, required), + (17, channel_monitor.initial_counterparty_commitment_info, option), + (19, channel_monitor.channel_id, required), + (21, channel_monitor.balances_empty_height, option), + (23, channel_monitor.holder_pays_commitment_tx_fee, option), + (25, channel_monitor.payment_preimages, required), + (27, channel_monitor.first_negotiated_funding_txo, required), + (29, channel_monitor.initial_counterparty_commitment_tx, option), + (31, channel_monitor.funding.channel_parameters, required), + (32, channel_monitor.pending_funding, optional_vec), + (33, channel_monitor.htlcs_resolved_to_user, required), + (34, channel_monitor.alternative_funding_confirmed, option), + (35, channel_monitor.is_manual_broadcast, required), + (37, channel_monitor.funding_seen_onchain, required), + (39, channel_monitor.encoded_channel, option), + }); + #[cfg(not(feature = "safe_channels"))] write_tlv_fields!(writer, { (1, channel_monitor.funding_spend_confirmed, option), (3, channel_monitor.htlcs_resolved_on_chain, required_vec), @@ -1994,6 +2037,7 @@ impl ChannelMonitor { alternative_funding_confirmed: None, written_by_0_1_or_later: true, + encoded_channel: None, }) } @@ -2114,6 +2158,17 @@ impl ChannelMonitor { inner.update_monitor(updates, broadcaster, fee_estimator, &logger) } + /// Gets the encoded channel data, if any, associated with this ChannelMonitor. + pub fn get_encoded_channel(&self) -> Option> { + self.inner.lock().unwrap().encoded_channel.clone() + } + + /// Updates the encoded channel data associated with this ChannelMonitor. To clear the encoded channel data (for + /// example after shut down of a channel), pass an empty vector. + pub fn update_encoded_channel(&self, encoded: Vec) { + self.inner.lock().unwrap().update_encoded_channel(encoded); + } + /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this /// ChannelMonitor. 
/// @@ -2719,6 +2774,51 @@ impl ChannelMonitor { } impl ChannelMonitorImpl { + fn check_encoded_channel_consistency(&self, encoded: &[u8]) { + let encoded_channel_reader = &mut &encoded[..]; + let check_res = read_check_data(encoded_channel_reader); + if let Ok(check_data) = check_res { + debug_assert!( + check_data.cur_holder_commitment_transaction_number + <= self.get_cur_holder_commitment_number(), + "cur_holder_commitment_transaction_number - channel: {} vs monitor: {}", + check_data.cur_holder_commitment_transaction_number, + self.get_cur_holder_commitment_number() + ); + debug_assert!( + check_data.revoked_counterparty_commitment_transaction_number + <= self.get_min_seen_secret(), + "revoked_counterparty_commitment_transaction_number - channel: {} vs monitor: {}", + check_data.revoked_counterparty_commitment_transaction_number, + self.get_min_seen_secret() + ); + debug_assert!( + check_data.cur_counterparty_commitment_transaction_number + <= self.get_cur_counterparty_commitment_number(), + "cur_counterparty_commitment_transaction_number - channel: {} vs monitor: {}", + check_data.cur_counterparty_commitment_transaction_number, + self.get_cur_counterparty_commitment_number() + ); + debug_assert!( + check_data.latest_monitor_update_id >= self.get_latest_update_id(), + "latest_monitor_update_id - channel: {} vs monitor: {}", + check_data.latest_monitor_update_id, + self.get_latest_update_id() + ); + } else { + debug_assert!(false, "Failed to read check data from encoded channel"); + } + } + + fn update_encoded_channel(&mut self, encoded: Vec) { + if encoded.len() > 0 { + self.check_encoded_channel_consistency(&encoded); + self.encoded_channel = Some(encoded); + } else { + self.encoded_channel = None; + } + } + /// Helper for get_claimable_balances which does the work for an individual HTLC, generating up /// to one `Balance` for the HTLC. #[rustfmt::skip] @@ -3475,6 +3575,6 @@ impl ChannelMonitorImpl { return Err("Previous secret did not match new one"); } // Prune HTLCs from the previous counterparty commitment tx so we don't generate failure/fulfill // events for now-revoked/fulfilled HTLCs.
let mut removed_fulfilled_htlcs = false; @@ -4223,8 +4325,8 @@ impl ChannelMonitorImpl { log_info!(logger, "Applying pre-0.1 force close update to monitor {} with {} change(s).", log_funding_info!(self), updates.updates.len()); } else { - log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} change(s).", - log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len()); + log_info!(logger, "Applying update, bringing update_id from {} to {} with {} change(s).", + self.latest_update_id, updates.update_id, updates.updates.len()); } // ChannelMonitor updates may be applied after force close if we receive a preimage for a @@ -4351,7 +4453,7 @@ impl ChannelMonitorImpl { self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger, true); } else if !self.holder_tx_signed { log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast"); - log_error!(logger, " in channel monitor for channel {}!", &self.channel_id()); + log_error!(logger, " in channel monitor!"); log_error!(logger, " Read the docs for ChannelMonitor::broadcast_latest_holder_commitment_txn to take manual action!"); } else { // If we generated a MonitorEvent::HolderForceClosed, the ChannelManager @@ -4405,9 +4507,20 @@ impl ChannelMonitorImpl { } } - if ret.is_ok() && self.no_further_updates_allowed() && is_pre_close_update { - log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); - Err(()) + // Assume that if the updates contains no encoded channel, that the channel remained unchanged. We + // therefore do not update the monitor. + if let Some(encoded_channel) = updates.encoded_channel.as_ref() { + self.update_encoded_channel(encoded_channel.clone()); + } + + if ret.is_ok() { + if self.no_further_updates_allowed() && is_pre_close_update { + log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); + Err(()) + } else { + + Ok(()) + } } else { ret } } @@ -5479,7 +5592,7 @@ impl ChannelMonitorImpl { } else { "".to_string() }; - log_info!(logger, "{desc} for channel {} confirmed with txid {txid}{action}", self.channel_id()); + log_info!(logger, "{desc} confirmed with txid {txid}{action}"); self.alternative_funding_confirmed = Some((txid, height)); @@ -5531,8 +5644,7 @@ impl ChannelMonitorImpl { .map(|(txid, _)| txid) .unwrap_or_else(|| self.funding.funding_txid()) ); - log_info!(logger, "Channel {} closed by funding output spend in txid {txid}", - self.channel_id()); + log_info!(logger, "Channel closed by funding output spend in txid {txid}"); if !self.funding_spend_seen { self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(())); } @@ -6645,6 +6757,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let mut alternative_funding_confirmed = None; let mut is_manual_broadcast = RequiredWrapper(None); let mut funding_seen_onchain = RequiredWrapper(None); + let mut encoded_channel = None; read_tlv_fields!(reader, { (1, funding_spend_confirmed, option), (3, htlcs_resolved_on_chain, optional_vec), @@ -6667,6 +6780,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP (34, alternative_funding_confirmed, option), (35, is_manual_broadcast, (default_value, false)), (37, funding_seen_onchain, (default_value, true)), + (39, encoded_channel, option), }); // Note that 
`payment_preimages_with_info` was added (and is always written) in LDK 0.1, so // we can use it to determine if this monitor was last written by LDK 0.1 or later. @@ -6844,6 +6958,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP alternative_funding_confirmed, written_by_0_1_or_later, + encoded_channel, }); if counterparty_node_id.is_none() { @@ -7016,6 +7131,7 @@ mod tests { check_added_monitors(&nodes[1], 1); } + #[cfg(not(feature = "safe_channels"))] #[test] fn test_funding_spend_refuses_updates() { do_test_funding_spend_refuses_updates(true); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 1a9af4f2071..b9c14abe4db 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -150,8 +150,7 @@ fn test_monitor_and_persister_update_fail() { } logger.assert_log_regex( "lightning::chain::chainmonitor", - regex::Regex::new("Failed to update ChannelMonitor for channel [0-9a-f]*.") - .unwrap(), + regex::Regex::new("Failed to update ChannelMonitor").unwrap(), 1, ); @@ -2839,6 +2838,8 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); } + +#[cfg(not(feature = "safe_channels"))] #[test] fn channel_holding_cell_serialize() { do_channel_holding_cell_serialize(true, true); @@ -3320,6 +3321,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { assert!(nodes[0].node.list_channels().is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_outbound_reload_without_init_mon() { do_test_outbound_reload_without_init_mon(true); @@ -3428,6 +3430,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo assert!(nodes[1].node.list_channels().is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_inbound_reload_without_init_mon() { do_test_inbound_reload_without_init_mon(true, true); @@ -3577,6 +3580,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_blocked_chan_preimage_release() { do_test_blocked_chan_preimage_release(BlockedUpdateComplMode::AtReload); @@ -3767,6 +3771,7 @@ fn do_test_inverted_mon_completion_order( expect_payment_sent(&nodes[0], payment_preimage, None, true, true); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_inverted_mon_completion_order() { do_test_inverted_mon_completion_order(true, true); @@ -3969,6 +3974,7 @@ fn do_test_durable_preimages_on_closed_channel( } } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_durable_preimages_on_closed_channel() { do_test_durable_preimages_on_closed_channel(true, true, true); @@ -4093,6 +4099,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { send_payment(&nodes[1], &[&nodes[2]], 100_000); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_reload_mon_update_completion_actions() { do_test_reload_mon_update_completion_actions(true); @@ -4459,6 +4466,7 @@ fn do_test_partial_claim_mon_update_compl_actions(reload_a: bool, reload_b: bool assert!(!get_monitor!(nodes[3], chan_4_id).get_stored_preimages().contains_key(&payment_hash)); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_partial_claim_mon_update_compl_actions() { 
do_test_partial_claim_mon_update_compl_actions(true, true); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index c5942f38da6..434d96572de 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -5195,8 +5195,8 @@ where { log_info!( logger, - "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", - &self.channel_id(), + "Attempting to fail HTLC due to fee spike buffer violation. Rebalancing is required.", + ); return Err(LocalHTLCFailureReason::FeeSpikeBuffer); } @@ -6059,6 +6059,7 @@ where should_broadcast: broadcast, }], channel_id: Some(self.channel_id()), + encoded_channel: Some(Vec::new()), // Clear channel on shut down. }; Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), update)) } else { @@ -6291,11 +6292,7 @@ where { let signatures = self.get_initial_counterparty_commitment_signatures(funding, logger); if let Some((signature, htlc_signatures)) = signatures { - log_info!( - logger, - "Generated commitment_signed for peer for channel {}", - &self.channel_id() - ); + log_info!(logger, "Generated commitment_signed for peer",); if matches!(self.channel_state, ChannelState::FundingNegotiated(_)) { // We shouldn't expect any HTLCs before `ChannelReady`. debug_assert!(htlc_signatures.is_empty()); @@ -6396,9 +6393,9 @@ where log_info!( logger, - "Funding txid {} for channel {} confirmed in block {}", + "Funding txid {} confirmed in block {}", funding_txo.txid, - &self.channel_id(), + block_hash, ); @@ -7333,6 +7330,7 @@ where payment_info, }], channel_id: Some(self.context.channel_id()), + encoded_channel: Some(self.encode()), }; if !self.context.channel_state.can_generate_new_commitment() { @@ -7371,8 +7369,7 @@ where } log_trace!( logger, - "Adding HTLC claim to holding_cell in channel {}! Current state: {}", - &self.context.channel_id(), + "Adding HTLC claim to holding_cell! Current state: {}", self.context.channel_state.to_u32() ); self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { @@ -7403,9 +7400,8 @@ where } log_trace!( logger, - "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", + "Upgrading HTLC {} to LocalRemoved with a Fulfill!", &htlc.payment_hash, - &self.context.channel_id ); htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill( payment_preimage_arg.clone(), @@ -7448,6 +7444,7 @@ where // to be strictly increasing by one, so decrement it here. 
self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); + monitor_update.encoded_channel = Some(self.encode()); } else { let blocked_upd = self.context.blocked_monitor_updates.get(0); let new_mon_id = blocked_upd @@ -7565,13 +7562,13 @@ where _ => {} } } - log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id()); + log_trace!(logger, "Placing failure for HTLC ID {} in holding cell.", htlc_id_arg); self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg)); return Ok(None); } - log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg, - E::Message::name(), &self.context.channel_id()); + log_trace!(logger, "Failing HTLC ID {} back with {} message.", htlc_id_arg, + E::Message::name()); { let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; htlc.state = err_contents.clone().to_inbound_htlc_state(); @@ -7938,18 +7935,20 @@ where let counterparty_bitcoin_tx = counterparty_trusted_tx.built_transaction(); log_trace!( logger, - "Splice initial counterparty tx for channel {} is: txid {} tx {}", - &self.context.channel_id(), + "Splice initial counterparty tx is: txid {} tx {}", counterparty_bitcoin_tx.txid, encode::serialize_hex(&counterparty_bitcoin_tx.transaction) ); } - log_info!(logger, "Received splice initial commitment_signed from peer for channel {} with funding txid {}", - &self.context.channel_id(), pending_splice_funding.get_funding_txo().unwrap().txid); + log_info!( + logger, + "Received splice initial commitment_signed from peer with funding txid {}", + pending_splice_funding.get_funding_txo().unwrap().txid + ); self.context.latest_monitor_update_id += 1; - let monitor_update = ChannelMonitorUpdate { + let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::RenegotiatedFunding { channel_parameters: pending_splice_funding.channel_transaction_parameters.clone(), @@ -7957,6 +7956,7 @@ where counterparty_commitment_tx, }], channel_id: Some(self.context.channel_id()), + encoded_channel: None, }; self.context @@ -7966,6 +7966,7 @@ where .received_commitment_signed(); self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + monitor_update.encoded_channel = Some(self.encode()); Ok(self.push_ret_blockable_mon_update(monitor_update)) } @@ -8222,6 +8223,7 @@ where update_id: self.context.latest_monitor_update_id, updates: vec![update], channel_id: Some(self.context.channel_id()), + encoded_channel: None, }; self.context.expecting_peer_commitment_signed = false; @@ -8244,8 +8246,9 @@ where self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); } - log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.", - &self.context.channel_id); + log_debug!(logger, "Received valid commitment_signed from peer, updated HTLC state but awaiting a monitor update resolution to reply.", + ); + monitor_update.encoded_channel = Some(self.encode()); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -8274,6 +8277,7 @@ where Vec::new(), Vec::new(), ); + monitor_update.encoded_channel = Some(self.encode()); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -8313,20 +8317,20 @@ where { 
log_trace!( logger, - "Freeing holding cell with {} HTLC updates{} in channel {}", + "Freeing holding cell with {} HTLC updates{}", self.context.holding_cell_htlc_updates.len(), if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, - &self.context.channel_id() ); let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet! updates: Vec::new(), channel_id: Some(self.context.channel_id()), + encoded_channel: None, }; let mut htlc_updates = Vec::new(); @@ -8377,7 +8381,12 @@ where update_add_count += 1; }, Err((_, msg)) => { - log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id()); + log_info!( + logger, + "Failed to send HTLC with payment_hash {} due to {}", + &payment_hash, + msg + ); // If we fail to send here, then this HTLC should be failed // backwards. Failing to send here indicates that this HTLC may // keep being put back into the holding cell without ever being @@ -8417,6 +8426,8 @@ where unreachable!() }; update_fulfill_count += 1; + + additional_monitor_update.encoded_channel = Some(self.encode()); monitor_update.updates.append(&mut additional_monitor_update.updates); None }, @@ -8470,11 +8481,13 @@ where self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); - log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.", - &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" }, + log_debug!(logger, "Freeing holding cell resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.", + if update_fee.is_some() { "a fee update, " } else { "" }, update_add_count, update_fulfill_count, update_fail_count); self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + + monitor_update.encoded_channel = Some(self.encode()); (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) @@ -8591,6 +8604,7 @@ where secret: msg.per_commitment_secret, }], channel_id: Some(self.context.channel_id()), + encoded_channel: None, }; // Update state now that we've passed all the can-fail calls... @@ -8604,15 +8618,18 @@ where self.context.counterparty_next_commitment_point = Some(msg.next_per_commitment_point); self.context.counterparty_next_commitment_transaction_number -= 1; + log_trace!( + logger, + "FundedChannel {} new counterparty_next_commitment_transaction_number: {}", + self.context.channel_id(), + self.context.counterparty_next_commitment_transaction_number + ); + if self.context.announcement_sigs_state == AnnouncementSigsState::Committed { self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived; } - log_trace!( - logger, - "Updating HTLCs on receipt of RAA in channel {}...", - &self.context.channel_id() - ); + log_trace!(logger, "Updating HTLCs on receipt of RAA..."); let mut to_forward_infos = Vec::new(); let mut pending_update_adds = Vec::new(); let mut revoked_htlcs = Vec::new(); @@ -8816,6 +8833,7 @@ where }; macro_rules! 
return_with_htlcs_to_fail { ($htlcs_to_fail: expr) => { + monitor_update.encoded_channel = Some(self.encode()); if !release_monitor { self.context .blocked_monitor_updates @@ -8836,8 +8854,8 @@ where self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); - log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.", - &self.context.channel_id(), release_state_str); + log_debug!(logger, "Received a valid revoke_and_ack with holding cell HTLCs freed. {} monitor update.", + release_state_str); self.monitor_updating_paused( false, @@ -8865,8 +8883,7 @@ where log_debug!( logger, - "Received a valid revoke_and_ack for channel {}. {} monitor update.", - &self.context.channel_id(), + "Received a valid revoke_and_ack. {} monitor update.", release_state_str ); if self.context.channel_state.can_generate_new_commitment() { @@ -8882,12 +8899,7 @@ where } else { "can continue progress" }; - log_debug!( - logger, - "Holding back commitment update until channel {} {}", - &self.context.channel_id, - reason - ); + log_debug!(logger, "Holding back commitment update until {}", reason); } self.monitor_updating_paused( @@ -8900,8 +8912,8 @@ where ); return_with_htlcs_to_fail!(htlcs_to_fail); } else { - log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.", - &self.context.channel_id(), release_state_str); + log_debug!(logger, "Received a valid revoke_and_ack with no reply necessary. {} monitor update.", + release_state_str); self.monitor_updating_paused( false, @@ -8955,9 +8967,8 @@ where { log_info!( logger, - "Sending 0conf splice_locked txid {} to our peer for channel {}", + "Sending 0conf splice_locked txid {} to our peer", splice_txid, - &self.context.channel_id ); } @@ -9632,12 +9643,12 @@ where } } if !self.holder_commitment_point.can_advance() { - log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available", - &self.context.channel_id(), self.holder_commitment_point.next_transaction_number()); + log_trace!(logger, "Last revoke-and-ack pending for sequence {} because the next per-commitment point is not available", + self.holder_commitment_point.next_transaction_number()); } if per_commitment_secret.is_none() { - log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment secret for {} is not available", - &self.context.channel_id(), self.holder_commitment_point.next_transaction_number(), + log_trace!(logger, "Last revoke-and-ack pending for sequence {} because the next per-commitment secret for {} is not available", + self.holder_commitment_point.next_transaction_number(), self.holder_commitment_point.next_transaction_number() + 2); } // Technically if HolderCommitmentPoint::can_advance is false, @@ -9646,8 +9657,8 @@ where // CS before we have any commitment point available. Blocking our // RAA here is a convenient way to make sure that post-funding // we're only ever waiting on one commitment point at a time. 
- log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available", - &self.context.channel_id(), self.holder_commitment_point.next_transaction_number()); + log_trace!(logger, "Last revoke-and-ack pending for sequence {} because the next per-commitment point is not available", + self.holder_commitment_point.next_transaction_number()); self.context.signer_pending_revoke_and_ack = true; None } @@ -9727,8 +9738,8 @@ where None }; - log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", - &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" }, + log_trace!(logger, "Regenerating latest commitment update with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", + if update_fee.is_some() { " update_fee," } else { "" }, update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger) { if self.context.signer_pending_commitment_update { @@ -10067,9 +10078,9 @@ where if msg.next_local_commitment_number == next_counterparty_commitment_number { if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { - log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); + log_debug!(logger, "Reconnected with only lost outbound RAA"); } else { - log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); + log_debug!(logger, "Reconnected with no loss"); } Ok(ReestablishResponses { @@ -10092,9 +10103,9 @@ where assert!(tx_signatures.is_none()); if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { - log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id()); + log_debug!(logger, "Reconnected channel with lost outbound RAA and lost remote commitment tx"); } else { - log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id()); + log_debug!(logger, "Reconnected channel with only lost remote commitment tx"); } if self.context.channel_state.is_monitor_update_in_progress() { @@ -10112,7 +10123,7 @@ where } else { let commitment_update = if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst && self.context.signer_pending_revoke_and_ack { - log_trace!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for revoke and ack", &self.context.channel_id()); + log_trace!(logger, "Reconnected channel with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for revoke and ack"); self.context.signer_pending_commitment_update = true; None } else { @@ -10120,7 +10131,7 @@ where }; let raa = if self.context.resend_order == RAACommitmentOrder::CommitmentFirst && self.context.signer_pending_commitment_update && required_revoke.is_some() { - log_trace!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for commitment update", &self.context.channel_id()); + log_trace!(logger, "Reconnected channel with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, 
waiting on signer for commitment update"); self.context.signer_pending_revoke_and_ack = true; None } else { @@ -10441,6 +10452,7 @@ where scriptpubkey: self.get_closing_scriptpubkey(), }], channel_id: Some(self.context.channel_id()), + encoded_channel: Some(self.encode()), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); self.push_ret_blockable_mon_update(monitor_update) @@ -10895,10 +10907,9 @@ where if self.context.blocked_monitor_updates.is_empty() { return None; } - Some(( - self.context.blocked_monitor_updates.remove(0).update, - !self.context.blocked_monitor_updates.is_empty(), - )) + let mut update = self.context.blocked_monitor_updates.remove(0).update; + update.encoded_channel = Some(self.encode()); + Some((update, !self.context.blocked_monitor_updates.is_empty())) } /// Pushes a new monitor update into our monitor update queue, returning it if it should be @@ -10923,14 +10934,12 @@ where pub fn on_startup_drop_completed_blocked_mon_updates_through( &mut self, logger: &L, loaded_mon_update_id: u64, ) { - let channel_id = self.context.channel_id(); self.context.blocked_monitor_updates.retain(|update| { if update.update.update_id <= loaded_mon_update_id { log_info!( logger, - "Dropping completed ChannelMonitorUpdate id {} on channel {} due to a stale ChannelManager", + "Dropping completed ChannelMonitorUpdate id {} due to a stale ChannelManager", update.update.update_id, - channel_id, ); false } else { @@ -11143,21 +11152,11 @@ where return None; } } else { - log_info!( - logger, - "Waiting on splice_locked txid {} for channel {}", - splice_txid, - &self.context.channel_id, - ); + log_info!(logger, "Waiting on splice_locked txid {}", splice_txid); return None; } - log_info!( - logger, - "Promoting splice funding txid {} for channel {}", - splice_txid, - &self.context.channel_id, - ); + log_info!(logger, "Promoting splice funding txid {}", splice_txid); let discarded_funding = { // Scope `funding` to avoid unintentionally using it later since it is swapped below. 
@@ -11210,6 +11209,7 @@ where funding_txid: funding_txo.txid, }], channel_id: Some(self.context.channel_id()), + encoded_channel: Some(self.encode()), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); let monitor_update = self.push_ret_blockable_mon_update(monitor_update); @@ -11263,7 +11263,7 @@ where } if let Some(channel_ready) = self.check_get_channel_ready(height, logger) { - log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id); + log_info!(logger, "Sending a channel_ready to our peer"); let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger); return Ok((Some(FundingConfirmedMessage::Establishment(channel_ready)), announcement_sigs)); } @@ -11389,7 +11389,7 @@ where let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer { self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger) } else { None }; - log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id); + log_info!(logger, "Sending a channel_ready to our peer"); return Ok((Some(FundingConfirmedMessage::Establishment(channel_ready)), timed_out_htlcs, announcement_sigs)); } @@ -11412,7 +11412,7 @@ where } } else if !self.funding.is_outbound() && self.funding.funding_tx_confirmed_in.is_none() && height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { - log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id); + log_info!(logger, "Closing channel due to funding timeout"); // If funding_tx_confirmed_in is unset, the channel must not be active assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new())); assert!(!self.context.channel_state.is_our_channel_ready()); @@ -11458,9 +11458,9 @@ where height, ) { log_info!( - logger, "Sending splice_locked txid {} to our peer for channel {}", + logger, "Sending splice_locked txid {} to our peer", splice_locked.splice_txid, - &self.context.channel_id + ); let (funding_txo, monitor_update, announcement_sigs, discarded_funding) = chain_node_signer @@ -11621,7 +11621,7 @@ where return None; } - log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id()); + log_trace!(logger, "Creating an announcement_signatures message"); let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) { Ok(a) => a, Err(e) => { @@ -11829,7 +11829,7 @@ where let dummy_pubkey = PublicKey::from_slice(&pk).unwrap(); let remote_last_secret = if self.context.counterparty_next_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.counterparty_next_commitment_transaction_number + 2).unwrap(); - log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id()); + log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {}", log_bytes!(remote_last_secret)); remote_last_secret } else { log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id()); @@ -12449,12 +12449,7 @@ where NS::Target: NodeSigner, L::Target: Logger, { - log_info!( - logger, - "Received splice_locked txid {} from our peer for channel {}", - msg.splice_txid, - 
&self.context.channel_id, - ); + log_info!(logger, "Received splice_locked txid {} from our peer", msg.splice_txid,); let pending_splice = match self.pending_splice.as_mut() { Some(pending_splice) => pending_splice, @@ -12476,9 +12471,8 @@ where if pending_splice.sent_funding_txid.is_none() { log_info!( logger, - "Waiting for enough confirmations to send splice_locked txid {} for channel {}", + "Waiting for enough confirmations to send splice_locked txid {}", msg.splice_txid, - &self.context.channel_id, ); return Ok(None); } @@ -12765,12 +12759,13 @@ where } self.context.latest_monitor_update_id += 1; + self.context.channel_state.set_awaiting_remote_revoke(); let monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, updates: vec![update], channel_id: Some(self.context.channel_id()), + encoded_channel: Some(self.encode()), }; - self.context.channel_state.set_awaiting_remote_revoke(); monitor_update } @@ -12837,19 +12832,19 @@ where htlc_signatures = res.1; let trusted_tx = counterparty_commitment_tx.trust(); - log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", + log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {}", encode::serialize_hex(&trusted_tx.built_transaction().transaction), &trusted_tx.txid(), encode::serialize_hex(&funding.get_funding_redeemscript()), - log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id()); + log_bytes!(signature.serialize_compact()[..])); let counterparty_keys = trusted_tx.keys(); debug_assert_eq!(htlc_signatures.len(), trusted_tx.nondust_htlcs().len()); for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(trusted_tx.nondust_htlcs()) { - log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", + log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {}", encode::serialize_hex(&chan_utils::build_htlc_transaction(&trusted_tx.txid(), trusted_tx.negotiated_feerate_per_kw(), funding.get_holder_selected_contest_delay(), htlc, funding.get_channel_type(), &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, funding.get_channel_type(), &counterparty_keys)), log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()), - log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id()); + log_bytes!(htlc_sig.serialize_compact()[..])); } } @@ -13015,6 +13010,7 @@ where scriptpubkey: self.get_closing_scriptpubkey(), }], channel_id: Some(self.context.channel_id()), + encoded_channel: Some(self.encode()), }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); self.push_ret_blockable_mon_update(monitor_update) @@ -15640,6 +15636,56 @@ where } } +pub struct ChannelStateCheckData { + pub cur_holder_commitment_transaction_number: u64, + pub revoked_counterparty_commitment_transaction_number: u64, + pub cur_counterparty_commitment_transaction_number: u64, + pub latest_monitor_update_id: u64, +} + +pub fn read_check_data(reader: &mut R) -> Result { + let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); + if ver <= 2 { + return Err(DecodeError::UnknownVersion); + } + + let _user_id_low: u64 = Readable::read(reader)?; + + let mut _config = LegacyChannelConfig::default(); + let mut _val: u64 = Readable::read(reader)?; + + let _channel_id: ChannelId = Readable::read(reader)?; + let channel_state = + 
ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?; + let _channel_value_satoshis: u64 = Readable::read(reader)?; + + let latest_monitor_update_id = Readable::read(reader)?; + + // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set. + let mut _shutdown_scriptpubkey = match ::read(reader) { + Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)), + Err(_) => None, + }; + let _destination_script: ScriptBuf = Readable::read(reader)?; + + let holder_commitment_next_transaction_number: u64 = Readable::read(reader)?; + let counterparty_next_commitment_transaction_number: u64 = Readable::read(reader)?; + + let cur_holder_commitment_transaction_number = holder_commitment_next_transaction_number + 1; + let revoked_counterparty_commitment_transaction_number = + counterparty_next_commitment_transaction_number + 2; + let cur_counterparty_commitment_transaction_number = + counterparty_next_commitment_transaction_number + + if channel_state.is_awaiting_remote_revoke() { 0 } else { 1 }; + + Ok(ChannelStateCheckData { + cur_holder_commitment_transaction_number, + revoked_counterparty_commitment_transaction_number, + cur_counterparty_commitment_transaction_number, + latest_monitor_update_id, + }) +} + fn duration_since_epoch() -> Option { #[cfg(not(feature = "std"))] let now = None; diff --git a/lightning/src/ln/channel_open_tests.rs b/lightning/src/ln/channel_open_tests.rs index 3fd546aaff7..fd0483601fc 100644 --- a/lightning/src/ln/channel_open_tests.rs +++ b/lightning/src/ln/channel_open_tests.rs @@ -2095,6 +2095,7 @@ pub fn test_batch_channel_open() { ))); } +#[cfg(not(feature = "safe_channels"))] #[xtest(feature = "_externalize_tests")] pub fn test_close_in_funding_batch() { // This test ensures that if one of the channels @@ -2183,6 +2184,7 @@ pub fn test_close_in_funding_batch() { assert!(nodes[0].node.list_channels().is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[xtest(feature = "_externalize_tests")] pub fn test_batch_funding_close_after_funding_signed() { let chanmon_cfgs = create_chanmon_cfgs(3); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 644920557d2..7d52f39e8b0 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3365,7 +3365,7 @@ macro_rules! convert_channel_err { ChannelError::Close((msg, reason)) => { let (mut shutdown_res, chan_update) = $close(reason); let logger = WithChannelContext::from(&$self.logger, &$chan.context(), None); - log_error!(logger, "Closed channel {} due to close-required error: {}", $channel_id, msg); + log_error!(logger, "Closed channel due to close-required error: {}", msg); $locked_close(&mut shutdown_res, $chan); let err = MsgHandleErrInternal::from_finish_shutdown(msg, $channel_id, shutdown_res, chan_update); @@ -3664,7 +3664,7 @@ macro_rules! handle_monitor_update_completion { /// Returns whether the monitor update is completed, `false` if the update is in-progress. 
fn handle_monitor_update_res( - cm: &CM, update_res: ChannelMonitorUpdateStatus, channel_id: ChannelId, logger: LG, + cm: &CM, update_res: ChannelMonitorUpdateStatus, logger: LG, ) -> bool { debug_assert!(cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire)); match update_res { @@ -3678,8 +3678,10 @@ fn handle_monitor_update_res( if cm.get_cm().monitor_update_type.swap(1, Ordering::Relaxed) == 2 { panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); } - log_debug!(logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.", - channel_id); + log_debug!( + logger, + "ChannelMonitor update in flight, holding messages until the update completes.", + ); false }, ChannelMonitorUpdateStatus::Completed => { @@ -3695,8 +3697,7 @@ fn handle_monitor_update_res( macro_rules! handle_initial_monitor { ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); - let update_completed = - handle_monitor_update_res($self, $update_res, $chan.context.channel_id(), logger); + let update_completed = handle_monitor_update_res($self, $update_res, logger); if update_completed { handle_monitor_update_completion!( $self, @@ -3731,7 +3732,7 @@ fn handle_new_monitor_update_internal( if cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire) { let update_res = cm.get_cm().chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]); - let update_completed = handle_monitor_update_res(cm, update_res, channel_id, logger); + let update_completed = handle_monitor_update_res(cm, update_res, logger); if update_completed { let _ = in_flight_updates.remove(update_idx); } @@ -3819,6 +3820,11 @@ macro_rules! 
handle_new_monitor_update { $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr ) => {{ let (update_completed, all_updates_complete) = handle_new_monitor_update_internal( $self, &mut $peer_state.in_flight_monitor_updates, @@ -4647,7 +4653,7 @@ where }; if let Some(mut chan) = peer_state.channel_by_id.remove(channel_id) { - log_error!(logger, "Force-closing channel {}", channel_id); + log_error!(logger, "Force-closing channel"); let err = ChannelError::Close((message, reason)); let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan); mem::drop(peer_state_lock); @@ -4660,7 +4666,7 @@ where let _ = handle_error!(self, Err::<(), _>(e), *peer_node_id); Ok(()) } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() { - log_error!(logger, "Force-closing inbound channel request {}", &channel_id); + log_error!(logger, "Force-closing inbound channel request"); if !is_from_counterparty && peer_state.is_connected { peer_state.pending_msg_events.push( MessageSendEvent::HandleError { @@ -5151,11 +5157,7 @@ where }); } let logger = WithChannelContext::from(&self.logger, &chan.context, None); - log_trace!( - logger, - "Attempting to generate broadcast channel update for channel {}", - &chan.context.channel_id() - ); + log_trace!(logger, "Attempting to generate broadcast channel update"); self.get_channel_update_for_unicast(chan) } @@ -5173,14 +5175,14 @@ where #[rustfmt::skip] fn get_channel_update_for_unicast(&self, chan: &FundedChannel) -> Result { let logger = WithChannelContext::from(&self.logger, &chan.context, None); - log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id()); + log_trace!(logger, "Attempting to generate channel update"); let short_channel_id = match chan.funding.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) { None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}), Some(id) => id, }; let logger = WithChannelContext::from(&self.logger, &chan.context, None); - log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id()); + log_trace!(logger, "Generating channel update"); let were_node_one = self.our_network_pubkey.serialize()[..]
< chan.context.get_counterparty_node_id().serialize()[..]; let enabled = chan.context.is_enabled(); @@ -6765,7 +6767,7 @@ where }, None => { let error = format!( - "Channel with id {next_hop_channel_id} not found for the passed counterparty node_id {next_node_id}" + "Channel not found for the passed counterparty node_id {next_node_id}" ); let logger = WithContext::from( &self.logger, @@ -6774,7 +6776,11 @@ where None, ); log_error!(logger, "{error} when attempting to forward intercepted HTLC"); - return Err(APIError::ChannelUnavailable { err: error }); + return Err(APIError::ChannelUnavailable { + err: format!( + "Channel with id {next_hop_channel_id} not found for the passed counterparty node_id {next_node_id}" + ), + }); }, } }; @@ -7508,8 +7514,8 @@ where } else { "alternate" }; - log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}", - prev_outbound_scid_alias, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id); + log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} with corresponding peer {}", + prev_outbound_scid_alias, &payment_hash, short_chan_id, channel_description, &counterparty_node_id); if let Err((reason, msg)) = optimal_channel.queue_add_htlc( *outgoing_amt_msat, *payment_hash, @@ -8071,8 +8077,8 @@ where chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersistNoEvents; } - log_trace!(logger, "Channel {} qualifies for a feerate change from {} to {}.", - &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate); + log_trace!(logger, "Channel qualifies for a feerate change from {} to {}.", + chan.context.get_feerate_sat_per_1000_weight(), new_feerate); chan.queue_update_fee(new_feerate, &self.fee_estimator, &&logger); NotifyOption::DoPersist @@ -8211,8 +8217,8 @@ where if peer_state.is_connected { if funded_chan.should_disconnect_peer_awaiting_response() { let logger = WithChannelContext::from(&self.logger, &funded_chan.context, None); - log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}", - counterparty_node_id, chan_id); + log_debug!(logger, "Disconnecting peer {} due to not making any progress", + counterparty_node_id); pending_msg_events.push(MessageSendEvent::HandleError { node_id: counterparty_node_id, action: msgs::ErrorAction::DisconnectPeerWithWarning { @@ -8234,8 +8240,8 @@ where let context = chan.context(); let logger = WithChannelContext::from(&self.logger, context, None); log_error!(logger, - "Force-closing pending channel with ID {} for not establishing in a timely manner", - context.channel_id()); + "Force-closing pending channel for not establishing in a timely manner", + ); let reason = ClosureReason::FundingTimedOut; let msg = "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(); let err = ChannelError::Close((msg, reason)); @@ -8960,8 +8966,11 @@ where let (action_opt, raa_blocker_opt) = completion_action(Some(htlc_value_msat), false); if let Some(action) = action_opt { - log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}", - chan_id, action); + log_trace!( + logger, + "Tracking monitor update completion action: {:?}", + action + ); peer_state .monitor_update_blocked_actions .entry(chan_id) @@ -9032,8 +9041,8 @@ where mem::drop(peer_state_lock); - log_trace!(logger, "Completing monitor update completion 
action for channel {} as claim was redundant: {:?}", - chan_id, action); + log_trace!(logger, "Completing monitor update completion action as claim was redundant: {:?}", + action); if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { downstream_counterparty_node_id: node_id, blocking_action: blocker, @@ -9104,6 +9113,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ payment_info, }], channel_id: Some(prev_hop.channel_id), + encoded_channel: None, }; // We don't have any idea if this is a duplicate claim without interrogating the @@ -9134,8 +9144,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(action) = action_opt { log_trace!( logger, - "Tracking monitor update completion action for closed channel {}: {:?}", - chan_id, + "Tracking monitor update completion action for closed channel: {:?}", action ); peer_state @@ -9518,8 +9527,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ channel_ready_order: ChannelReadyOrder, ) -> (Option<(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { let logger = WithChannelContext::from(&self.logger, &channel.context, None); - log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", - &channel.context.channel_id(), + log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", if raa.is_some() { "an" } else { "no" }, if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(), pending_update_adds.len(), @@ -10320,6 +10328,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fail_chan!("Already had channel with the new channel_id"); }, hash_map::Entry::Vacant(e) => { + monitor.update_encoded_channel(chan.encode()); let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { // There's no problem signing a counterparty's funding transaction if our monitor @@ -10484,6 +10493,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ match chan .funding_signed(&msg, best_block, &self.signer_provider, &self.logger) .and_then(|(funded_chan, monitor)| { + monitor.update_encoded_channel(funded_chan.encode()); self.chain_monitor .watch_channel(funded_chan.context.channel_id(), monitor) .map_err(|()| { @@ -10809,7 +10819,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let announcement_sigs_opt = try_channel_entry!(self, peer_state, res, chan_entry); if let Some(announcement_sigs) = announcement_sigs_opt { - log_trace!(logger, "Sending announcement_signatures for channel {}", chan.context.channel_id()); + log_trace!(logger, "Sending announcement_signatures"); peer_state.pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), msg: announcement_sigs, @@ -10820,7 +10830,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // counterparty's announcement_signatures. Thus, we only bother to send a // channel_update here if the channel is not public, i.e. 
we're not sending an // announcement_signatures. - log_trace!(logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id()); + log_trace!(logger, "Sending private initial channel_update for our counterparty"); if let Ok(msg) = self.get_channel_update_for_unicast(chan) { peer_state.pending_msg_events.push(MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id.clone(), @@ -10872,9 +10882,15 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if !chan.received_shutdown() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); - log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.", - msg.channel_id, - if chan.sent_shutdown() { " after we initiated shutdown" } else { "" }); + log_info!( + logger, + "Received a shutdown message from our counterparty {}.", + if chan.sent_shutdown() { + " after we initiated shutdown" + } else { + "" + } + ); } let funding_txo_opt = chan.funding.get_funding_txo(); @@ -10914,7 +10930,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ chan_entry.get().context(), None, ); - log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id); + log_error!(logger, "Immediately closing unfunded channel as peer asked to cooperatively shut it down (which is unnecessary)"); let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); @@ -11077,8 +11093,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let HTLCSource::PreviousHopData(prev_hop) = &res.0 { let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, - "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor", - msg.channel_id); + "Holding the next revoke_and_ack until the preimage is durably persisted in the inbound edge's ChannelMonitor", + ); peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id) .or_insert_with(Vec::new) .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop)); @@ -11195,6 +11211,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(chan) = chan.as_funded_mut() { if let Some(monitor) = monitor_opt { + monitor.update_encoded_channel(chan.encode()); let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { handle_initial_monitor!(self, persist_state, peer_state_lock, peer_state, @@ -11667,7 +11684,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ return Ok(NotifyOption::SkipPersistNoEvents); } else { let logger = WithChannelContext::from(&self.logger, &chan.context, None); - log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id); + log_debug!(logger, "Received channel_update {:?}.", msg); let did_change = try_channel_entry!(self, peer_state, chan.channel_update(&msg), chan_entry); // If nothing changed after applying their update, we don't need to bother // persisting. @@ -11755,8 +11772,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } }, hash_map::Entry::Vacant(_) => { - log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure", - msg.channel_id); + log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel to force channel closure", + ); // Unfortunately, lnd doesn't force close on errors // (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119). // One of the few ways to get an lnd counterparty to force close is by @@ -11955,11 +11972,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } if let Some(announcement_sigs) = splice_promotion.announcement_sigs { - log_trace!( - logger, - "Sending announcement_signatures for channel {}", - chan.context.channel_id() - ); + log_trace!(logger, "Sending announcement_signatures",); peer_state.pending_msg_events.push( MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), @@ -12298,8 +12311,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(shutdown) = shutdown_result { let context = chan.context(); let logger = WithChannelContext::from(&self.logger, context, None); - let chan_id = context.channel_id(); - log_trace!(logger, "Removing channel {} now that the signer is unblocked", chan_id); + log_trace!(logger, "Removing channel now that the signer is unblocked"); let (remove, err) = if let Some(funded) = chan.as_funded_mut() { let err = convert_channel_err!(self, peer_state, shutdown, funded, COOP_CLOSED); @@ -13552,8 +13564,8 @@ where // Check that, while holding the peer lock, we don't have anything else // blocking monitor updates for this channel. If we do, release the monitor // update(s) when those blockers complete. 
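Note: the log-message changes in the surrounding hunks all follow one pattern: the channel id is dropped from the message body because the `Record` already carries it, and the `Display` impl added to `Record` later in this diff prints a truncated form of it as part of a fixed-width prefix. The following is a minimal, self-contained sketch of that formatting convention; it uses invented toy types, not LDK's actual `Record`.

```rust
// Toy model (not LDK's types) of the formatting convention this diff moves to:
// the log line carries level/module:line and a shortened channel id, so the
// individual messages no longer need to repeat the channel id themselves.
use std::fmt;

struct ToyRecord {
    level: &'static str,
    module_path: &'static str,
    line: u32,
    channel_id: Option<String>, // hex channel id, if known
    args: String,               // the message body
}

impl fmt::Display for ToyRecord {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Shorten the channel id to its first six hex characters, mirroring the
        // `Display` impl added to `Record` later in this diff.
        let chan = self.channel_id.as_deref().and_then(|s| s.get(..6)).unwrap_or("");
        let context = format!("{:<5} [{}:{}]", self.level, self.module_path, self.line);
        write!(f, "{:<48} {:<6} {}", context, chan, self.args)
    }
}

fn main() {
    let record = ToyRecord {
        level: "TRACE",
        module_path: "lightning::ln::channelmanager",
        line: 5157,
        channel_id: Some("a1b2c3d4e5f6".to_string()),
        args: "Attempting to generate broadcast channel update".to_string(),
    };
    // With the id in the prefix, the message body can stay short.
    println!("{}", record);
}
```

Keeping the id in a fixed-width prefix preserves grep-ability across log lines while letting the per-message formatting shrink.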
- log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first", - &channel_id); + log_trace!(logger, "Delaying monitor unlock as another channel's mon update needs to complete first", + ); break; } @@ -13562,8 +13574,8 @@ where if let Some(chan) = chan_entry.get_mut().as_funded_mut() { let channel_funding_outpoint = chan.funding_outpoint(); if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() { - log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor", - channel_id); + log_debug!(logger, "Unlocking monitor updating and updating monitor", + ); handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update, peer_state_lck, peer_state, per_peer_state, chan); if further_update_exists { @@ -13572,8 +13584,8 @@ where continue; } } else { - log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update", - channel_id); + log_trace!(logger, "Unlocked monitor updating without monitors to update", + ); } } } @@ -13625,6 +13637,7 @@ where updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, }], + encoded_channel: None, }; let during_startup = @@ -14339,7 +14352,7 @@ where Some(FundingConfirmedMessage::Establishment(channel_ready)) => { send_channel_ready!(self, pending_msg_events, funded_channel, channel_ready); if funded_channel.context.is_usable() && peer_state.is_connected { - log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel_id); + log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty"); if let Ok(msg) = self.get_channel_update_for_unicast(funded_channel) { pending_msg_events.push(MessageSendEvent::SendChannelUpdate { node_id: funded_channel.context.get_counterparty_node_id(), @@ -14347,7 +14360,7 @@ where }); } } else { - log_trace!(logger, "Sending channel_ready WITHOUT channel_update for {}", channel_id); + log_trace!(logger, "Sending channel_ready WITHOUT channel_update"); } }, Some(FundingConfirmedMessage::Splice(splice_locked, funding_txo, monitor_update_opt, discarded_funding)) => { @@ -14440,7 +14453,7 @@ where } if let Some(announcement_sigs) = announcement_sigs { if peer_state.is_connected { - log_trace!(logger, "Sending announcement_signatures for channel {}", funded_channel.context.channel_id()); + log_trace!(logger, "Sending announcement_signatures"); pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures { node_id: funded_channel.context.get_counterparty_node_id(), msg: announcement_sigs, @@ -16764,6 +16777,19 @@ where let mut failed_htlcs = Vec::new(); let channel_count: u64 = Readable::read(reader)?; + + // Discard channel manager versions of channels. 
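Note: the deserialization changes that begin here pair with the `update_encoded_channel`/`encoded_channel` plumbing added in earlier hunks: in the loop that follows, the manager-serialized channels are read and discarded, and each channel is instead rebuilt from the encoded copy returned by `monitor.get_encoded_channel()` (monitors with no encoded channel, e.g. after a channel has shut down, are skipped). Below is a standalone toy sketch of that reload shape, with invented types standing in for LDK's `FundedChannel` and `ChannelMonitor`.

```rust
// Toy sketch (invented types, not LDK's API) of the reload strategy in the
// surrounding hunks: channels are rebuilt from the encoded copy each monitor
// carries rather than from the ChannelManager's own serialization.
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
struct ToyChannel {
    id: [u8; 4],
    balance_msat: u64,
}

impl ToyChannel {
    fn encode(&self) -> Vec<u8> {
        let mut bytes = self.id.to_vec();
        bytes.extend_from_slice(&self.balance_msat.to_be_bytes());
        bytes
    }
    fn decode(bytes: &[u8]) -> Option<ToyChannel> {
        let id: [u8; 4] = bytes.get(..4)?.try_into().ok()?;
        let balance = u64::from_be_bytes(bytes.get(4..12)?.try_into().ok()?);
        Some(ToyChannel { id, balance_msat: balance })
    }
}

struct ToyMonitor {
    channel_id: [u8; 4],
    // Refreshed whenever the channel is handed to the chain monitor (cf. the
    // `update_encoded_channel(chan.encode())` calls in earlier hunks); `None`
    // once the channel itself is gone.
    encoded_channel: Option<Vec<u8>>,
}

fn reload_channels(monitors: &[ToyMonitor]) -> HashMap<[u8; 4], ToyChannel> {
    let mut channels = HashMap::new();
    for monitor in monitors {
        // A monitor can outlive its channel (e.g. after shutdown); in that
        // case there is simply nothing to rebuild.
        let Some(bytes) = monitor.encoded_channel.as_deref() else { continue };
        if let Some(channel) = ToyChannel::decode(bytes) {
            channels.insert(monitor.channel_id, channel);
        }
    }
    channels
}

fn main() {
    let chan = ToyChannel { id: [1, 2, 3, 4], balance_msat: 42_000 };
    let monitors = vec![
        ToyMonitor { channel_id: chan.id, encoded_channel: Some(chan.encode()) },
        ToyMonitor { channel_id: [9, 9, 9, 9], encoded_channel: None },
    ];
    let reloaded = reload_channels(&monitors);
    assert_eq!(reloaded.len(), 1);
    assert_eq!(reloaded[&[1, 2, 3, 4]], chan);
}
```

The hunks that follow keep the existing stale-manager safeguards: the rebuilt channel's commitment numbers and monitor update id are still compared against the monitor, and the channel is force-closed if it is behind.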
+ for _ in 0..channel_count { + _ = FundedChannel::read( + reader, + ( + &args.entropy_source, + &args.signer_provider, + &provided_channel_type_features(&args.config), + ), + )?; + } + let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128)); let mut per_peer_state = hash_map_with_capacity(cmp::min( channel_count as usize, @@ -16772,9 +16798,16 @@ where let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); - for _ in 0..channel_count { + for (_, monitor) in args.channel_monitors.iter() { + let opt_encoded_channel = monitor.get_encoded_channel(); + if opt_encoded_channel.is_none() { + // Monitor still exists, but there is no more channel state. This can happen after channel shut down. + continue; + } + let encoded_channel = opt_encoded_channel.unwrap(); + let encoded_channel_reader = &mut &encoded_channel[..]; let mut channel: FundedChannel = FundedChannel::read( - reader, + encoded_channel_reader, ( &args.entropy_source, &args.signer_provider, @@ -16784,169 +16817,82 @@ where let logger = WithChannelContext::from(&args.logger, &channel.context, None); let channel_id = channel.context.channel_id(); channel_id_set.insert(channel_id); - if let Some(ref mut monitor) = args.channel_monitors.get_mut(&channel_id) { + if channel.get_cur_holder_commitment_transaction_number() + > monitor.get_cur_holder_commitment_number() + || channel.get_revoked_counterparty_commitment_transaction_number() + > monitor.get_min_seen_secret() + || channel.get_cur_counterparty_commitment_transaction_number() + > monitor.get_cur_counterparty_commitment_number() + || channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() + { + // But if the channel is behind of the monitor, close the channel: + log_error!( + logger, + "A ChannelManager is stale compared to the current ChannelMonitor!" + ); + log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); + if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() { + log_error!(logger, " The ChannelMonitor is at update_id {} but the ChannelManager is at update_id {}.", + monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); + } if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() - || channel.get_revoked_counterparty_commitment_transaction_number() - > monitor.get_min_seen_secret() - || channel.get_cur_counterparty_commitment_transaction_number() - > monitor.get_cur_counterparty_commitment_number() - || channel.context.get_latest_monitor_update_id() - < monitor.get_latest_update_id() { - // But if the channel is behind of the monitor, close the channel: - log_error!( - logger, - "A ChannelManager is stale compared to the current ChannelMonitor!" 
- ); - log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); - if channel.context.get_latest_monitor_update_id() - < monitor.get_latest_update_id() - { - log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", - &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); - } - if channel.get_cur_holder_commitment_transaction_number() - > monitor.get_cur_holder_commitment_number() - { - log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.", - &channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number()); - } - if channel.get_revoked_counterparty_commitment_transaction_number() - > monitor.get_min_seen_secret() - { - log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.", - &channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number()); - } - if channel.get_cur_counterparty_commitment_transaction_number() - > monitor.get_cur_counterparty_commitment_number() - { - log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", - &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number()); - } - let shutdown_result = - channel.force_shutdown(ClosureReason::OutdatedChannelManager); - if shutdown_result.unbroadcasted_batch_funding_txid.is_some() { - return Err(DecodeError::InvalidValue); - } - if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = - shutdown_result.monitor_update - { - // Our channel information is out of sync with the `ChannelMonitor`, so - // force the update to use the `ChannelMonitor`'s update_id for the close - // update. 
- let latest_update_id = monitor.get_latest_update_id().saturating_add(1); - update.update_id = latest_update_id; - per_peer_state - .entry(counterparty_node_id) - .or_insert_with(|| Mutex::new(empty_peer_state())) - .lock() - .unwrap() - .closed_channel_monitor_update_ids - .entry(channel_id) - .and_modify(|v| *v = cmp::max(latest_update_id, *v)) - .or_insert(latest_update_id); - - close_background_events.push( - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, - funding_txo, - channel_id, - update, - }, - ); - } - for (source, hash, cp_id, chan_id) in shutdown_result.dropped_outbound_htlcs { - let reason = LocalHTLCFailureReason::ChannelClosed; - failed_htlcs.push((source, hash, cp_id, chan_id, reason, None)); - } - channel_closures.push_back(( - events::Event::ChannelClosed { - channel_id: channel.context.channel_id(), - user_channel_id: channel.context.get_user_id(), - reason: ClosureReason::OutdatedChannelManager, - counterparty_node_id: Some(channel.context.get_counterparty_node_id()), - channel_capacity_sats: Some(channel.funding.get_value_satoshis()), - channel_funding_txo: channel.funding.get_funding_txo(), - last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()), - }, - None, - )); - for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { - let mut found_htlc = false; - for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() { - if *channel_htlc_source == monitor_htlc_source { - found_htlc = true; - break; - } - } - if !found_htlc { - // If we have some HTLCs in the channel which are not present in the newer - // ChannelMonitor, they have been removed and should be failed back to - // ensure we don't forget them entirely. Note that if the missing HTLC(s) - // were actually claimed we'd have generated and ensured the previous-hop - // claim update ChannelMonitor updates were persisted prior to persising - // the ChannelMonitor update for the forward leg, so attempting to fail the - // backwards leg of the HTLC will simply be rejected. 
- let logger = WithChannelContext::from( - &args.logger, - &channel.context, - Some(*payment_hash), - ); - log_info!(logger, - "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager", - &channel.context.channel_id(), &payment_hash); - failed_htlcs.push(( - channel_htlc_source.clone(), - *payment_hash, - channel.context.get_counterparty_node_id(), - channel.context.channel_id(), - LocalHTLCFailureReason::ChannelClosed, - None, - )); - } - } - } else { - channel.on_startup_drop_completed_blocked_mon_updates_through( - &logger, - monitor.get_latest_update_id(), - ); - log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates", - &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(), - monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending()); - if let Some(short_channel_id) = channel.funding.get_short_channel_id() { - short_to_chan_info.insert( - short_channel_id, - ( - channel.context.get_counterparty_node_id(), - channel.context.channel_id(), - ), - ); - } - - for short_channel_id in channel.context.historical_scids() { - let cp_id = channel.context.get_counterparty_node_id(); - let chan_id = channel.context.channel_id(); - short_to_chan_info.insert(*short_channel_id, (cp_id, chan_id)); - } - + log_error!(logger, " The ChannelMonitor is at holder commitment number {} but the ChannelManager is at holder commitment number {}.", + monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number()); + } + if channel.get_revoked_counterparty_commitment_transaction_number() + > monitor.get_min_seen_secret() + { + log_error!(logger, " The ChannelMonitor is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.", + monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number()); + } + if channel.get_cur_counterparty_commitment_transaction_number() + > monitor.get_cur_counterparty_commitment_number() + { + log_error!(logger, " The ChannelMonitor is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", + monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number()); + } + let shutdown_result = channel.force_shutdown(ClosureReason::OutdatedChannelManager); + if shutdown_result.unbroadcasted_batch_funding_txid.is_some() { + return Err(DecodeError::InvalidValue); + } + if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = + shutdown_result.monitor_update + { + // Our channel information is out of sync with the `ChannelMonitor`, so + // force the update to use the `ChannelMonitor`'s update_id for the close + // update. 
+ let latest_update_id = monitor.get_latest_update_id().saturating_add(1); + update.update_id = latest_update_id; per_peer_state - .entry(channel.context.get_counterparty_node_id()) + .entry(counterparty_node_id) .or_insert_with(|| Mutex::new(empty_peer_state())) - .get_mut() + .lock() .unwrap() - .channel_by_id - .insert(channel.context.channel_id(), Channel::from(channel)); + .closed_channel_monitor_update_ids + .entry(channel_id) + .and_modify(|v| *v = cmp::max(latest_update_id, *v)) + .or_insert(latest_update_id); + + close_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo, + channel_id, + update, + }, + ); + } + for (source, hash, cp_id, chan_id) in shutdown_result.dropped_outbound_htlcs { + let reason = LocalHTLCFailureReason::ChannelClosed; + failed_htlcs.push((source, hash, cp_id, chan_id, reason, None)); } - } else if channel.is_awaiting_initial_mon_persist() { - // If we were persisted and shut down while the initial ChannelMonitor persistence - // was in-progress, we never broadcasted the funding transaction and can still - // safely discard the channel. channel_closures.push_back(( events::Event::ChannelClosed { channel_id: channel.context.channel_id(), user_channel_id: channel.context.get_user_id(), - reason: ClosureReason::DisconnectedPeer, + reason: ClosureReason::OutdatedChannelManager, counterparty_node_id: Some(channel.context.get_counterparty_node_id()), channel_capacity_sats: Some(channel.funding.get_value_satoshis()), channel_funding_txo: channel.funding.get_funding_txo(), @@ -16954,20 +16900,68 @@ where }, None, )); + for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { + let mut found_htlc = false; + for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() { + if *channel_htlc_source == monitor_htlc_source { + found_htlc = true; + break; + } + } + if !found_htlc { + // If we have some HTLCs in the channel which are not present in the newer + // ChannelMonitor, they have been removed and should be failed back to + // ensure we don't forget them entirely. Note that if the missing HTLC(s) + // were actually claimed we'd have generated and ensured the previous-hop + // claim update ChannelMonitor updates were persisted prior to persising + // the ChannelMonitor update for the forward leg, so attempting to fail the + // backwards leg of the HTLC will simply be rejected. + let logger = WithChannelContext::from( + &args.logger, + &channel.context, + Some(*payment_hash), + ); + log_info!(logger, + "Failing HTLC with hash {} as it is missing in the ChannelMonitor but was present in the (stale) ChannelManager", + &payment_hash); + failed_htlcs.push(( + channel_htlc_source.clone(), + *payment_hash, + channel.context.get_counterparty_node_id(), + channel.context.channel_id(), + LocalHTLCFailureReason::ChannelClosed, + None, + )); + } + } } else { - log_error!( - logger, - "Missing ChannelMonitor for channel {} needed by ChannelManager.", - &channel.context.channel_id() - ); - log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); - log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); - log_error!( - logger, - " Without the ChannelMonitor we cannot continue without risking funds." 
+ channel.on_startup_drop_completed_blocked_mon_updates_through( + &logger, + monitor.get_latest_update_id(), ); - log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); - return Err(DecodeError::InvalidValue); + log_info!(logger, "Successfully loaded at update_id {} against monitor at update id {} with {} blocked updates", + channel.context.get_latest_monitor_update_id(), + monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending()); + if let Some(short_channel_id) = channel.funding.get_short_channel_id() { + short_to_chan_info.insert( + short_channel_id, + (channel.context.get_counterparty_node_id(), channel.context.channel_id()), + ); + } + + for short_channel_id in channel.context.historical_scids() { + let cp_id = channel.context.get_counterparty_node_id(); + let chan_id = channel.context.channel_id(); + short_to_chan_info.insert(*short_channel_id, (cp_id, chan_id)); + } + + per_peer_state + .entry(channel.context.get_counterparty_node_id()) + .or_insert_with(|| Mutex::new(empty_peer_state())) + .get_mut() + .unwrap() + .channel_by_id + .insert(channel.context.channel_id(), Channel::from(channel)); } } @@ -17009,8 +17003,7 @@ where let channel_id = monitor.channel_id(); log_info!( logger, - "Queueing monitor update to ensure missing channel {} is force closed", - &channel_id + "Queueing monitor update to ensure missing channel is force closed", ); let monitor_update = ChannelMonitorUpdate { update_id: monitor.get_latest_update_id().saturating_add(1), @@ -17018,6 +17011,7 @@ where should_broadcast: true, }], channel_id: Some(monitor.channel_id()), + encoded_channel: None, }; let funding_txo = monitor.get_funding_txo(); let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { @@ -17336,8 +17330,8 @@ where { // If the channel is ahead of the monitor, return DangerousValue: log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! 
This indicates a potentially-critical violation of the chain::Watch API!"); - log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight", - chan_id, monitor.get_latest_update_id(), max_in_flight_update_id); + log_error!(logger, " The ChannelMonitor is at update_id {} with update_id through {} in-flight", + monitor.get_latest_update_id(), max_in_flight_update_id); log_error!( logger, " but the ChannelManager is at update_id {}.", @@ -17555,8 +17549,8 @@ where let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias && update_add_htlc.htlc_id == prev_hop_data.htlc_id; if matches { - log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); + log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel", + &htlc.payment_hash); } !matches }); @@ -17649,6 +17643,7 @@ where updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, }], + encoded_channel: None, }, }); } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index c161a9664c0..35c7e049c89 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -5143,7 +5143,7 @@ pub fn test_fail_holding_cell_htlc_upon_free() { assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log( "lightning::ln::channel", - format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), + "Freeing holding cell with 1 HTLC updates".to_string(), 1, ); @@ -5254,7 +5254,7 @@ pub fn test_free_and_fail_holding_cell_htlcs() { assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log( "lightning::ln::channel", - format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), + "Freeing holding cell with 2 HTLC updates".to_string(), 1, ); @@ -7336,7 +7336,7 @@ pub fn test_concurrent_monitor_claim() { // Copy ChainMonitor to simulate watchtower Alice and update block height her ChannelMonitor timeout HTLC onchain let chain_source = test_utils::TestChainSource::new(Network::Testnet); - let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice")); + let logger = test_utils::TestLogger::with_id("alice".to_string()); let persister = test_utils::TestPersister::new(); let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::new(Mutex::new( nodes[0].blocks.lock().unwrap().clone(), @@ -7387,7 +7387,7 @@ pub fn test_concurrent_monitor_claim() { // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first. 
let chain_source = test_utils::TestChainSource::new(Network::Testnet); - let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob")); + let logger = test_utils::TestLogger::with_id("bob".to_string()); let persister = test_utils::TestPersister::new(); let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks)); @@ -9843,6 +9843,7 @@ fn do_test_multi_post_event_actions(do_reload: bool) { check_added_monitors(&nodes[0], 3); } +#[cfg(not(feature = "safe_channels"))] #[xtest(feature = "_externalize_tests")] pub fn test_multi_post_event_actions() { do_test_multi_post_event_actions(true); diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index dc5d07c180e..ad68b773c3b 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -961,8 +961,12 @@ pub fn do_test_fee_spike_buffer(cfg: Option, htlc_fails: bool) { }, _ => panic!("Unexpected event"), }; - nodes[1].logger.assert_log("lightning::ln::channel", - format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1); + nodes[1].logger.assert_log( + "lightning::ln::channel", + "Attempting to fail HTLC due to fee spike buffer violation. Rebalancing is required." + .to_string(), + 1, + ); check_added_monitors(&nodes[1], 3); } else { @@ -2408,8 +2412,12 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { }, _ => panic!("Unexpected event"), }; - nodes[1].logger.assert_log("lightning::ln::channel", - format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1); + nodes[1].logger.assert_log( + "lightning::ln::channel", + "Attempting to fail HTLC due to fee spike buffer violation. Rebalancing is required." + .to_string(), + 1, + ); check_added_monitors(&nodes[1], 3); } diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index 7c0190a23a9..c3b3bc33ba9 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -403,7 +403,7 @@ where if channel.get_inbound_payment_scid().is_none() || channel.counterparty.forwarding_info.is_none() { - log_trace!(logger, "Ignoring channel {} for invoice route hints", &channel.channel_id); + log_trace!(logger, "Ignoring channel for invoice route hints"); continue; } @@ -418,8 +418,7 @@ where // look at the public channels instead. 
log_trace!( logger, - "Not including channels in invoice route hints on account of public channel {}", - &channel.channel_id + "Not including channels in invoice route hints on account of public channel", ); return vec![].into_iter().take(MAX_CHANNEL_HINTS).map(route_hint_from_channel); } @@ -465,20 +464,20 @@ where if new_now_public || new_channel_preferable { log_trace!(logger, - "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints", + "Preferring counterparty {} channel (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints", log_pubkey!(channel.counterparty.node_id), - &channel.channel_id, channel.short_channel_id, + channel.short_channel_id, channel.inbound_capacity_msat, &entry.get().channel_id, entry.get().short_channel_id, current_max_capacity); entry.insert(channel); } else { log_trace!(logger, - "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints", + "Preferring counterparty {} channel {} (SCID {:?}, {} msats) (SCID {:?}, {} msats) for invoice route hints", log_pubkey!(channel.counterparty.node_id), &entry.get().channel_id, entry.get().short_channel_id, current_max_capacity, - &channel.channel_id, channel.short_channel_id, + channel.short_channel_id, channel.inbound_capacity_msat); } }, @@ -519,24 +518,15 @@ where }; if include_channel { - log_trace!( - logger, - "Including channel {} in invoice route hints", - &channel.channel_id - ); + log_trace!(logger, "Including channel in invoice route hints",); } else if !has_enough_capacity { log_trace!( logger, - "Ignoring channel {} without enough capacity for invoice route hints", - &channel.channel_id + "Ignoring channel without enough capacity for invoice route hints", ); } else { debug_assert!(!channel.is_usable || (has_pub_unconf_chan && !channel.is_announced)); - log_trace!( - logger, - "Ignoring channel {} with disconnected peer", - &channel.channel_id - ); + log_trace!(logger, "Ignoring channel with disconnected peer",); } include_channel diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 5a287585680..fed15dd98f3 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -2388,6 +2388,7 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool } } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_restored_packages_retry() { do_test_restored_packages_retry(false); @@ -3023,6 +3024,7 @@ fn do_test_anchors_aggregated_revoked_htlc_tx(p2a_anchor: bool) { assert_eq!(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[]).len(), 6); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_anchors_aggregated_revoked_htlc_tx() { do_test_anchors_aggregated_revoked_htlc_tx(false); @@ -3116,6 +3118,7 @@ fn do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(confirm_c } } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_anchors_monitor_fixes_counterparty_payment_script_on_reload() { do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(false); @@ -3343,6 +3346,7 @@ fn test_update_replay_panics() { monitor.update_monitor(&updates[3], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap(); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_claim_event_never_handled() { // When a payment is claimed, the `ChannelMonitorUpdate` containing the payment preimage goes @@ -3570,6 +3574,7 @@ fn 
do_test_lost_preimage_monitor_events(on_counterparty_tx: bool, p2a_anchor: bo expect_payment_sent(&nodes[0], preimage_a, None, true, true); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_lost_preimage_monitor_events() { do_test_lost_preimage_monitor_events(true, false); @@ -3815,6 +3820,7 @@ fn do_test_lost_timeout_monitor_events(confirm_tx: CommitmentType, dust_htlcs: b expect_payment_failed!(nodes[0], hash_a, false); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_lost_timeout_monitor_events() { do_test_lost_timeout_monitor_events(CommitmentType::RevokedCounterparty, false, false); diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 3a6965c6646..6c3f6b17058 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -2430,6 +2430,7 @@ fn rejects_keysend_to_non_static_invoice_path() { expect_payment_failed_conditions(&nodes[0], payment_hash, true, PaymentFailedConditions::new()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn no_double_pay_with_stale_channelmanager() { // This tests the following bug: diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index f4cfb9eda00..ae5eadd5416 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -1795,6 +1795,7 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { assert_eq!(config, config_after_restart); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_onion_failure_stale_channel_update() { do_test_onion_failure_stale_channel_update(false); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 9eb85173a83..c789db3d0a4 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -997,6 +997,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { expect_payment_sent!(nodes[0], payment_preimage, Some(new_route.paths[0].hops[0].fee_msat)); } +#[cfg(not(feature = "safe_channels"))] #[test] fn retry_with_no_persist() { do_retry_with_no_persist(true); @@ -1225,6 +1226,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_completed_payment_not_retryable_on_reload() { do_test_completed_payment_not_retryable_on_reload(true); @@ -1402,6 +1404,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload( assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_dup_htlc_onchain_doesnt_fail_on_reload() { do_test_dup_htlc_onchain_doesnt_fail_on_reload(true, true, true, true); @@ -1415,6 +1418,7 @@ fn test_dup_htlc_onchain_doesnt_fail_on_reload() { do_test_dup_htlc_onchain_doesnt_fail_on_reload(false, false, false, false); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_fulfill_restart_failure() { // When we receive an update_fulfill_htlc message, we immediately consider the HTLC fully @@ -2289,8 +2293,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt); let err = format!( "Channel with id {} not found for the passed counterparty node_id {}", - log_bytes!([42; 32]), - node_c_id, + chan_id, node_c_id, ); assert_eq!(unknown_chan_id_err, Err(APIError::ChannelUnavailable { err })); @@ -4194,12 +4197,14 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, 
at_midpoint: assert!(events.is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn no_missing_sent_on_midpoint_reload() { do_no_missing_sent_on_reload(false, true); do_no_missing_sent_on_reload(true, true); } +#[cfg(not(feature = "safe_channels"))] #[test] fn no_missing_sent_on_reload() { do_no_missing_sent_on_reload(false, false); @@ -4908,6 +4913,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { } } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_payment_metadata_consistency() { do_test_payment_metadata_consistency(true, true); diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 74f081b03ae..04a7e9cbea1 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -3102,9 +3102,16 @@ where self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::SendShutdown { ref node_id, ref msg } => { - log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling Shutdown event in peer_handler for node {} for channel {}", - node_id, - &msg.channel_id); + log_debug!( + WithContext::from( + &self.logger, + Some(*node_id), + Some(msg.channel_id), + None + ), + "Handling Shutdown event in peer_handler for node {}", + node_id + ); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index f19ca89097e..b5a2c1fda3d 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -30,6 +30,7 @@ use crate::prelude::*; use crate::ln::functional_test_utils::*; +#[cfg(not(feature = "safe_channels"))] #[test] fn test_priv_forwarding_rejection() { // If we have a private channel with outbound liquidity, and diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs index 7a5d35b12a5..3155bad7804 100644 --- a/lightning/src/ln/quiescence_tests.rs +++ b/lightning/src/ln/quiescence_tests.rs @@ -701,6 +701,7 @@ fn test_quiescence_during_disconnection() { do_test_quiescence_during_disconnection(true, true); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_quiescence_termination_on_disconnect() { do_test_quiescence_termination_on_disconnect(false); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 8c9e552fa04..c95d1abda61 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -190,6 +190,7 @@ fn test_funding_peer_disconnect() { reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_no_txn_manager_serialize_deserialize() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -235,6 +236,7 @@ fn test_no_txn_manager_serialize_deserialize() { send_payment(&nodes[0], &[&nodes[1]], 1000000); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_manager_serialize_deserialize_events() { // This test makes sure the events field in ChannelManager survives de/serialization @@ -357,6 +359,7 @@ fn test_simple_manager_serialize_deserialize() { claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_manager_serialize_deserialize_inconsistent_monitor() { // Test deserializing a ChannelManager with an out-of-date ChannelMonitor @@ -705,7 +708,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, 
substantially_old: bool, } #[test] -#[cfg(feature = "std")] +#[cfg(all(feature = "std", not(feature = "safe_channels")))] fn test_data_loss_protect() { do_test_data_loss_protect(true, false, true); do_test_data_loss_protect(true, true, false); @@ -911,6 +914,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest } } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_partial_claim_before_restart() { do_test_partial_claim_before_restart(false, false); @@ -1081,6 +1085,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht } } +#[cfg(not(feature = "safe_channels"))] #[test] fn forwarded_payment_no_manager_persistence() { do_forwarded_payment_no_manager_persistence(true, true, false); @@ -1088,6 +1093,7 @@ fn forwarded_payment_no_manager_persistence() { do_forwarded_payment_no_manager_persistence(false, false, false); } +#[cfg(not(feature = "safe_channels"))] #[test] fn intercepted_payment_no_manager_persistence() { do_forwarded_payment_no_manager_persistence(true, true, true); @@ -1095,6 +1101,7 @@ fn intercepted_payment_no_manager_persistence() { do_forwarded_payment_no_manager_persistence(false, false, true); } +#[cfg(not(feature = "safe_channels"))] #[test] fn removed_payment_no_manager_persistence() { // If an HTLC is failed to us on a channel, and the ChannelMonitor persistence completes, but @@ -1173,6 +1180,7 @@ fn removed_payment_no_manager_persistence() { expect_payment_failed!(nodes[0], payment_hash, false); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_reload_partial_funding_batch() { let chanmon_cfgs = create_chanmon_cfgs(3); @@ -1236,6 +1244,7 @@ fn test_reload_partial_funding_batch() { assert!(nodes[0].node.list_channels().is_empty()); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_htlc_localremoved_persistence() { // Tests that if we fail an htlc back (update_fail_htlc message) and then restart the node, the node will resend the @@ -1419,4 +1428,3 @@ fn test_peer_storage() { let res = std::panic::catch_unwind(|| drop(nodes)); assert!(res.is_err()); } - diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index ede6acfb639..3f0128f9245 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -410,6 +410,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ send_payment(&nodes[0], &[&nodes[1]], 8000000); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_unconf_chan() { do_test_unconf_chan(true, true, false, ConnectStyle::BestBlockFirstSkippingBlocks); @@ -423,6 +424,7 @@ fn test_unconf_chan() { do_test_unconf_chan(false, false, false, ConnectStyle::BestBlockFirstReorgsOnlyTip); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_unconf_chan_via_listen() { do_test_unconf_chan(true, true, false, ConnectStyle::FullBlockViaListen); @@ -431,6 +433,7 @@ fn test_unconf_chan_via_listen() { do_test_unconf_chan(false, false, false, ConnectStyle::FullBlockViaListen); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_unconf_chan_via_funding_unconfirmed() { do_test_unconf_chan(true, true, true, ConnectStyle::BestBlockFirstSkippingBlocks); diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index e387ac3bfdd..fa532d07582 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -383,6 +383,7 @@ pub fn lock_splice<'a, 'b, 'c, 'd>( node_b.chain_source.remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script); } 
+#[cfg(not(feature = "safe_channels"))] #[test] fn test_splice_state_reset_on_disconnect() { do_test_splice_state_reset_on_disconnect(false); @@ -1019,6 +1020,7 @@ fn do_test_splice_commitment_broadcast(splice_status: SpliceStatus, claim_htlcs: } } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_splice_reestablish() { do_test_splice_reestablish(false, false); @@ -1282,6 +1284,7 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { .remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script); } +#[cfg(not(feature = "safe_channels"))] #[test] fn test_propose_splice_while_disconnected() { do_test_propose_splice_while_disconnected(false, false); diff --git a/lightning/src/util/logger.rs b/lightning/src/util/logger.rs index 283d3158144..724e407de84 100644 --- a/lightning/src/util/logger.rs +++ b/lightning/src/util/logger.rs @@ -17,6 +17,7 @@ use bitcoin::secp256k1::PublicKey; use core::cmp; use core::fmt; +use core::fmt::Display; use core::ops::Deref; use crate::ln::types::ChannelId; @@ -106,7 +107,8 @@ pub struct Record<$($args)?> { /// generated. pub peer_id: Option, /// The channel id of the channel pertaining to the logged record. May be a temporary id before - /// the channel has been funded. + /// the channel has been funded. Since channel_id is not repeated in the message body, + /// include it in the log output so entries remain clear. pub channel_id: Option, #[cfg(not(c_bindings))] /// The message body. @@ -152,15 +154,26 @@ impl<$($args)?> Record<$($args)?> { } } } + +impl<$($args)?> Display for Record<$($args)?> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let chan_id = self.channel_id.map(|id| format!("{}", id)); + let chan_str = chan_id.as_deref().and_then(|s| s.get(..6)).unwrap_or(""); + let context = format!("{:<5} [{}:{}]", self.level, self.module_path, self.line); + write!(f, "{:<48} {:<6} {}", context, chan_str, self.args) + } +} } } #[cfg(not(c_bindings))] impl_record!('a, ); #[cfg(c_bindings)] impl_record!(, 'a); -/// A trait encapsulating the operations required of a logger. +/// A trait encapsulating the operations required of a logger. Keep in mind that log messages might not be entirely +/// self-explanatory and may need accompanying context fields to be fully understood. pub trait Logger { - /// Logs the [`Record`]. + /// Logs the [`Record`]. Since the record's [`Record::channel_id`] is not embedded in the message body, log + /// implementations should print it alongside the message to keep entries clear. fn log(&self, record: Record); } diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index ad8ea224205..d0a8bb3fde6 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -1698,9 +1698,7 @@ impl TestLogger { impl Logger for TestLogger { fn log(&self, record: Record) { - let context = - format!("{} {} [{}:{}]", self.id, record.level, record.module_path, record.line); - let s = format!("{:<55} {}", context, record.args); + let s = format!("{:<6} {}", self.id, record); #[cfg(ldk_bench)] { // When benchmarking, we don't actually want to print logs, but we do want to format