Locktimed packages fixes #3923
Base: main
Changes from all commits: c915c99, 92fe3aa, f916e13, 9a95544, 21be9c5
```diff
@@ -269,6 +269,9 @@ pub struct OnchainTxHandler<ChannelSigner: EcdsaChannelSigner> {
 	#[cfg(not(any(test, feature = "_test_utils")))]
 	claimable_outpoints: HashMap<BitcoinOutPoint, (ClaimId, u32)>,

+	#[cfg(any(test, feature = "_test_utils"))]
+	pub(crate) locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,
+	#[cfg(not(any(test, feature = "_test_utils")))]
 	locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,

 	onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,
```
```diff
@@ -886,9 +889,10 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
 		// Because fuzzing can cause hash collisions, we can end up with conflicting claim
 		// ids here, so we only assert when not fuzzing.
 		debug_assert!(cfg!(fuzzing) || self.pending_claim_requests.get(&claim_id).is_none());
-		for k in req.outpoints() {
-			log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
-			self.claimable_outpoints.insert(k.clone(), (claim_id, conf_height));
+		for (k, outpoint_confirmation_height) in req.outpoints_and_creation_heights() {
+			let creation_height = outpoint_confirmation_height.unwrap_or(conf_height);
+			log_info!(logger, "Registering claiming request for {}:{}, which exists as of height {creation_height}", k.txid, k.vout);
+			self.claimable_outpoints.insert(k.clone(), (claim_id, creation_height));
 		}
 		self.pending_claim_requests.insert(claim_id, req);
 	}
```

Review discussion on this hunk:

- "Hm, shouldn't we prefer …"
- "In practice I think this would just result in us broadcasting things that cannot enter the mempool until we get back to the expected height. If we have any other claims that were merged into the same package for whatever reason, and they are still valid at the disconnected block height, then this would be a greater issue."
- "The problem is that when things time out via …"
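The fallback the reviewers are discussing is just `Option::unwrap_or`: outpoints deserialized from older data may not carry their own creation height, in which case the claim's confirmation height is used. A toy sketch, where the tuple list stands in for what `req.outpoints_and_creation_heights()` yields and the names are hypothetical:

```rust
fn main() {
    let conf_height = 820_000u32;
    // (outpoint, optional per-outpoint creation height) pairs, standing in
    // for `req.outpoints_and_creation_heights()`.
    let outpoints: Vec<(&str, Option<u32>)> =
        vec![("deadbeef:0", Some(819_990)), ("deadbeef:1", None)];

    for (outpoint, outpoint_confirmation_height) in outpoints {
        // Fall back to the claim's confirmation height when no
        // per-outpoint height was recorded.
        let creation_height = outpoint_confirmation_height.unwrap_or(conf_height);
        println!("registering {outpoint} as of height {creation_height}");
    }
}
```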
```diff
@@ -994,6 +998,17 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
 						panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
 					}
 				}
+
+				// Also remove/split any locktimed packages whose inputs have been spent by this transaction.
+				self.locktimed_packages.retain(|_locktime, packages|{
+					packages.retain_mut(|package| {
+						if let Some(p) = package.split_package(&inp.previous_output) {
+							claimed_outputs_material.push(p);
+						}
+						!package.outpoints().is_empty()
+					});
+					!packages.is_empty()
+				});
 			}
 			for package in claimed_outputs_material.drain(..) {
 				let entry = OnchainEventEntry {
```
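The nested `retain`/`retain_mut` shape above can be exercised standalone: prune a spent outpoint out of every parked package, collect the split-off single-input packages, and drop packages and map entries that end up empty. In this sketch `Package` and its `split_package` are simplified stand-ins for LDK's types (the real method also carries witness material along):

```rust
use std::collections::BTreeMap;

// Toy package: a set of outpoint ids, with a `split_package` that removes a
// matching outpoint and returns it as its own single-input package.
#[derive(Debug)]
struct Package {
    outpoints: Vec<u32>,
}

impl Package {
    fn split_package(&mut self, spent: u32) -> Option<Package> {
        let idx = self.outpoints.iter().position(|&o| o == spent)?;
        let outpoint = self.outpoints.remove(idx);
        Some(Package { outpoints: vec![outpoint] })
    }
}

fn main() {
    let mut locktimed_packages: BTreeMap<u32, Vec<Package>> = BTreeMap::new();
    locktimed_packages.insert(100, vec![Package { outpoints: vec![1, 2] }]);
    locktimed_packages.insert(105, vec![Package { outpoints: vec![3] }]);

    let spent_outpoint = 3;
    let mut claimed_outputs_material = Vec::new();

    // Mirror of the patch: split out any package input spent by the confirmed
    // transaction, then prune empty packages and empty map entries.
    locktimed_packages.retain(|_locktime, packages| {
        packages.retain_mut(|package| {
            if let Some(p) = package.split_package(spent_outpoint) {
                claimed_outputs_material.push(p);
            }
            !package.outpoints.is_empty()
        });
        !packages.is_empty()
    });

    println!("claimed: {:?}", claimed_outputs_material);
    println!("remaining: {:?}", locktimed_packages);
}
```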
```diff
@@ -1135,6 +1150,13 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
 					//- resurect outpoint back in its claimable set and regenerate tx
 					match entry.event {
 						OnchainEvent::ContentiousOutpoint { package } => {
+							// We pass 0 to `package_locktime` to get the actual required locktime.
+							let package_locktime = package.package_locktime(0);
+							if package_locktime >= height {
+								self.locktimed_packages.entry(package_locktime).or_default().push(package);
+								continue;
+							}
+
 							if let Some(pending_claim) = self.claimable_outpoints.get(package.outpoints()[0]) {
 								if let Some(request) = self.pending_claim_requests.get_mut(&pending_claim.0) {
 									assert!(request.merge_package(package, height).is_ok());
```
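On disconnection, the new code checks whether a resurrected package's claim is even valid at the rewound height before handing it back to the pending-claims machinery. Below is a standalone sketch of that routing decision; `Package::package_locktime` here merely mimics the call shape (the real method returns at least the height passed in, which is why the patch passes 0 to recover the raw locktime requirement):

```rust
use std::collections::BTreeMap;

// Toy package carrying the locktime its claiming transaction requires.
#[derive(Debug)]
struct Package {
    required_locktime: u32,
}

impl Package {
    // Mimics the LDK call shape: never returns less than the current height.
    fn package_locktime(&self, current_height: u32) -> u32 {
        self.required_locktime.max(current_height)
    }
}

fn main() {
    let height = 90; // the height we disconnected back to
    let packages = vec![
        Package { required_locktime: 100 },
        Package { required_locktime: 0 },
    ];

    let mut locktimed_packages: BTreeMap<u32, Vec<Package>> = BTreeMap::new();
    let mut immediately_claimable = Vec::new();

    for package in packages {
        // Pass 0 to get the actual required locktime, as in the patch.
        let package_locktime = package.package_locktime(0);
        if package_locktime >= height {
            // Not yet valid at the disconnected height: park it until then.
            locktimed_packages.entry(package_locktime).or_default().push(package);
        } else {
            immediately_claimable.push(package);
        }
    }

    println!("parked: {:?}", locktimed_packages);
    println!("claimable now: {:?}", immediately_claimable);
}
```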
```diff
@@ -1358,19 +1380,21 @@ mod tests {
 				holder_commit_txid,
 				htlc.transaction_output_index.unwrap(),
 				PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build(HTLCDescriptor {
-					channel_derivation_parameters: ChannelDerivationParameters {
-						value_satoshis: tx_handler.channel_value_satoshis,
-						keys_id: tx_handler.channel_keys_id,
-						transaction_parameters: tx_handler.channel_transaction_parameters.clone(),
-					},
-					commitment_txid: holder_commit_txid,
-					per_commitment_number: holder_commit.commitment_number(),
-					per_commitment_point: holder_commit.per_commitment_point(),
-					feerate_per_kw: holder_commit.feerate_per_kw(),
-					htlc: htlc.clone(),
-					preimage: None,
-					counterparty_sig: *counterparty_sig,
-				})),
+						channel_derivation_parameters: ChannelDerivationParameters {
+							value_satoshis: tx_handler.channel_value_satoshis,
+							keys_id: tx_handler.channel_keys_id,
+							transaction_parameters: tx_handler.channel_transaction_parameters.clone(),
+						},
+						commitment_txid: holder_commit_txid,
+						per_commitment_number: holder_commit.commitment_number(),
+						per_commitment_point: holder_commit.per_commitment_point(),
+						feerate_per_kw: holder_commit.feerate_per_kw(),
+						htlc: htlc.clone(),
+						preimage: None,
+						counterparty_sig: *counterparty_sig,
+					},
+					0
+				)),
 				0,
 			));
 		}
```
Review comment: Looks like we can get rid of `conf_height` now.
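If per-outpoint creation heights were made mandatory as that comment suggests, the registration loop would no longer need a fallback at all. A purely hypothetical sketch of that shape, with types and names invented for illustration (this is not the PR's code):

```rust
use std::collections::HashMap;

type ClaimId = u64;

fn main() {
    // (outpoint, creation_height) pairs: every outpoint carries a concrete
    // height, so there is no Option and no `conf_height` fallback.
    let outpoints: Vec<(&str, u32)> =
        vec![("deadbeef:0", 819_990), ("deadbeef:1", 820_001)];
    let claim_id: ClaimId = 7;

    let mut claimable_outpoints: HashMap<&str, (ClaimId, u32)> = HashMap::new();
    for (k, creation_height) in outpoints {
        claimable_outpoints.insert(k, (claim_id, creation_height));
    }
    println!("{claimable_outpoints:?}");
}
```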