Skip to content

Commit 2ebc372

Browse files
Fix async release before HTLC decode
Handle `ReleaseHeldHtlc` messages that arrive before the sender-side LSP has even queued the held HTLC for onion decoding. Unlike #4106 — which covers releases arriving after the HTLC is in `decode_update_add_htlcs` but before it reaches `pending_intercepted_htlcs` — this change preserves releases that arrive one step earlier still, which would otherwise be dropped with an "HTLC not found" error.

Co-Authored-By: HAL 9000
Co-Authored-By: Elias Rohrer <dev@tnull.de>
1 parent 2bd09e4 commit 2ebc372

3 files changed

Lines changed: 213 additions & 0 deletions

File tree

lightning/src/ln/async_payments_tests.rs

Lines changed: 164 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3458,3 +3458,167 @@ fn release_htlc_races_htlc_onion_decode() {
34583458
claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage));
34593459
assert_eq!(res, Some(PaidBolt12Invoice::StaticInvoice(static_invoice)));
34603460
}
3461+
3462+
#[test]
fn async_payment_e2e_release_before_hold_registered() {
	// Tests that an LSP will release a held htlc if the `ReleaseHeldHtlc` message was received
	// before the HTLC was fully committed to the channel, which was previously broken.
	//
	// Topology: sender -- sender_lsp -- invoice_server -- recipient, where the sender and the
	// recipient are often-offline async-payment nodes and the sender's LSP holds HTLCs.
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);

	let (sender_cfg, recipient_cfg) = (often_offline_node_cfg(), often_offline_node_cfg());
	let mut sender_lsp_cfg = test_default_channel_config();
	sender_lsp_cfg.enable_htlc_hold = true;
	let mut invoice_server_cfg = test_default_channel_config();
	invoice_server_cfg.accept_forwards_to_priv_channels = true;

	let node_chanmgrs = create_node_chanmgrs(
		4,
		&node_cfgs,
		&[Some(sender_cfg), Some(sender_lsp_cfg), Some(invoice_server_cfg), Some(recipient_cfg)],
	);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
	create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
	create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
	create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
	unify_blockheight_across_nodes(&nodes);
	let sender = &nodes[0];
	let sender_lsp = &nodes[1];
	let invoice_server = &nodes[2];
	let recipient = &nodes[3];

	// Set up the static invoice flow so the recipient has an async-receive offer served by the
	// invoice server, then take the recipient/invoice-server connection down to simulate the
	// recipient being offline when the payment starts.
	let recipient_id = vec![42; 32];
	let inv_server_paths =
		invoice_server.node.blinded_paths_for_async_recipient(recipient_id.clone(), None).unwrap();
	recipient.node.set_paths_to_static_invoice_server(inv_server_paths).unwrap();
	expect_offer_paths_requests(recipient, &[invoice_server, sender_lsp]);
	let invoice_flow_res =
		pass_static_invoice_server_messages(invoice_server, recipient, recipient_id.clone());
	let invoice = invoice_flow_res.invoice;
	let invreq_path = invoice_flow_res.invoice_request_path;

	let offer = recipient.node.get_async_receive_offer().unwrap();
	recipient.node.peer_disconnected(invoice_server.node.get_our_node_id());
	recipient.onion_messenger.peer_disconnected(invoice_server.node.get_our_node_id());
	invoice_server.node.peer_disconnected(recipient.node.get_our_node_id());
	invoice_server.onion_messenger.peer_disconnected(recipient.node.get_our_node_id());

	let amt_msat = 5000;
	let payment_id = PaymentId([1; 32]);
	sender.node.pay_for_offer(&offer, Some(amt_msat), payment_id, Default::default()).unwrap();

	let (peer_id, invreq_om) = extract_invoice_request_om(sender, &[sender_lsp, invoice_server]);
	invoice_server.onion_messenger.handle_onion_message(peer_id, &invreq_om);

	let mut events = invoice_server.node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	let (reply_path, invreq) = match events.pop().unwrap() {
		Event::StaticInvoiceRequested {
			recipient_id: ev_id, reply_path, invoice_request, ..
		} => {
			assert_eq!(recipient_id, ev_id);
			(reply_path, invoice_request)
		},
		_ => panic!(),
	};

	invoice_server
		.node
		.respond_to_static_invoice_request(invoice, reply_path, invreq, invreq_path)
		.unwrap();
	let (peer_node_id, static_invoice_om, static_invoice) =
		extract_static_invoice_om(invoice_server, &[sender_lsp, sender]);

	// Lock the HTLC in with the sender LSP, but stop before the sender's revoke_and_ack is handed
	// back to the sender LSP. This reproduces the real LSPS2 timing where ReleaseHeldHtlc can
	// arrive before the held HTLC is queued for decode on the sender LSP.
	sender.onion_messenger.handle_onion_message(peer_node_id, &static_invoice_om);
	check_added_monitors(sender, 1);
	let commitment_update = get_htlc_update_msgs(&sender, &sender_lsp.node.get_our_node_id());
	let update_add = commitment_update.update_add_htlcs[0].clone();
	let payment_hash = update_add.payment_hash;
	assert!(update_add.hold_htlc.is_some());
	sender_lsp.node.handle_update_add_htlc(sender.node.get_our_node_id(), &update_add);
	sender_lsp.node.handle_commitment_signed_batch_test(
		sender.node.get_our_node_id(),
		&commitment_update.commitment_signed,
	);
	check_added_monitors(sender_lsp, 1);
	let (_extra_msg_option, sender_raa, sender_holding_cell_htlcs) =
		do_main_commitment_signed_dance(sender_lsp, sender, false);
	assert!(sender_holding_cell_htlcs.is_empty());

	let held_htlc_om_to_inv_server = sender
		.onion_messenger
		.next_onion_message_for_peer(invoice_server.node.get_our_node_id())
		.unwrap();
	invoice_server
		.onion_messenger
		.handle_onion_message(sender_lsp.node.get_our_node_id(), &held_htlc_om_to_inv_server);

	// `RefCell` provides interior mutability, so no `mut` binding is needed here (matches the
	// second event-collection block below).
	let events_rc = core::cell::RefCell::new(Vec::new());
	invoice_server.onion_messenger.process_pending_events(&|e| Ok(events_rc.borrow_mut().push(e)));
	let events = events_rc.into_inner();
	// Pick out the intercepted HeldHtlcAvailable onion message destined for the offline
	// recipient, skipping any InvoiceRequest retries that may also have been intercepted.
	let held_htlc_om = events
		.into_iter()
		.find_map(|ev| {
			if let Event::OnionMessageIntercepted { message, .. } = ev {
				let peeled_onion = recipient.onion_messenger.peel_onion_message(&message).unwrap();
				if matches!(
					peeled_onion,
					PeeledOnion::Offers(OffersMessage::InvoiceRequest { .. }, _, _)
				) {
					return None;
				}

				assert!(matches!(
					peeled_onion,
					PeeledOnion::AsyncPayments(AsyncPaymentsMessage::HeldHtlcAvailable(_), _, _)
				));
				Some(message)
			} else {
				None
			}
		})
		.unwrap();

	let mut reconnect_args = ReconnectArgs::new(invoice_server, recipient);
	reconnect_args.send_channel_ready = (true, true);
	reconnect_nodes(reconnect_args);

	let events = core::cell::RefCell::new(Vec::new());
	invoice_server.onion_messenger.process_pending_events(&|e| Ok(events.borrow_mut().push(e)));
	assert_eq!(events.borrow().len(), 1);
	assert!(matches!(events.into_inner().pop().unwrap(), Event::OnionMessagePeerConnected { .. }));
	expect_offer_paths_requests(recipient, &[invoice_server]);

	// The recipient comes back online and releases the HTLC. Crucially, the sender LSP receives
	// the ReleaseHeldHtlc message *before* it has processed the sender's revoke_and_ack.
	recipient
		.onion_messenger
		.handle_onion_message(invoice_server.node.get_our_node_id(), &held_htlc_om);
	let (peer_id, release_htlc_om) =
		extract_release_htlc_oms(recipient, &[sender, sender_lsp, invoice_server]).pop().unwrap();
	sender_lsp.onion_messenger.handle_onion_message(peer_id, &release_htlc_om);

	// Now let the sender LSP receive the sender's revoke_and_ack and continue processing the held
	// HTLC, which previously would've resulted in holding the HTLC even though the release message
	// was already received.
	sender_lsp.node.handle_revoke_and_ack(sender.node.get_our_node_id(), &sender_raa);
	check_added_monitors(sender_lsp, 1);
	assert!(sender_lsp.node.get_and_clear_pending_msg_events().is_empty());
	sender_lsp.node.process_pending_htlc_forwards();
	let mut events = sender_lsp.node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let ev = remove_first_msg_event_to_node(&invoice_server.node.get_our_node_id(), &mut events);
	check_added_monitors(sender_lsp, 1);

	// The HTLC should now flow all the way to the recipient and be claimable.
	let path: &[&Node] = &[invoice_server, recipient];
	let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev)
		.with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]);
	let claimable_ev = do_pass_along_path(args).unwrap();

	let route: &[&[&Node]] = &[&[sender_lsp, invoice_server, recipient]];
	let keysend_preimage = extract_payment_preimage(&claimable_ev);
	let (res, _) =
		claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage));
	assert_eq!(res, Some(PaidBolt12Invoice::StaticInvoice(static_invoice)));
}

lightning/src/ln/channel.rs

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8108,6 +8108,43 @@ where
81088108
debug_assert!(false, "If we go to prune an inbound HTLC it should be present")
81098109
}
81108110

8111+
/// Clears the `hold_htlc` flag for a pending inbound HTLC, returning `true` if the HTLC was
8112+
/// successfully released. Useful when a [`ReleaseHeldHtlc`] onion message arrives before the
8113+
/// HTLC has been fully committed.
8114+
///
8115+
/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
8116+
pub(super) fn release_pending_inbound_held_htlc(&mut self, htlc_id: u64) -> bool {
8117+
for update_add in self.context.monitor_pending_update_adds.iter_mut() {
8118+
if update_add.htlc_id == htlc_id {
8119+
update_add.hold_htlc.take();
8120+
return true;
8121+
}
8122+
}
8123+
for htlc in self.context.pending_inbound_htlcs.iter_mut() {
8124+
if htlc.htlc_id != htlc_id {
8125+
continue;
8126+
}
8127+
match &mut htlc.state {
8128+
// Clearing `hold_htlc` here directly affects the copy that will be cloned into the decode
8129+
// pipeline when RAA promotes the HTLC.
8130+
InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Pending {
8131+
update_add_htlc,
8132+
})
8133+
| InboundHTLCState::AwaitingRemoteRevokeToAnnounce(
8134+
InboundHTLCResolution::Pending { update_add_htlc },
8135+
)
8136+
| InboundHTLCState::AwaitingAnnouncedRemoteRevoke(
8137+
InboundHTLCResolution::Pending { update_add_htlc },
8138+
) => {
8139+
update_add_htlc.hold_htlc.take();
8140+
return true;
8141+
},
8142+
_ => return false,
8143+
}
8144+
}
8145+
false
8146+
}
8147+
81118148
/// Useful for testing crash scenarios where the holding cell is not persisted.
81128149
#[cfg(test)]
81138150
pub(super) fn test_clear_holding_cell(&mut self) {

lightning/src/ln/channelmanager.rs

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17189,6 +17189,18 @@ impl<
1718917189
htlc_id,
1719017190
} => {
1719117191
let _serialize_guard = PersistenceNotifierGuard::notify_on_drop(self);
17192+
// It's possible the release_held_htlc message raced ahead of us fully committing to the
17193+
// HTLC. If that's the case, update the pending update_add to indicate that the HTLC should
17194+
// be released immediately.
17195+
let released_pre_commitment_htlc = self
17196+
.do_funded_channel_callback(prev_outbound_scid_alias, |chan| {
17197+
chan.release_pending_inbound_held_htlc(htlc_id)
17198+
})
17199+
.unwrap_or(false);
17200+
if released_pre_commitment_htlc {
17201+
return;
17202+
}
17203+
1719217204
// It's possible the release_held_htlc message raced ahead of us transitioning the pending
1719317205
// update_add to `Self::pending_intercept_htlcs`. If that's the case, update the pending
1719417206
// update_add to indicate that the HTLC should be released immediately.

0 commit comments

Comments
 (0)