From f99d519508bf971e88ee41ee8e98dc4578509694 Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Tue, 16 Dec 2025 13:37:23 +0200
Subject: [PATCH 01/23] ln/events: multiple htlcs in/out for trampoline
 PaymentForwarded

---
 .../tests/lsps2_integration_tests.rs      |   8 +-
 lightning/src/events/mod.rs               | 144 +++++++++++-------
 lightning/src/ln/channelmanager.rs        |  16 +-
 lightning/src/ln/functional_test_utils.rs |  42 ++---
 lightning/src/ln/functional_tests.rs      |  26 ++--
 lightning/src/util/ser.rs                 |   1 +
 6 files changed, 136 insertions(+), 101 deletions(-)

diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs
index e4ace27b715..92052891dd5 100644
--- a/lightning-liquidity/tests/lsps2_integration_tests.rs
+++ b/lightning-liquidity/tests/lsps2_integration_tests.rs
@@ -1338,14 +1338,14 @@ fn client_trusts_lsp_end_to_end_test() {

 	let total_fee_msat = match service_events[0].clone() {
 		Event::PaymentForwarded {
-			prev_node_id,
-			next_node_id,
+			ref prev_htlcs,
+			ref next_htlcs,
 			skimmed_fee_msat,
 			total_fee_earned_msat,
 			..
 		} => {
-			assert_eq!(prev_node_id, Some(payer_node_id));
-			assert_eq!(next_node_id, Some(client_node_id));
+			assert_eq!(prev_htlcs[0].node_id, Some(payer_node_id));
+			assert_eq!(next_htlcs[0].node_id, Some(client_node_id));
 			service_handler.payment_forwarded(channel_id, skimmed_fee_msat.unwrap_or(0)).unwrap();
 			Some(total_fee_earned_msat.unwrap() - skimmed_fee_msat.unwrap())
 		},
diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index d97ae6097b6..c5deed57a7f 100644
--- a/lightning/src/events/mod.rs
+++ b/lightning/src/events/mod.rs
@@ -729,6 +729,25 @@ pub enum InboundChannelFunds {
 	DualFunded,
 }

+/// Identifies the channel and peer committed to an HTLC, used for both incoming and outgoing HTLCs.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct HTLCLocator {
+	/// The channel that the HTLC was sent or received on.
+	pub channel_id: ChannelId,
+
+	/// The `user_channel_id` for `channel_id`.
+	pub user_channel_id: Option<u128>,
+
+	/// The public key identify of the node that the HTLC was sent to or received from.
+	pub node_id: Option<PublicKey>,
+}
+
+impl_writeable_tlv_based!(HTLCLocator, {
+	(1, channel_id, required),
+	(3, user_channel_id, option),
+	(5, node_id, option),
+});
+
 /// An Event which you should probably take some action in response to.
 ///
 /// Note that while Writeable and Readable are implemented for Event, you probably shouldn't use
@@ -1316,34 +1335,14 @@ pub enum Event {
 	/// This event will eventually be replayed after failures-to-handle (i.e., the event handler
 	/// returning `Err(ReplayEvent ())`) and will be persisted across restarts.
 	PaymentForwarded {
-		/// The channel id of the incoming channel between the previous node and us.
-		///
-		/// This is only `None` for events generated or serialized by versions prior to 0.0.107.
-		prev_channel_id: Option<ChannelId>,
-		/// The channel id of the outgoing channel between the next node and us.
-		///
-		/// This is only `None` for events generated or serialized by versions prior to 0.0.107.
-		next_channel_id: Option<ChannelId>,
-		/// The `user_channel_id` of the incoming channel between the previous node and us.
-		///
-		/// This is only `None` for events generated or serialized by versions prior to 0.0.122.
-		prev_user_channel_id: Option<u128>,
-		/// The `user_channel_id` of the outgoing channel between the next node and us.
-		///
-		/// This will be `None` if the payment was settled via an on-chain transaction. See the
-		/// caveat described for the `total_fee_earned_msat` field. Moreover it will be `None` for
-		/// events generated or serialized by versions prior to 0.0.122.
-		next_user_channel_id: Option<u128>,
-		/// The node id of the previous node.
-		///
-		/// This is only `None` for HTLCs received prior to 0.1 or for events serialized by
-		/// versions prior to 0.1
-		prev_node_id: Option<PublicKey>,
-		/// The node id of the next node.
-		///
-		/// This is only `None` for HTLCs received prior to 0.1 or for events serialized by
-		/// versions prior to 0.1
-		next_node_id: Option<PublicKey>,
+		/// The set of HTLCs forwarded to our node that will be claimed by this forward. Contains a
+		/// single HTLC for source-routed payments, and may contain multiple HTLCs when we acted as
+		/// a trampoline router, responsible for pathfinding within the route.
+		prev_htlcs: Vec<HTLCLocator>,
+		/// The set of HTLCs forwarded by our node that have been claimed by this forward. Contains
+		/// a single HTLC for regular source-routed payments, and may contain multiple HTLCs when
+		/// we acted as a trampoline router, responsible for pathfinding within the route.
+		next_htlcs: Vec<HTLCLocator>,
 		/// The total fee, in milli-satoshis, which was earned as a result of the payment.
 		///
 		/// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
@@ -2027,12 +2026,8 @@ impl Writeable for Event {
 				});
 			},
 			&Event::PaymentForwarded {
-				prev_channel_id,
-				next_channel_id,
-				prev_user_channel_id,
-				next_user_channel_id,
-				prev_node_id,
-				next_node_id,
+				ref prev_htlcs,
+				ref next_htlcs,
 				total_fee_earned_msat,
 				skimmed_fee_msat,
 				claim_from_onchain_tx,
@@ -2041,15 +2036,20 @@ impl Writeable for Event {
 				7u8.write(writer)?;
 				write_tlv_fields!(writer, {
 					(0, total_fee_earned_msat, option),
-					(1, prev_channel_id, option),
+					// Type 1 was prev_channel_id in 0.2 and earlier.
 					(2, claim_from_onchain_tx, required),
-					(3, next_channel_id, option),
+					// Type 3 was next_channel_id in 0.2 and earlier.
 					(5, outbound_amount_forwarded_msat, option),
 					(7, skimmed_fee_msat, option),
-					(9, prev_user_channel_id, option),
-					(11, next_user_channel_id, option),
-					(13, prev_node_id, option),
-					(15, next_node_id, option),
+					// Type 9 was prev_user_channel_id in 0.2 and earlier.
+					// Type 11 was next_user_channel_id in 0.2 and earlier.
+					// Type 13 was prev_node_id in 0.2 and earlier.
+					// Type 15 was next_node_id in 0.2 and earlier.
+					// HTLCs are written as required, rather than required_vec, so that they can be
+					// deserialized using default_value to fill in legacy fields which expects
+					// LengthReadable (requried_vec is WithoutLength).
+					(17, *prev_htlcs, required),
+					(19, *next_htlcs, required),
 				});
 			},
 			&Event::ChannelClosed {
@@ -2544,35 +2544,63 @@ impl MaybeReadable for Event {
 			},
 			7u8 => {
 				let mut f = || {
-					let mut prev_channel_id = None;
-					let mut next_channel_id = None;
-					let mut prev_user_channel_id = None;
-					let mut next_user_channel_id = None;
-					let mut prev_node_id = None;
-					let mut next_node_id = None;
+					// Legacy values that have been replaced by prev_htlcs and next_htlcs.
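+					// They are still read at their old TLV types so that events serialized by
+					// older versions can be folded into the new vec-valued fields below.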
+ let mut prev_channel_id_legacy = None; + let mut next_channel_id_legacy = None; + let mut prev_user_channel_id_legacy = None; + let mut next_user_channel_id_legacy = None; + let mut prev_node_id_legacy = None; + let mut next_node_id_legacy = None; + let mut total_fee_earned_msat = None; let mut skimmed_fee_msat = None; let mut claim_from_onchain_tx = false; let mut outbound_amount_forwarded_msat = None; + let mut prev_htlcs = vec![]; + let mut next_htlcs = vec![]; read_tlv_fields!(reader, { (0, total_fee_earned_msat, option), - (1, prev_channel_id, option), + (1, prev_channel_id_legacy, option), (2, claim_from_onchain_tx, required), - (3, next_channel_id, option), + (3, next_channel_id_legacy, option), (5, outbound_amount_forwarded_msat, option), (7, skimmed_fee_msat, option), - (9, prev_user_channel_id, option), - (11, next_user_channel_id, option), - (13, prev_node_id, option), - (15, next_node_id, option), + (9, prev_user_channel_id_legacy, option), + (11, next_user_channel_id_legacy, option), + (13, prev_node_id_legacy, option), + (15, next_node_id_legacy, option), + // We can't unwrap prev_channel_id_legacy or next_channel_id_legacy here + // because default_value eagerly evaluates the default, so events that do + // not have legacy fields would fail. We settle for setting a zero ChannelID + // and replacing it below. + (17, prev_htlcs, (default_value, vec![HTLCLocator{ + channel_id: ChannelId::new_zero(), + user_channel_id: prev_user_channel_id_legacy, + node_id: prev_node_id_legacy, + }])), + (19, next_htlcs, (default_value, vec![HTLCLocator{ + channel_id: ChannelId::new_zero(), + user_channel_id: next_user_channel_id_legacy, + node_id: next_node_id_legacy, + }])), }); + + // If dealing with legacy serialization, we can be confident that we'll + // replace the zero value placeholders above because these fields were only + // None for events serialized before 0.0.107. We do not allow nodes with pending + // forwards to be upgraded directly to 0.1 from versions 0.0.123 or earlier, + // so we should always have Some here when reading legacy serialization. + if let Some(prev_channel_id) = prev_channel_id_legacy { + prev_htlcs[0].channel_id = prev_channel_id; + } + + if let Some(next_channel_id) = next_channel_id_legacy { + next_htlcs[0].channel_id = next_channel_id; + } + Ok(Some(Event::PaymentForwarded { - prev_channel_id, - next_channel_id, - prev_user_channel_id, - next_user_channel_id, - prev_node_id, - next_node_id, + prev_htlcs, + next_htlcs, total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f3399ff8787..ada21b8bdaf 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9520,12 +9520,16 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ ( Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { event: events::Event::PaymentForwarded { - prev_channel_id: Some(prev_channel_id), - next_channel_id: Some(next_channel_id), - prev_user_channel_id, - next_user_channel_id, - prev_node_id, - next_node_id: Some(next_channel_counterparty_node_id), + prev_htlcs: vec![events::HTLCLocator { + channel_id: prev_channel_id, + user_channel_id: prev_user_channel_id, + node_id: prev_node_id, + }], + next_htlcs: vec![events::HTLCLocator { + channel_id: next_channel_id, + user_channel_id: next_user_channel_id, + node_id: Some(next_channel_counterparty_node_id), + }], total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx: from_onchain, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e072deb6a97..33fe0c72cef 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -3059,17 +3059,16 @@ pub fn expect_payment_forwarded>( ) -> Option { match event { Event::PaymentForwarded { - prev_channel_id, - next_channel_id, - prev_user_channel_id, - next_user_channel_id, - prev_node_id, - next_node_id, + prev_htlcs, + next_htlcs, total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, .. } => { + assert_eq!(prev_htlcs.len(), 1); + assert_eq!(next_htlcs.len(), 1); + if allow_1_msat_fee_overpay { // Aggregating fees for blinded paths may result in a rounding error, causing slight // overpayment in fees. @@ -3084,33 +3083,36 @@ pub fn expect_payment_forwarded>( // overpaid amount. assert!(skimmed_fee_msat == expected_extra_fees_msat); if !upstream_force_closed { - assert_eq!(prev_node.node().get_our_node_id(), prev_node_id.unwrap()); + let prev_node_id = prev_htlcs[0].node_id.unwrap(); + let prev_channel_id = prev_htlcs[0].channel_id; + let prev_user_channel_id = prev_htlcs[0].user_channel_id.unwrap(); + + assert_eq!(prev_node.node().get_our_node_id(), prev_node_id); // Is the event prev_channel_id in one of the channels between the two nodes? let node_chans = node.node().list_channels(); - assert!(node_chans.iter().any(|x| x.counterparty.node_id == prev_node_id.unwrap() - && x.channel_id == prev_channel_id.unwrap() - && x.user_channel_id == prev_user_channel_id.unwrap())); + assert!(node_chans.iter().any(|x| x.counterparty.node_id == prev_node_id + && x.channel_id == prev_channel_id + && x.user_channel_id == prev_user_channel_id)); } // We check for force closures since a force closed channel is removed from the // node's channel list if !downstream_force_closed { + let next_node_id = next_htlcs[0].node_id.unwrap(); + let next_channel_id = next_htlcs[0].channel_id; + let next_user_channel_id = next_htlcs[0].user_channel_id.unwrap(); // As documented, `next_user_channel_id` will only be `Some` if we didn't settle via an // onchain transaction, just as the `total_fee_earned_msat` field. Rather than // introducing yet another variable, we use the latter's state as a flag to detect // this and only check if it's `Some`. 
- assert_eq!(next_node.node().get_our_node_id(), next_node_id.unwrap()); + assert_eq!(next_node.node().get_our_node_id(), next_node_id); let node_chans = node.node().list_channels(); if total_fee_earned_msat.is_none() { - assert!(node_chans - .iter() - .any(|x| x.counterparty.node_id == next_node_id.unwrap() - && x.channel_id == next_channel_id.unwrap())); + assert!(node_chans.iter().any(|x| x.counterparty.node_id == next_node_id + && x.channel_id == next_channel_id)); } else { - assert!(node_chans - .iter() - .any(|x| x.counterparty.node_id == next_node_id.unwrap() - && x.channel_id == next_channel_id.unwrap() - && x.user_channel_id == next_user_channel_id.unwrap())); + assert!(node_chans.iter().any(|x| x.counterparty.node_id == next_node_id + && x.channel_id == next_channel_id + && x.user_channel_id == next_user_channel_id)); } } assert_eq!(claim_from_onchain_tx, downstream_force_closed); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index e2963dbeb09..933cb2d2cdc 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1459,37 +1459,37 @@ pub fn test_htlc_on_chain_success() { connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let forwarded_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(forwarded_events.len(), 3); - let chan_id = Some(chan_1.2); + let chan_id = chan_1.2; match forwarded_events[0] { Event::PaymentForwarded { + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, - prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. } => { assert_eq!(total_fee_earned_msat, Some(1000)); - assert_eq!(prev_channel_id, chan_id); + assert_eq!(prev_htlcs[0].channel_id, chan_id); assert_eq!(claim_from_onchain_tx, true); - assert_eq!(next_channel_id, Some(chan_2.2)); + assert_eq!(next_htlcs[0].channel_id, chan_2.2); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, _ => panic!(), } match forwarded_events[1] { Event::PaymentForwarded { + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, - prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. } => { assert_eq!(total_fee_earned_msat, Some(1000)); - assert_eq!(prev_channel_id, chan_id); + assert_eq!(prev_htlcs[0].channel_id, chan_id); assert_eq!(claim_from_onchain_tx, true); - assert_eq!(next_channel_id, Some(chan_2.2)); + assert_eq!(next_htlcs[0].channel_id, chan_2.2); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, _ => panic!(), @@ -3965,17 +3965,17 @@ pub fn test_onchain_to_onchain_claim() { assert_eq!(events.len(), 2); match events[0] { Event::PaymentForwarded { + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, - prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. 
} => { assert_eq!(total_fee_earned_msat, Some(1000)); - assert_eq!(prev_channel_id, Some(chan_1.2)); + assert_eq!(prev_htlcs[0].channel_id, chan_1.2); assert_eq!(claim_from_onchain_tx, true); - assert_eq!(next_channel_id, Some(chan_2.2)); + assert_eq!(next_htlcs[0].channel_id, chan_2.2); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, _ => panic!("Unexpected event"), diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index f821aa5afc0..249505f2e21 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -1100,6 +1100,7 @@ impl_for_vec!(crate::routing::router::TrampolineHop); impl_for_vec_with_element_length_prefix!(crate::ln::msgs::UpdateAddHTLC); impl_writeable_for_vec_with_element_length_prefix!(&crate::ln::msgs::UpdateAddHTLC); impl_for_vec!(u32); +impl_for_vec!(crate::events::HTLCLocator); impl Writeable for Vec { #[inline] From 68669ba1e848d104205d72086b5b74e11fd228de Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 14 Jan 2026 11:37:11 -0500 Subject: [PATCH 02/23] f Write legacy fields for backwards compatibility To allow downgrade, we continue to write legacy fields. --- lightning/src/events/mod.rs | 47 +++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index c5deed57a7f..06f2a25a0dc 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -44,7 +44,7 @@ use crate::util::ser::{ WithoutLength, Writeable, Writer, }; -use crate::io; +use crate::io::{self, ErrorKind::InvalidData as IOInvalidData}; use crate::sync::Arc; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; @@ -1331,6 +1331,10 @@ pub enum Event { /// This event is generated when a payment has been successfully forwarded through us and a /// forwarding fee earned. /// + /// Note that downgrading from 0.3 with pending trampoline forwards that use multipart payments + /// will produce an event that only provides information about the first htlc that was + /// received/dispatched. + /// /// # Failure Behavior and Persistence /// This event will eventually be replayed after failures-to-handle (i.e., the event handler /// returning `Err(ReplayEvent ())`) and will be persisted across restarts. @@ -2034,17 +2038,20 @@ impl Writeable for Event { outbound_amount_forwarded_msat, } => { 7u8.write(writer)?; + // Fields 1, 3, 9, 11, 13 and 15 are written for backwards compatibility. + let legacy_prev = prev_htlcs.first().ok_or(io::Error::from(IOInvalidData))?; + let legacy_next = next_htlcs.first().ok_or(io::Error::from(IOInvalidData))?; write_tlv_fields!(writer, { (0, total_fee_earned_msat, option), - // Type 1 was prev_channel_id in 0.2 and earlier. + (1, Some(legacy_prev.channel_id), option), (2, claim_from_onchain_tx, required), - // Type 3 was next_channel_id in 0.2 and earlier. + (3, Some(legacy_next.channel_id), option), (5, outbound_amount_forwarded_msat, option), (7, skimmed_fee_msat, option), - // Type 9 was prev_user_channel_id in 0.2 and earlier. - // Type 11 was next_user_channel_id in 0.2 and earlier. - // Type 13 was prev_node_id in 0.2 and earlier. - // Type 15 was next_node_id in 0.2 and earlier. 
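+					// Restore the legacy per-field TLVs, mirroring the first entry of each vec,
+					// so that pre-0.3 readers still find the scalar fields they expect: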
+ (9, legacy_prev.user_channel_id, option), + (11, legacy_next.user_channel_id, option), + (13, legacy_prev.node_id, option), + (15, legacy_next.node_id, option), // HTLCs are written as required, rather than required_vec, so that they can be // deserialized using default_value to fill in legacy fields which expects // LengthReadable (requried_vec is WithoutLength). @@ -2569,35 +2576,23 @@ impl MaybeReadable for Event { (11, next_user_channel_id_legacy, option), (13, prev_node_id_legacy, option), (15, next_node_id_legacy, option), - // We can't unwrap prev_channel_id_legacy or next_channel_id_legacy here - // because default_value eagerly evaluates the default, so events that do - // not have legacy fields would fail. We settle for setting a zero ChannelID - // and replacing it below. + // We can unwrap in the eagerly-evaluated default_value code because we + // always write legacy fields to be backwards compatible, and expect + // this field to be set because the legacy field was only None for versions + // before 0.0.107 and we do not allow upgrades with pending forwards to 0.1 + // for any version before 0.0.123. (17, prev_htlcs, (default_value, vec![HTLCLocator{ - channel_id: ChannelId::new_zero(), + channel_id: prev_channel_id_legacy.unwrap(), user_channel_id: prev_user_channel_id_legacy, node_id: prev_node_id_legacy, }])), (19, next_htlcs, (default_value, vec![HTLCLocator{ - channel_id: ChannelId::new_zero(), + channel_id: next_channel_id_legacy.unwrap(), user_channel_id: next_user_channel_id_legacy, node_id: next_node_id_legacy, }])), }); - // If dealing with legacy serialization, we can be confident that we'll - // replace the zero value placeholders above because these fields were only - // None for events serialized before 0.0.107. We do not allow nodes with pending - // forwards to be upgraded directly to 0.1 from versions 0.0.123 or earlier, - // so we should always have Some here when reading legacy serialization. - if let Some(prev_channel_id) = prev_channel_id_legacy { - prev_htlcs[0].channel_id = prev_channel_id; - } - - if let Some(next_channel_id) = next_channel_id_legacy { - next_htlcs[0].channel_id = next_channel_id; - } - Ok(Some(Event::PaymentForwarded { prev_htlcs, next_htlcs, From 6bd32c6b2776d7fd34c2a68347767119583b746f Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Fri, 16 Jan 2026 10:02:40 -0500 Subject: [PATCH 03/23] f typos in PaymentForwarded doc and comment --- lightning/src/events/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 06f2a25a0dc..6a123eadb90 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -738,7 +738,7 @@ pub struct HTLCLocator { /// The `user_channel_id` for `channel_id`. pub user_channel_id: Option, - /// The public key identify of the node that the HTLC was sent to or received from. + /// The public key identity of the node that the HTLC was sent to or received from. pub node_id: Option, } @@ -2054,7 +2054,7 @@ impl Writeable for Event { (15, legacy_next.node_id, option), // HTLCs are written as required, rather than required_vec, so that they can be // deserialized using default_value to fill in legacy fields which expects - // LengthReadable (requried_vec is WithoutLength). + // LengthReadable (required_vec is WithoutLength). 
(17, *prev_htlcs, required), (19, *next_htlcs, required), }); From 83ef21a30c56db6842c64295dc5c1c47a2da7e18 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 16 Dec 2025 15:14:55 +0200 Subject: [PATCH 04/23] ln: make event optional in EmitEventAndFreeOtherChannel In the commits that follow, we want to be able to free the other channel without emitting an event so that we can emit a single event for trampoline payments with multiple incoming HTLCs. We still want to go through the full claim flow for each incoming HTLC (and persist the EmitEventAndFreeOtherChannel event to be picked up on restart), but do not want multiple events for the same trampoline forward. Changing from upgradable_required to upgradable_option is forwards compatible - old versions of the software will always have written this field, newer versions don't require it to be there but will be able to read it as-is. This change is not backwards compatible, because older versions of the software will expect the field to be present but newer versions may not write it. An alternative would be to add a new event type, but that would need to have an even TLV (because the event must be understood and processed on restart to claim the incoming HTLC), so that option isn't backwards compatible either. --- CHANGELOG.md | 5 +++++ lightning/src/ln/channelmanager.rs | 15 ++++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a51c5fda8bd..5c29e63dc57 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# 0.3 + +## Backwards Compatibility +* Downgrade is not possible while the node has in-flight trampoline forwards. + # 0.2 - Dec 2, 2025 - "Natively Asynchronous Splicing" ## API Updates diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ada21b8bdaf..3369f8031b9 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1321,7 +1321,7 @@ pub(crate) enum MonitorUpdateCompletionAction { /// edge completes, we will surface an [`Event::PaymentForwarded`] as well as unblock the /// outbound edge. EmitEventAndFreeOtherChannel { - event: events::Event, + event: Option, downstream_counterparty_and_funding_outpoint: Option, }, /// Indicates we should immediately resume the operation of another channel, unless there is @@ -1356,7 +1356,10 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction, (5, downstream_channel_id, required), }, (2, EmitEventAndFreeOtherChannel) => { - (0, event, upgradable_required), + // LDK prior to 0.3 required this field. It will not be present for trampoline payments + // with multiple incoming HTLCS, so nodes cannot downgrade while trampoline payments + // are in the process of being resolved. + (0, event, upgradable_option), // LDK prior to 0.0.116 did not have this field as the monitor update application order was // required by clients. If we downgrade to something prior to 0.0.116 this may result in // monitor updates which aren't properly blocked or resumed, however that's fine - we don't @@ -9519,7 +9522,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ ); ( Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { - event: events::Event::PaymentForwarded { + event: Some(events::Event::PaymentForwarded { prev_htlcs: vec![events::HTLCLocator { channel_id: prev_channel_id, user_channel_id: prev_user_channel_id, @@ -9534,7 +9537,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ skimmed_fee_msat, claim_from_onchain_tx: from_onchain, outbound_amount_forwarded_msat: forwarded_htlc_value_msat, - }, + }), downstream_counterparty_and_funding_outpoint: chan_to_release, }), None, @@ -9754,7 +9757,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ event, downstream_counterparty_and_funding_outpoint, } => { - self.pending_events.lock().unwrap().push_back((event, None)); + if let Some(event) = event { + self.pending_events.lock().unwrap().push_back((event, None)); + } if let Some(unblocked) = downstream_counterparty_and_funding_outpoint { self.handle_monitor_update_release( unblocked.counterparty_node_id, From 9a309653f6fb1cab66da44031657a7fb2e107d6b Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Mon, 12 Jan 2026 14:02:04 -0500 Subject: [PATCH 05/23] f Move changelog downgrade note to pending_changelog --- CHANGELOG.md | 5 ----- pending_changelog/4304.txt | 3 +++ 2 files changed, 3 insertions(+), 5 deletions(-) create mode 100644 pending_changelog/4304.txt diff --git a/CHANGELOG.md b/CHANGELOG.md index 5c29e63dc57..a51c5fda8bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,3 @@ -# 0.3 - -## Backwards Compatibility -* Downgrade is not possible while the node has in-flight trampoline forwards. - # 0.2 - Dec 2, 2025 - "Natively Asynchronous Splicing" ## API Updates diff --git a/pending_changelog/4304.txt b/pending_changelog/4304.txt new file mode 100644 index 00000000000..8c1580a2f4c --- /dev/null +++ b/pending_changelog/4304.txt @@ -0,0 +1,3 @@ +## Backwards Compatibility + +* Downgrade is not possible while the node has in-flight trampoline forwards. From 2ec14f13571797464225156aa0af11eeb012af20 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 7 Jan 2026 15:36:30 -0500 Subject: [PATCH 06/23] ln/refactor: rename EmitEventAndFreeOtherChannel to note optional event --- lightning/src/ln/channelmanager.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 3369f8031b9..4dcb3c29657 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1320,7 +1320,7 @@ pub(crate) enum MonitorUpdateCompletionAction { /// completes a monitor update containing the payment preimage. In that case, after the inbound /// edge completes, we will surface an [`Event::PaymentForwarded`] as well as unblock the /// outbound edge. - EmitEventAndFreeOtherChannel { + EmitEventOptionAndFreeOtherChannel { event: Option, downstream_counterparty_and_funding_outpoint: Option, }, @@ -1331,8 +1331,8 @@ pub(crate) enum MonitorUpdateCompletionAction { /// This is usually generated when we've forwarded an HTLC and want to block the outbound edge /// from completing a monitor update which removes the payment preimage until the inbound edge /// completes a monitor update containing the payment preimage. However, we use this variant - /// instead of [`Self::EmitEventAndFreeOtherChannel`] when we discover that the claim was in - /// fact duplicative and we simply want to resume the outbound edge channel immediately. + /// instead of [`Self::EmitEventOptionAndFreeOtherChannel`] when we discover that the claim was + /// in fact duplicative and we simply want to resume the outbound edge channel immediately. /// /// This variant should thus never be written to disk, as it is processed inline rather than /// stored for later processing. 
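Since the diff only shows the enum plumbing, the intended claim-flow shape may be easier to see in isolation. A toy sketch — using stand-in types (`String` for the event, `u64` for the unblock handle) rather than LDK's actual definitions — of how a multi-HTLC trampoline claim would produce one event but one unblock action per incoming HTLC:

```rust
// Simplified stand-ins for the real types; only the Option-wrapped event matters.
enum CompletionAction {
    EmitEventOptionAndFreeOtherChannel { event: Option<String>, unblock: Option<u64> },
}

fn actions_for_trampoline_claim(incoming_htlc_chans: &[u64]) -> Vec<CompletionAction> {
    incoming_htlc_chans
        .iter()
        .enumerate()
        .map(|(i, chan)| CompletionAction::EmitEventOptionAndFreeOtherChannel {
            // Each incoming HTLC still gets a persisted action that unblocks its
            // channel, but only the first claim surfaces the single event.
            event: (i == 0).then(|| "PaymentForwarded".to_owned()),
            unblock: Some(*chan),
        })
        .collect()
}
```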
@@ -1355,7 +1355,7 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
 		(4, blocking_action, upgradable_required),
 		(5, downstream_channel_id, required),
 	},
-	(2, EmitEventAndFreeOtherChannel) => {
+	(2, EmitEventOptionAndFreeOtherChannel) => {
 		// LDK prior to 0.3 required this field. It will not be present for trampoline payments
 		// with multiple incoming HTLCS, so nodes cannot downgrade while trampoline payments
 		// are in the process of being resolved.
@@ -9521,7 +9521,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 					"skimmed_fee_msat must always be included in total_fee_earned_msat"
 				);
 				(
-					Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+					Some(MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel {
 						event: Some(events::Event::PaymentForwarded {
@@ -9753,7 +9753,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				}
 			},
-			MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+			MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel {
 				event,
 				downstream_counterparty_and_funding_outpoint,
 			} => {
@@ -18454,7 +18454,7 @@ where
 				let logger =
 					WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None);
 				for action in actions.iter() {
-					if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+					if let MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel {
 						downstream_counterparty_and_funding_outpoint:
 							Some(EventUnblockedChannel {
 								counterparty_node_id: blocked_node_id,

From 10bc10680bceb0a7ca86b016cad000e0ee2b3238 Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Wed, 7 Jan 2026 15:05:44 -0500
Subject: [PATCH 07/23] ln+events: allow multiple prev_channel_id in
 HTLCHandlingFailed

In preparation for trampoline failures, allow multiple previous channel
ids. We'll only emit a single HTLCHandlingFailed for all of our
failed-back HTLCs, so we want to be able to express all of them in one
event.
---
 lightning/src/events/mod.rs        | 23 ++++++++++++++++-------
 lightning/src/ln/channelmanager.rs |  4 ++--
 lightning/src/ln/monitor_tests.rs  |  4 ++--
 lightning/src/util/ser.rs          |  1 +
 4 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index 6a123eadb90..b6d6e8fba3f 100644
--- a/lightning/src/events/mod.rs
+++ b/lightning/src/events/mod.rs
@@ -1667,8 +1667,9 @@ pub enum Event {
 	/// This event will eventually be replayed after failures-to-handle (i.e., the event handler
 	/// returning `Err(ReplayEvent ())`) and will be persisted across restarts.
 	HTLCHandlingFailed {
-		/// The channel over which the HTLC was received.
-		prev_channel_id: ChannelId,
+		/// The channel(s) over which the HTLC(s) were received. May contain multiple entries for
+		/// trampoline forwards.
+		prev_channel_ids: Vec<ChannelId>,
 		/// The type of HTLC handling that failed.
 		failure_type: HTLCHandlingFailureType,
 		/// The reason that the HTLC failed.
@@ -2204,15 +2205,16 @@ impl Writeable for Event {
 				})
 			},
 			&Event::HTLCHandlingFailed {
-				ref prev_channel_id,
+				ref prev_channel_ids,
 				ref failure_type,
 				ref failure_reason,
 			} => {
 				25u8.write(writer)?;
 				write_tlv_fields!(writer, {
-					(0, prev_channel_id, required),
+					// Type 0 was prev_channel_id in 0.2 and earlier.
(1, failure_reason, option), (2, failure_type, required), + (3, *prev_channel_ids, required), }) }, &Event::BumpTransaction(ref event) => { @@ -2784,13 +2786,19 @@ impl MaybeReadable for Event { }, 25u8 => { let mut f = || { - let mut prev_channel_id = ChannelId::new_zero(); + let mut prev_channel_id_legacy = None; let mut failure_reason = None; let mut failure_type_opt = UpgradableRequired(None); + let mut prev_channel_ids = vec![]; read_tlv_fields!(reader, { - (0, prev_channel_id, required), + (0, prev_channel_id_legacy, option), (1, failure_reason, option), (2, failure_type_opt, upgradable_required), + // If our new prev_channel_ids field is not present, the legacy field + // must be because it used to be required. + (3, prev_channel_ids, (default_value, vec![ + prev_channel_id_legacy.ok_or(msgs::DecodeError::InvalidValue)?, + ])), }); // If a legacy HTLCHandlingFailureType::UnknownNextHop was written, upgrade @@ -2804,8 +2812,9 @@ impl MaybeReadable for Event { }); failure_reason = Some(LocalHTLCFailureReason::UnknownNextPeer.into()); } + Ok(Some(Event::HTLCHandlingFailed { - prev_channel_id, + prev_channel_ids, failure_type: _init_tlv_based_struct_field!( failure_type_opt, upgradable_required diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 4dcb3c29657..72df726d9c7 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -7231,7 +7231,7 @@ where .push(failure); self.pending_events.lock().unwrap().push_back(( events::Event::HTLCHandlingFailed { - prev_channel_id: incoming_channel_id, + prev_channel_ids: vec![incoming_channel_id], failure_type, failure_reason: Some(failure_reason), }, @@ -8792,7 +8792,7 @@ where let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back(( events::Event::HTLCHandlingFailed { - prev_channel_id: *channel_id, + prev_channel_ids: vec![*channel_id], failure_type, failure_reason: Some(onion_error.into()), }, diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 04915affa20..a754c4fb81f 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -3792,8 +3792,8 @@ fn do_test_lost_timeout_monitor_events(confirm_tx: CommitmentType, dust_htlcs: b Event::PaymentFailed { payment_hash, .. } => { assert_eq!(payment_hash, Some(hash_b)); }, - Event::HTLCHandlingFailed { prev_channel_id, .. } => { - assert_eq!(prev_channel_id, chan_a); + Event::HTLCHandlingFailed { prev_channel_ids, .. 
} => { + assert_eq!(prev_channel_ids[0], chan_a); }, _ => panic!("Wrong event {ev:?}"), } diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index 249505f2e21..24e535b70bb 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -1101,6 +1101,7 @@ impl_for_vec_with_element_length_prefix!(crate::ln::msgs::UpdateAddHTLC); impl_writeable_for_vec_with_element_length_prefix!(&crate::ln::msgs::UpdateAddHTLC); impl_for_vec!(u32); impl_for_vec!(crate::events::HTLCLocator); +impl_for_vec!(crate::ln::types::ChannelId); impl Writeable for Vec { #[inline] From 5e078bdca9a56e5db5161a2620bd43240a54d3fa Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 14 Jan 2026 15:47:30 -0500 Subject: [PATCH 08/23] f write legacy channel_id in HTLCHandlingFailed for backwards compat --- lightning/src/events/mod.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index b6d6e8fba3f..b0921509692 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -1663,6 +1663,10 @@ pub enum Event { /// Indicates that the HTLC was accepted, but could not be processed when or after attempting to /// forward it. /// + /// Note that downgrading from 0.3 with pending trampoline forwards that have incoming multipart + /// payments will produce an event that only provides information about the first htlc that was + /// received/dispatched. + /// /// # Failure Behavior and Persistence /// This event will eventually be replayed after failures-to-handle (i.e., the event handler /// returning `Err(ReplayEvent ())`) and will be persisted across restarts. @@ -2210,8 +2214,11 @@ impl Writeable for Event { ref failure_reason, } => { 25u8.write(writer)?; + let legacy_chan_id = + prev_channel_ids.first().ok_or(io::Error::from(IOInvalidData))?; write_tlv_fields!(writer, { - // Type 0 was prev_channel_id in 0.2 and earlier. + // Write legacy field to remain backwards compatible. + (0, legacy_chan_id, required), (1, failure_reason, option), (2, failure_type, required), (3, *prev_channel_ids, required), @@ -2786,18 +2793,16 @@ impl MaybeReadable for Event { }, 25u8 => { let mut f = || { - let mut prev_channel_id_legacy = None; + let mut prev_channel_id_legacy = ChannelId::new_zero(); let mut failure_reason = None; let mut failure_type_opt = UpgradableRequired(None); let mut prev_channel_ids = vec![]; read_tlv_fields!(reader, { - (0, prev_channel_id_legacy, option), + (0, prev_channel_id_legacy, required), (1, failure_reason, option), (2, failure_type_opt, upgradable_required), - // If our new prev_channel_ids field is not present, the legacy field - // must be because it used to be required. 
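+						// With type 0 now read as required, the legacy scalar is always
+						// available for the eagerly-evaluated default below.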
(3, prev_channel_ids, (default_value, vec![
-							prev_channel_id_legacy.ok_or(msgs::DecodeError::InvalidValue)?,
+							prev_channel_id_legacy,
 						])),
 					});

 					// If a legacy HTLCHandlingFailureType::UnknownNextHop was written, upgrade
@@ -2812,7 +2817,6 @@ impl MaybeReadable for Event {
 					});
 					failure_reason = Some(LocalHTLCFailureReason::UnknownNextPeer.into());
 				}
-
 				Ok(Some(Event::HTLCHandlingFailed {
 					prev_channel_ids,
 					failure_type: _init_tlv_based_struct_field!(

From 875a7578a4aecf4a7373daf1cff3d715d137d15d Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Tue, 6 Jan 2026 15:28:42 -0500
Subject: [PATCH 09/23] events: add TrampolineForward variant to
 HTLCHandlingFailureType

---
 lightning/src/events/mod.rs | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index b0921509692..6e2b525d943 100644
--- a/lightning/src/events/mod.rs
+++ b/lightning/src/events/mod.rs
@@ -575,6 +575,14 @@ pub enum HTLCHandlingFailureType {
 		/// The payment hash of the payment we attempted to process.
 		payment_hash: PaymentHash,
 	},
+	/// We were responsible for pathfinding and forwarding of a trampoline payment, but failed to
+	/// do so. An example of such an instance is when we can't find a route to the specified
+	/// trampoline destination within.
+	TrampolineForward {
+		/// The set of HTLCs dispatched by our node in an attempt to complete the trampoline forward
+		/// which have failed.
+		attempted_htlcs: Vec<HTLCLocator>,
+	},
 }

 impl_writeable_tlv_based_enum_upgradable!(HTLCHandlingFailureType,
@@ -592,6 +600,9 @@ impl_writeable_tlv_based_enum_upgradable!(HTLCHandlingFailureType,
 	(4, Receive) => {
 		(0, payment_hash, required),
 	},
+	(5, TrampolineForward) => {
+		(0, attempted_htlcs, required_vec),
+	},
 );

From b0372d81e7ded1a868c1d25f6d1ad619d5706ff7 Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Fri, 16 Jan 2026 10:06:50 -0500
Subject: [PATCH 10/23] f typo in TrampolineForward doc

---
 lightning/src/events/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index 6e2b525d943..8891bc21709 100644
--- a/lightning/src/events/mod.rs
+++ b/lightning/src/events/mod.rs
@@ -577,7 +577,7 @@ pub enum HTLCHandlingFailureType {
 	},
 	/// We were responsible for pathfinding and forwarding of a trampoline payment, but failed to
 	/// do so. An example of such an instance is when we can't find a route to the specified
-	/// trampoline destination within.
+	/// trampoline destination.
 	TrampolineForward {
 		/// The set of HTLCs dispatched by our node in an attempt to complete the trampoline forward
 		/// which have failed.
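Before the next fixup, it may help to see how a consumer would observe these failures once the series lands. A sketch against the API as introduced above (`prev_channel_ids` on the event, with `attempted_htlcs` assumed to be the `Vec<HTLCLocator>` field added by the variant):

```rust
use lightning::events::{Event, HTLCHandlingFailureType};

// Sketch of consumer-side handling for the new failure type.
fn on_event(event: &Event) {
    if let Event::HTLCHandlingFailed { prev_channel_ids, failure_type, .. } = event {
        if let HTLCHandlingFailureType::TrampolineForward { attempted_htlcs } = failure_type {
            // A single event covers all incoming HTLCs of the trampoline forward and
            // every outgoing HTLC we attempted for it.
            println!(
                "trampoline forward failed: {} incoming HTLC(s), {} failed attempt(s)",
                prev_channel_ids.len(),
                attempted_htlcs.len()
            );
        }
    }
}
```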
From 5edc5f82b56084139ca2192d862dd6f30a77781a Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Fri, 16 Jan 2026 13:25:21 -0500
Subject: [PATCH 11/23] f use odd field for TrampolineForward to be forwards
 compatible

---
 lightning/src/events/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index 8891bc21709..f8e34df8bf9 100644
--- a/lightning/src/events/mod.rs
+++ b/lightning/src/events/mod.rs
@@ -601,7 +601,7 @@ impl_writeable_tlv_based_enum_upgradable!(HTLCHandlingFailureType,
 	(0, payment_hash, required),
 	},
 	(5, TrampolineForward) => {
-		(0, attempted_htlcs, required_vec),
+		(1, attempted_htlcs, required_vec),
 	},
 );

From f1d5bc487066b5d0d6a75b4d8627506d591a3ce8 Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Tue, 2 Dec 2025 10:06:41 -0500
Subject: [PATCH 12/23] ln: add TrampolineForward SentHTLCId variant

This commit adds a SentHTLCId for trampoline forwards, identified by
their session_priv. As with an OutboundRoute, we can expect our HTLC to
be uniquely identified by a randomly generated session_priv.

TrampolineForward could also be identified by the set of all previous
outbound scid/htlc id pairs that represent its incoming HTLC(s). We
choose the 32 byte session_priv to fix the size of this identifier,
rather than 16 byte scid/id pairs that will grow with the number of
incoming HTLCs.
---
 lightning/src/ln/channelmanager.rs | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 72df726d9c7..40926b342cb 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -718,6 +718,7 @@ impl Default for OptionalOfferPaymentParams {
 pub(crate) enum SentHTLCId {
 	PreviousHopData { prev_outbound_scid_alias: u64, htlc_id: u64 },
 	OutboundRoute { session_priv: [u8; SECRET_KEY_SIZE] },
+	TrampolineForward { session_priv: [u8; SECRET_KEY_SIZE] },
 }
 impl SentHTLCId {
 	pub(crate) fn from_source(source: &HTLCSource) -> Self {
@@ -740,6 +741,9 @@ impl_writeable_tlv_based_enum!(SentHTLCId,
 	(2, OutboundRoute) => {
 		(0, session_priv, required),
 	},
+	(4, TrampolineForward) => {
+		(0, session_priv, required),
+	},
 );

 // (src_outbound_scid_alias, src_counterparty_node_id, src_funding_outpoint, src_chan_id, src_user_chan_id)

From 5fce390971eecf6086afd42d7fef866b3887e42a Mon Sep 17 00:00:00 2001
From: Maurice
Date: Fri, 22 Aug 2025 10:37:21 -0400
Subject: [PATCH 13/23] ln: add TrampolineForward variant to HTLCSource enum

Co-authored-by: Arik Sosman
Co-authored-by: Maurice Poirrier
---
 lightning/src/chain/channelmonitor.rs |  2 +
 lightning/src/ln/channelmanager.rs    | 62 +++++++++++++++++++++++++++
 lightning/src/util/ser.rs             |  2 +
 3 files changed, 66 insertions(+)

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 515a3dc5f1d..ee93211af47 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -2868,6 +2868,7 @@ impl ChannelMonitorImpl {
 			let outbound_payment = match source {
 				None => panic!("Outbound HTLCs should have a source"),
 				Some(&HTLCSource::PreviousHopData(_)) => false,
+				Some(&HTLCSource::TrampolineForward { .. }) => false,
 				Some(&HTLCSource::OutboundRoute { ..
}) => true,
 			};
 			return Some(Balance::MaybeTimeoutClaimableHTLC {
@@ -3080,6 +3081,7 @@ impl ChannelMonitor {
 			let outbound_payment = match source {
 				None => panic!("Outbound HTLCs should have a source"),
 				Some(HTLCSource::PreviousHopData(_)) => false,
+				Some(HTLCSource::TrampolineForward { .. }) => false,
 				Some(HTLCSource::OutboundRoute { .. }) => true,
 			};
 			if outbound_payment {
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 40926b342cb..bf3fe5d73d3 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -727,6 +727,9 @@ impl SentHTLCId {
 				prev_outbound_scid_alias: hop_data.prev_outbound_scid_alias,
 				htlc_id: hop_data.htlc_id,
 			},
+			HTLCSource::TrampolineForward { session_priv, .. } => {
+				Self::TrampolineForward { session_priv: session_priv.secret_bytes() }
+			},
 			HTLCSource::OutboundRoute { session_priv, .. } => {
 				Self::OutboundRoute { session_priv: session_priv.secret_bytes() }
 			},
@@ -753,6 +756,8 @@ type PerSourcePendingForward =
 type FailedHTLCForward = (HTLCSource, PaymentHash, HTLCFailReason, HTLCHandlingFailureType);

 mod fuzzy_channelmanager {
+	use crate::routing::router::RouteHop;
+
 	use super::*;

 	/// Tracks the inbound corresponding to an outbound HTLC
 	#[derive(Clone, Debug, PartialEq, Eq)]
 	pub enum HTLCSource {
 		PreviousHopData(HTLCPreviousHopData),
+		TrampolineForward {
+			/// We might be forwarding an incoming payment that was received over MPP, and therefore
+			/// need to store the vector of corresponding `HTLCPreviousHopData` values.
+			previous_hop_data: Vec<HTLCPreviousHopData>,
+			incoming_trampoline_shared_secret: [u8; 32],
+			hops: Vec<RouteHop>,
+			/// In order to decode inter-Trampoline errors, we need to store the session_priv key
+			/// given we're effectively creating new outbound routes.
+			session_priv: SecretKey,
+		},
 		OutboundRoute {
 			path: Path,
 			session_priv: SecretKey,
@@ -822,6 +837,18 @@ impl core::hash::Hash for HTLCSource {
 				first_hop_htlc_msat.hash(hasher);
 				bolt12_invoice.hash(hasher);
 			},
+			HTLCSource::TrampolineForward {
+				previous_hop_data,
+				incoming_trampoline_shared_secret,
+				hops,
+				session_priv,
+			} => {
+				2u8.hash(hasher);
+				previous_hop_data.hash(hasher);
+				incoming_trampoline_shared_secret.hash(hasher);
+				hops.hash(hasher);
+				session_priv[..].hash(hasher);
+			},
 		}
 	}
 }
@@ -8803,6 +8830,7 @@ where
 						None,
 					));
 				},
+				HTLCSource::TrampolineForward { ..
} => todo!(), } } @@ -16481,6 +16510,24 @@ impl Readable for HTLCSource { }) } 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), + 2 => { + let mut previous_hop_data = Vec::new(); + let mut incoming_trampoline_shared_secret: crate::util::ser::RequiredWrapper<[u8; 32]> = crate::util::ser::RequiredWrapper(None); + let mut session_priv: crate::util::ser::RequiredWrapper = crate::util::ser::RequiredWrapper(None); + let mut hops = Vec::new(); + read_tlv_fields!(reader, { + (0, previous_hop_data, required_vec), + (2, incoming_trampoline_shared_secret, required), + (4, session_priv, required), + (6, hops, required_vec), + }); + Ok(HTLCSource::TrampolineForward { + previous_hop_data, + incoming_trampoline_shared_secret: incoming_trampoline_shared_secret.0.unwrap(), + hops, + session_priv: session_priv.0.unwrap(), + }) + }, _ => Err(DecodeError::UnknownRequiredFeature), } } @@ -16513,6 +16560,20 @@ impl Writeable for HTLCSource { 1u8.write(writer)?; field.write(writer)?; }, + HTLCSource::TrampolineForward { + ref previous_hop_data, + incoming_trampoline_shared_secret, + ref session_priv, + ref hops, + } => { + 2u8.write(writer)?; + write_tlv_fields!(writer, { + (0, *previous_hop_data, required_vec), + (2, incoming_trampoline_shared_secret, required), + (4, session_priv, required), + (6, *hops, required_vec), + }); + }, } Ok(()) } @@ -18070,6 +18131,7 @@ where } else { true } }); }, + HTLCSource::TrampolineForward { .. } => todo!(), HTLCSource::OutboundRoute { payment_id, session_priv, diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index 24e535b70bb..731c869b581 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -1085,6 +1085,7 @@ impl Readable for Vec { impl_for_vec!(ecdsa::Signature); impl_for_vec!(crate::chain::channelmonitor::ChannelMonitorUpdate); +impl_for_vec!(crate::ln::channelmanager::HTLCPreviousHopData); impl_for_vec!(crate::ln::channelmanager::MonitorUpdateCompletionAction); impl_for_vec!(crate::ln::channelmanager::PaymentClaimDetails); impl_for_vec!(crate::ln::msgs::SocketAddress); @@ -1096,6 +1097,7 @@ impl_for_vec!(crate::ln::our_peer_storage::PeerStorageMonitorHolder); impl_for_vec!(crate::blinded_path::message::BlindedMessagePath); impl_writeable_for_vec!(&crate::routing::router::BlindedTail); impl_readable_for_vec!(crate::routing::router::BlindedTail); +impl_for_vec!(crate::routing::router::RouteHop); impl_for_vec!(crate::routing::router::TrampolineHop); impl_for_vec_with_element_length_prefix!(crate::ln::msgs::UpdateAddHTLC); impl_writeable_for_vec_with_element_length_prefix!(&crate::ln::msgs::UpdateAddHTLC); From 561b5d90ea79fb3db739df2efc74c84a3db61a8a Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 14 Jan 2026 15:40:06 -0500 Subject: [PATCH 14/23] f store full path and use macros for htlc source persistence --- lightning/src/ln/channelmanager.rs | 40 +++++++++++++----------------- lightning/src/routing/router.rs | 5 ++++ lightning/src/util/ser.rs | 2 -- 3 files changed, 22 insertions(+), 25 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bf3fe5d73d3..952417d58b5 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -756,8 +756,6 @@ type PerSourcePendingForward = type FailedHTLCForward = (HTLCSource, PaymentHash, HTLCFailReason, HTLCHandlingFailureType); mod fuzzy_channelmanager { - use crate::routing::router::RouteHop; - use super::*; /// Tracks the inbound corresponding to an outbound HTLC @@ -770,7 +768,7 @@ mod 
fuzzy_channelmanager {
-	use crate::routing::router::RouteHop;
-
 	use super::*;

 	/// Tracks the inbound corresponding to an outbound HTLC
@@ -768,7 +768,7 @@ mod fuzzy_channelmanager {
 		/// need to store the vector of corresponding `HTLCPreviousHopData` values.
 		previous_hop_data: Vec<HTLCPreviousHopData>,
 		incoming_trampoline_shared_secret: [u8; 32],
-		hops: Vec<RouteHop>,
+		path: Path,
 		/// In order to decode inter-Trampoline errors, we need to store the session_priv key
 		/// given we're effectively creating new outbound routes.
 		session_priv: SecretKey,
@@ -840,13 +838,13 @@ impl core::hash::Hash for HTLCSource {
 			HTLCSource::TrampolineForward {
 				previous_hop_data,
 				incoming_trampoline_shared_secret,
-				hops,
+				path,
 				session_priv,
 			} => {
 				2u8.hash(hasher);
 				previous_hop_data.hash(hasher);
 				incoming_trampoline_shared_secret.hash(hasher);
-				hops.hash(hasher);
+				path.hash(hasher);
 				session_priv[..].hash(hasher);
 			},
@@ -16511,21 +16509,17 @@ impl Readable for HTLCSource {
 			}
 			1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
 			2 => {
-				let mut previous_hop_data = Vec::new();
-				let mut incoming_trampoline_shared_secret: crate::util::ser::RequiredWrapper<[u8; 32]> = crate::util::ser::RequiredWrapper(None);
-				let mut session_priv: crate::util::ser::RequiredWrapper = crate::util::ser::RequiredWrapper(None);
-				let mut hops = Vec::new();
-				read_tlv_fields!(reader, {
-					(0, previous_hop_data, required_vec),
-					(2, incoming_trampoline_shared_secret, required),
-					(4, session_priv, required),
-					(6, hops, required_vec),
+				_init_and_read_len_prefixed_tlv_fields!(reader, {
+					(1, previous_hop_data, required_vec),
+					(3, incoming_trampoline_shared_secret, required),
+					(5, path, required),
+					(7, session_priv, required),
 				});
 				Ok(HTLCSource::TrampolineForward {
-					previous_hop_data,
-					incoming_trampoline_shared_secret: incoming_trampoline_shared_secret.0.unwrap(),
-					hops,
-					session_priv: session_priv.0.unwrap(),
+					previous_hop_data: _init_tlv_based_struct_field!(previous_hop_data, required_vec),
+					incoming_trampoline_shared_secret: _init_tlv_based_struct_field!(incoming_trampoline_shared_secret, required),
+					path: _init_tlv_based_struct_field!(path, required),
+					session_priv: _init_tlv_based_struct_field!(session_priv, required),
 				})
 			},
 			_ => Err(DecodeError::UnknownRequiredFeature),
@@ -16564,14 +16558,14 @@ impl Writeable for HTLCSource {
 				ref previous_hop_data,
 				incoming_trampoline_shared_secret,
 				ref session_priv,
-				ref hops,
+				ref path,
 			} => {
 				2u8.write(writer)?;
 				write_tlv_fields!(writer, {
-					(0, *previous_hop_data, required_vec),
-					(2, incoming_trampoline_shared_secret, required),
-					(4, session_priv, required),
-					(6, *hops, required_vec),
+					(1, *previous_hop_data, required_vec),
+					(3, incoming_trampoline_shared_secret, required),
+					(5, *path, required),
+					(7, session_priv, required),
 				});
 			},
 		}
diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs
index 40580a09c8c..93729aa7e34 100644
--- a/lightning/src/routing/router.rs
+++ b/lightning/src/routing/router.rs
@@ -611,6 +611,11 @@ impl Path {
 	}
 }

+impl_writeable_tlv_based!(Path, {
+	(1, hops, required_vec),
+	(3, blinded_tail, option),
+});
+
 /// A route directs a payment from the sender (us) to the recipient. If the recipient supports MPP,
 /// it can take multiple paths. Each path is composed of one or more hops through the network.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs
index 731c869b581..24e535b70bb 100644
--- a/lightning/src/util/ser.rs
+++ b/lightning/src/util/ser.rs
@@ -1085,7 +1085,6 @@ impl Readable for Vec {
 impl_for_vec!(ecdsa::Signature);
 impl_for_vec!(crate::chain::channelmonitor::ChannelMonitorUpdate);
-impl_for_vec!(crate::ln::channelmanager::HTLCPreviousHopData);
 impl_for_vec!(crate::ln::channelmanager::MonitorUpdateCompletionAction);
 impl_for_vec!(crate::ln::channelmanager::PaymentClaimDetails);
 impl_for_vec!(crate::ln::msgs::SocketAddress);
@@ -1097,7 +1096,6 @@ impl_for_vec!(crate::ln::our_peer_storage::PeerStorageMonitorHolder);
 impl_for_vec!(crate::blinded_path::message::BlindedMessagePath);
 impl_writeable_for_vec!(&crate::routing::router::BlindedTail);
 impl_readable_for_vec!(crate::routing::router::BlindedTail);
-impl_for_vec!(crate::routing::router::RouteHop);
 impl_for_vec!(crate::routing::router::TrampolineHop);
 impl_for_vec_with_element_length_prefix!(crate::ln::msgs::UpdateAddHTLC);
 impl_writeable_for_vec_with_element_length_prefix!(&crate::ln::msgs::UpdateAddHTLC);

From e6cf8bd2ee5d7a2be751fc73b00792b3d0e9bd62 Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Tue, 16 Dec 2025 15:21:57 +0200
Subject: [PATCH 15/23] ln/refactor: add claim funds for htlc forward helper

We will need to share this code when we add trampoline forwarding. This
commit moves the logic exactly as-is, in preparation for the next
commit that will update it to suit trampoline.

Co-authored-by: Arik Sosman
Co-authored-by: Maurice Poirrier
---
 lightning/src/ln/channelmanager.rs | 294 ++++++++++++++++-------------
 1 file changed, 163 insertions(+), 131 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 952417d58b5..aed85b0d0e4 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -9061,6 +9061,157 @@ where
 		}
 	}

+	/// Claims funds for a forwarded HTLC where we are an intermediate hop.
+	///
+	/// Processes attribution data, calculates fees earned, and emits an [`Event::PaymentForwarded`]
+	/// event upon successful claim.
+	fn claim_funds_from_htlc_forward_hop(
+		&self, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>,
+		skimmed_fee_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
+		next_channel_counterparty_node_id: PublicKey, next_channel_outpoint: OutPoint,
+		next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
+		hop_data: HTLCPreviousHopData, attribution_data: Option<AttributionData>,
+		send_timestamp: Option<Duration>,
+	) {
+		let prev_channel_id = hop_data.channel_id;
+		let prev_user_channel_id = hop_data.user_channel_id;
+		let prev_node_id = hop_data.counterparty_node_id;
+		let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
+
+		// Obtain hold time, if available.
+		let hold_time = hold_time_since(send_timestamp).unwrap_or(0);
+
+		// If attribution data was received from downstream, we shift it and get it ready for adding our hold
+		// time. Note that fulfilled HTLCs take a fast path to the incoming side. We don't need to wait for RAA
+		// to record the hold time like we do for failed HTLCs.
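+		// The shift readies the downstream payload so our own hold time can be appended,
+		// keyed to the incoming link's shared secret.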
+ let attribution_data = process_fulfill_attribution_data( + attribution_data, + &hop_data.incoming_packet_shared_secret, + hold_time, + ); + + #[cfg(test)] + let claiming_chan_funding_outpoint = hop_data.outpoint; + self.claim_funds_from_hop( + hop_data, + payment_preimage, + None, + Some(attribution_data), + |htlc_claim_value_msat, definitely_duplicate| { + let chan_to_release = Some(EventUnblockedChannel { + counterparty_node_id: next_channel_counterparty_node_id, + funding_txo: next_channel_outpoint, + channel_id: next_channel_id, + blocking_action: completed_blocker, + }); + + if definitely_duplicate && startup_replay { + // On startup we may get redundant claims which are related to + // monitor updates still in flight. In that case, we shouldn't + // immediately free, but instead let that monitor update complete + // in the background. + #[cfg(test)] + { + let per_peer_state = self.per_peer_state.deadlocking_read(); + // The channel we'd unblock should already be closed, or... + let channel_closed = per_peer_state + .get(&next_channel_counterparty_node_id) + .map(|lck| lck.deadlocking_lock()) + .map(|peer| !peer.channel_by_id.contains_key(&next_channel_id)) + .unwrap_or(true); + let background_events = self.pending_background_events.lock().unwrap(); + // there should be a `BackgroundEvent` pending... + let matching_bg_event = + background_events.iter().any(|ev| { + match ev { + // to apply a monitor update that blocked the claiming channel, + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + funding_txo, + update, + .. + } => { + if *funding_txo == claiming_chan_funding_outpoint { + assert!( + update.updates.iter().any(|upd| { + if let ChannelMonitorUpdateStep::PaymentPreimage { + payment_preimage: update_preimage, .. + } = upd { + payment_preimage == *update_preimage + } else { false } + }), + "{:?}", + update + ); + true + } else { + false + } + }, + // or the monitor update has completed and will unblock + // immediately once we get going. + BackgroundEvent::MonitorUpdatesComplete { + channel_id, .. 
+ } => *channel_id == prev_channel_id, + } + }); + assert!(channel_closed || matching_bg_event, "{:?}", *background_events); + } + (None, None) + } else if definitely_duplicate { + if let Some(other_chan) = chan_to_release { + ( + Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { + downstream_counterparty_node_id: other_chan.counterparty_node_id, + downstream_channel_id: other_chan.channel_id, + blocking_action: other_chan.blocking_action, + }), + None, + ) + } else { + (None, None) + } + } else { + let total_fee_earned_msat = + if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { + if let Some(claimed_htlc_value) = htlc_claim_value_msat { + Some(claimed_htlc_value - forwarded_htlc_value) + } else { + None + } + } else { + None + }; + debug_assert!( + skimmed_fee_msat <= total_fee_earned_msat, + "skimmed_fee_msat must always be included in total_fee_earned_msat" + ); + ( + Some(MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { + event: Some(events::Event::PaymentForwarded { + prev_htlcs: vec![events::HTLCLocator { + channel_id: prev_channel_id, + user_channel_id: prev_user_channel_id, + node_id: prev_node_id, + }], + next_htlcs: vec![events::HTLCLocator { + channel_id: next_channel_id, + user_channel_id: next_user_channel_id, + node_id: Some(next_channel_counterparty_node_id), + }], + total_fee_earned_msat, + skimmed_fee_msat, + claim_from_onchain_tx: from_onchain, + outbound_amount_forwarded_msat: forwarded_htlc_value_msat, + }), + downstream_counterparty_and_funding_outpoint: chan_to_release, + }), + None, + ) + } + }, + ); + } + fn claim_funds_from_hop< ComplFunc: FnOnce( Option, @@ -9442,138 +9593,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, HTLCSource::PreviousHopData(hop_data) => { - let prev_channel_id = hop_data.channel_id; - let prev_user_channel_id = hop_data.user_channel_id; - let prev_node_id = hop_data.counterparty_node_id; - let completed_blocker = - RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); - - // Obtain hold time, if available. - let hold_time = hold_time_since(send_timestamp).unwrap_or(0); - - // If attribution data was received from downstream, we shift it and get it ready for adding our hold - // time. Note that fulfilled HTLCs take a fast path to the incoming side. We don't need to wait for RAA - // to record the hold time like we do for failed HTLCs. - let attribution_data = process_fulfill_attribution_data( - attribution_data, - &hop_data.incoming_packet_shared_secret, - hold_time, - ); - - #[cfg(test)] - let claiming_chan_funding_outpoint = hop_data.outpoint; - self.claim_funds_from_hop( - hop_data, + self.claim_funds_from_htlc_forward_hop( payment_preimage, - None, - Some(attribution_data), - |htlc_claim_value_msat, definitely_duplicate| { - let chan_to_release = Some(EventUnblockedChannel { - counterparty_node_id: next_channel_counterparty_node_id, - funding_txo: next_channel_outpoint, - channel_id: next_channel_id, - blocking_action: completed_blocker, - }); - - if definitely_duplicate && startup_replay { - // On startup we may get redundant claims which are related to - // monitor updates still in flight. In that case, we shouldn't - // immediately free, but instead let that monitor update complete - // in the background. - #[cfg(test)] - { - let per_peer_state = self.per_peer_state.deadlocking_read(); - // The channel we'd unblock should already be closed, or... 
- let channel_closed = per_peer_state - .get(&next_channel_counterparty_node_id) - .map(|lck| lck.deadlocking_lock()) - .map(|peer| !peer.channel_by_id.contains_key(&next_channel_id)) - .unwrap_or(true); - let background_events = - self.pending_background_events.lock().unwrap(); - // there should be a `BackgroundEvent` pending... - let matching_bg_event = - background_events.iter().any(|ev| { - match ev { - // to apply a monitor update that blocked the claiming channel, - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - funding_txo, update, .. - } => { - if *funding_txo == claiming_chan_funding_outpoint { - assert!(update.updates.iter().any(|upd| - if let ChannelMonitorUpdateStep::PaymentPreimage { - payment_preimage: update_preimage, .. - } = upd { - payment_preimage == *update_preimage - } else { false } - ), "{:?}", update); - true - } else { false } - }, - // or the monitor update has completed and will unblock - // immediately once we get going. - BackgroundEvent::MonitorUpdatesComplete { - channel_id, .. - } => - *channel_id == prev_channel_id, - } - }); - assert!( - channel_closed || matching_bg_event, - "{:?}", - *background_events - ); - } - (None, None) - } else if definitely_duplicate { - if let Some(other_chan) = chan_to_release { - (Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { - downstream_counterparty_node_id: other_chan.counterparty_node_id, - downstream_channel_id: other_chan.channel_id, - blocking_action: other_chan.blocking_action, - }), None) - } else { - (None, None) - } - } else { - let total_fee_earned_msat = - if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { - if let Some(claimed_htlc_value) = htlc_claim_value_msat { - Some(claimed_htlc_value - forwarded_htlc_value) - } else { - None - } - } else { - None - }; - debug_assert!( - skimmed_fee_msat <= total_fee_earned_msat, - "skimmed_fee_msat must always be included in total_fee_earned_msat" - ); - ( - Some(MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { - event: Some(events::Event::PaymentForwarded { - prev_htlcs: vec![events::HTLCLocator { - channel_id: prev_channel_id, - user_channel_id: prev_user_channel_id, - node_id: prev_node_id, - }], - next_htlcs: vec![events::HTLCLocator { - channel_id: next_channel_id, - user_channel_id: next_user_channel_id, - node_id: Some(next_channel_counterparty_node_id), - }], - total_fee_earned_msat, - skimmed_fee_msat, - claim_from_onchain_tx: from_onchain, - outbound_amount_forwarded_msat: forwarded_htlc_value_msat, - }), - downstream_counterparty_and_funding_outpoint: chan_to_release, - }), - None, - ) - } - }, + forwarded_htlc_value_msat, + skimmed_fee_msat, + from_onchain, + startup_replay, + next_channel_counterparty_node_id, + next_channel_outpoint, + next_channel_id, + next_user_channel_id, + hop_data, + attribution_data, + send_timestamp, ); }, HTLCSource::TrampolineForward { .. } => todo!(), From 8dcf53478f9105570eff7184f6d3a94563565eb2 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 6 Jan 2026 09:09:06 -0500 Subject: [PATCH 16/23] ln/refactor: pass closure to create PaymentForwarded event When we introduce trampoline forwards, we're going to want to provide two external pieces of information to create events: - When to emit an event: we only want to emit one trampoline event, even when we have multiple incoming htlcs. 
We need to make multiple calls to claim_funds_from_htlc_forward_hop to claim
each individual htlc; these calls are not aware of each other, so we rely on
the caller's closure to decide when to emit Some or None.
- Forwarding fees: we will not be able to calculate the total fee for a
trampoline forward when an individual outgoing htlc is fulfilled, because
there may be other outgoing htlcs that are not accounted for (we only get the
htlc_claim_value_msat for the single htlc that was just fulfilled). In the
future, we'll be able to provide the total fee from the channelmanager's
top-level view.
---
 lightning/src/ln/channelmanager.rs | 102 ++++++++++++++++-------------
 1 file changed, 57 insertions(+), 45 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index aed85b0d0e4..852ec290c2f 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -807,6 +807,16 @@ mod fuzzy_channelmanager {
 		/// channel remains unconfirmed for too long.
 		pub cltv_expiry: Option<u32>,
 	}
+
+	impl From<&HTLCPreviousHopData> for events::HTLCLocator {
+		fn from(value: &HTLCPreviousHopData) -> Self {
+			events::HTLCLocator {
+				channel_id: value.channel_id,
+				user_channel_id: value.user_channel_id,
+				node_id: value.counterparty_node_id,
+			}
+		}
+	}
 }
 #[cfg(fuzzing)]
 pub use self::fuzzy_channelmanager::*;
@@ -9064,18 +9074,16 @@ where
 	/// Claims funds for a forwarded HTLC where we are an intermediate hop.
 	///
 	/// Processes attribution data, calculates fees earned, and emits a [`Event::PaymentForwarded`]
-	/// event upon successful claim.
+	/// event upon successful claim. `make_payment_forwarded_event` is responsible for creating a
+	/// single [`Event::PaymentForwarded`] event that represents the forward.
 	fn claim_funds_from_htlc_forward_hop(
-		&self, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>,
-		skimmed_fee_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
-		next_channel_counterparty_node_id: PublicKey, next_channel_outpoint: OutPoint,
-		next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
-		hop_data: HTLCPreviousHopData, attribution_data: Option<AttributionData>,
-		send_timestamp: Option<Duration>,
+		&self, payment_preimage: PaymentPreimage,
+		make_payment_forwarded_event: impl Fn(Option<u64>) -> Option<events::Event>,
+		startup_replay: bool, next_channel_counterparty_node_id: PublicKey,
+		next_channel_outpoint: OutPoint, next_channel_id: ChannelId, hop_data: HTLCPreviousHopData,
+		attribution_data: Option<AttributionData>, send_timestamp: Option<Duration>,
 	) {
-		let prev_channel_id = hop_data.channel_id;
-		let prev_user_channel_id = hop_data.user_channel_id;
-		let prev_node_id = hop_data.counterparty_node_id;
+		let _prev_channel_id = hop_data.channel_id;
 		let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);

 		// Obtain hold time, if available.
@@ -9151,7 +9159,7 @@ where
 							// immediately once we get going.
 							BackgroundEvent::MonitorUpdatesComplete {
 								channel_id, ..
-							} => *channel_id == prev_channel_id,
+							} => *channel_id == _prev_channel_id,
 							}
 						});
 						assert!(channel_closed || matching_bg_event, "{:?}", *background_events);
@@ -9171,38 +9179,16 @@
 						(None, None)
 					}
 				} else {
-					let total_fee_earned_msat =
-						if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
-							if let Some(claimed_htlc_value) = htlc_claim_value_msat {
-								Some(claimed_htlc_value - forwarded_htlc_value)
-							} else {
-								None
-							}
-						} else {
-							None
-						};
-					debug_assert!(
-						skimmed_fee_msat <= total_fee_earned_msat,
-						"skimmed_fee_msat must always be included in total_fee_earned_msat"
-					);
+					let event = make_payment_forwarded_event(htlc_claim_value_msat);
+					if let Some(ref payment_forwarded) = event {
+						debug_assert!(matches!(
+							payment_forwarded,
+							&events::Event::PaymentForwarded { .. }
+						));
+					}
 					(
 						Some(MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel {
-							event: Some(events::Event::PaymentForwarded {
-								prev_htlcs: vec![events::HTLCLocator {
-									channel_id: prev_channel_id,
-									user_channel_id: prev_user_channel_id,
-									node_id: prev_node_id,
-								}],
-								next_htlcs: vec![events::HTLCLocator {
-									channel_id: next_channel_id,
-									user_channel_id: next_user_channel_id,
-									node_id: Some(next_channel_counterparty_node_id),
-								}],
-								total_fee_earned_msat,
-								skimmed_fee_msat,
-								claim_from_onchain_tx: from_onchain,
-								outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
-							}),
+							event,
 							downstream_counterparty_and_funding_outpoint: chan_to_release,
 						}),
 						None,
@@ -9593,16 +9579,42 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				}
 			},
 			HTLCSource::PreviousHopData(hop_data) => {
+				let prev_htlcs = vec![events::HTLCLocator::from(&hop_data)];
 				self.claim_funds_from_htlc_forward_hop(
 					payment_preimage,
-					forwarded_htlc_value_msat,
-					skimmed_fee_msat,
-					from_onchain,
+					|htlc_claim_value_msat: Option<u64>| -> Option<events::Event> {
+						let total_fee_earned_msat =
+							if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+								if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+									Some(claimed_htlc_value - forwarded_htlc_value)
+								} else {
+									None
+								}
+							} else {
+								None
+							};
+						debug_assert!(
+							skimmed_fee_msat <= total_fee_earned_msat,
+							"skimmed_fee_msat must always be included in total_fee_earned_msat"
+						);
+
+						Some(events::Event::PaymentForwarded {
+							prev_htlcs: prev_htlcs.clone(),
+							next_htlcs: vec![events::HTLCLocator {
+								channel_id: next_channel_id,
+								user_channel_id: next_user_channel_id,
+								node_id: Some(next_channel_counterparty_node_id),
+							}],
+							total_fee_earned_msat,
+							skimmed_fee_msat,
+							claim_from_onchain_tx: from_onchain,
+							outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+						})
+					},
 					startup_replay,
 					next_channel_counterparty_node_id,
 					next_channel_outpoint,
 					next_channel_id,
-					next_user_channel_id,
 					hop_data,
 					attribution_data,
 					send_timestamp,

From 1aa4c7eeb965d874e65f0480d72814ba38025ea2 Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Tue, 6 Jan 2026 09:42:37 -0500
Subject: [PATCH 17/23] ln: add trampoline routing payment claiming

Implement payment claiming for `HTLCSource::TrampolineForward` by iterating
through previous hop data and claiming funds for each HTLC.
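To make the single-event behavior concrete, here is a minimal, self-contained
sketch of the claim loop's shape (all names and types below are simplified
stand-ins, not the real LDK API): every incoming HTLC gets its own claim call,
but only the first call's closure yields Some(event), so a multi-HTLC
trampoline forward surfaces exactly one PaymentForwarded.

    // Standalone sketch; `PaymentForwarded` and `claim_one` are hypothetical
    // stand-ins for the real event type and claim helper.
    #[derive(Debug, PartialEq)]
    struct PaymentForwarded {
        prev_htlc_count: usize,
    }

    // Stand-in for claim_funds_from_htlc_forward_hop: performs one claim and
    // emits whatever event the caller's closure decides on.
    fn claim_one(make_event: impl Fn() -> Option<PaymentForwarded>) -> Option<PaymentForwarded> {
        make_event()
    }

    fn main() {
        let previous_hops = ["htlc-a", "htlc-b", "htlc-c"]; // hypothetical incoming HTLCs
        let mut events = Vec::new();
        for (i, _hop) in previous_hops.iter().enumerate() {
            // Only the first claim's closure produces Some(..); the rest produce None.
            let event = claim_one(|| {
                (i == 0).then(|| PaymentForwarded { prev_htlc_count: previous_hops.len() })
            });
            events.extend(event);
        }
        // Exactly one event is emitted for the whole forward.
        assert_eq!(events, vec![PaymentForwarded { prev_htlc_count: 3 }]);
    }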
Co-authored-by: Arik Sosman
Co-authored-by: Maurice Poirrier
---
 lightning/src/ln/channelmanager.rs | 42 +++++++++++++++++++++++++++++-
 1 file changed, 41 insertions(+), 1 deletion(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 852ec290c2f..a0d8b5b12dc 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -9620,7 +9620,47 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 					send_timestamp,
 				);
 			},
-			HTLCSource::TrampolineForward { .. } => todo!(),
+			HTLCSource::TrampolineForward { previous_hop_data, .. } => {
+				// Only emit a single event for trampoline claims.
+				let prev_htlcs: Vec<events::HTLCLocator> =
+					previous_hop_data.iter().map(Into::into).collect();
+				for (i, current_previous_hop_data) in previous_hop_data.into_iter().enumerate() {
+					self.claim_funds_from_htlc_forward_hop(
+						payment_preimage,
+						|_: Option<u64>| -> Option<events::Event> {
+							if i == 0 {
+								Some(events::Event::PaymentForwarded {
+									prev_htlcs: prev_htlcs.clone(),
+									// TODO: When trampoline payments are tracked in our
+									// pending_outbound_payments, we'll be able to provide all the
+									// outgoing htlcs for this forward.
+									next_htlcs: vec![events::HTLCLocator {
+										channel_id: next_channel_id,
+										user_channel_id: next_user_channel_id,
+										node_id: Some(next_channel_counterparty_node_id),
+									}],
+									// TODO: When trampoline payments are tracked in our
+									// pending_outbound_payments, we'll be able to look up our total
+									// fee earnings.
+									total_fee_earned_msat: None,
+									skimmed_fee_msat,
+									claim_from_onchain_tx: from_onchain,
+									outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+								})
+							} else {
+								None
+							}
+						},
+						startup_replay,
+						next_channel_counterparty_node_id,
+						next_channel_outpoint,
+						next_channel_id,
+						current_previous_hop_data,
+						attribution_data.clone(),
+						send_timestamp,
+					);
+				}
+			},
 		}
 	}

From 4420fd8893411da04b26527932e459b90571ec13 Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Thu, 20 Nov 2025 11:04:59 -0500
Subject: [PATCH 18/23] ln/refactor: add blinded forwarding failure helper function

We'll want this extracted when we need to handle trampoline and regular
forwards.

Co-authored-by: Arik Sosman
Co-authored-by: Maurice Poirrier
---
 lightning/src/ln/channelmanager.rs | 100 ++++++++++++++++++-----------
 1 file changed, 62 insertions(+), 38 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index a0d8b5b12dc..f2e2e2d56ad 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -8731,6 +8731,19 @@ where
 			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
 		}

+		let push_forward_htlcs_failure =
+			|prev_outbound_scid_alias: u64, failure: HTLCForwardInfo| {
+				let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
+				match forward_htlcs.entry(prev_outbound_scid_alias) {
+					hash_map::Entry::Occupied(mut entry) => {
+						entry.get_mut().push(failure);
+					},
+					hash_map::Entry::Vacant(entry) => {
+						entry.insert(vec![failure]);
+					},
+				}
+			};
+
 		//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
 		//identify whether we sent it or not based on the (I presume) very different runtime
 		//between the branches here. We should make this async and move it into the forward HTLCs
@@ -8789,45 +8802,19 @@
 					if blinded_failure.is_some() { "blinded " } else { "" },
 					onion_error
 				);
-				// In case of trampoline + phantom we prioritize the trampoline failure over the phantom failure.
-				// TODO: Correctly wrap the error packet twice if failing back a trampoline + phantom HTLC.
-				let secondary_shared_secret = trampoline_shared_secret.or(*phantom_shared_secret);
-				let failure = match blinded_failure {
-					Some(BlindedFailure::FromIntroductionNode) => {
-						let blinded_onion_error = HTLCFailReason::reason(
-							LocalHTLCFailureReason::InvalidOnionBlinding,
-							vec![0; 32],
-						);
-						let err_packet = blinded_onion_error.get_encrypted_failure_packet(
-							incoming_packet_shared_secret,
-							&secondary_shared_secret,
-						);
-						HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
-					},
-					Some(BlindedFailure::FromBlindedNode) => HTLCForwardInfo::FailMalformedHTLC {
-						htlc_id: *htlc_id,
-						failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(),
-						sha256_of_onion: [0; 32],
-					},
-					None => {
-						let err_packet = onion_error.get_encrypted_failure_packet(
-							incoming_packet_shared_secret,
-							&secondary_shared_secret,
-						);
-						HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
-					},
-				};
-				let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
-				match forward_htlcs.entry(*prev_outbound_scid_alias) {
-					hash_map::Entry::Occupied(mut entry) => {
-						entry.get_mut().push(failure);
-					},
-					hash_map::Entry::Vacant(entry) => {
-						entry.insert(vec![failure]);
-					},
-				}
-				mem::drop(forward_htlcs);
+				push_forward_htlcs_failure(
+					*prev_outbound_scid_alias,
+					get_htlc_forward_failure(
+						blinded_failure,
+						onion_error,
+						incoming_packet_shared_secret,
+						trampoline_shared_secret,
+						phantom_shared_secret,
+						*htlc_id,
+					),
+				);
+
 				let mut pending_events = self.pending_events.lock().unwrap();
 				pending_events.push_back((
 					events::Event::HTLCHandlingFailed {
@@ -13088,6 +13075,43 @@
 	}
 }

+/// Constructs an HTLC forward failure for sending back to the previous hop, converting to a blinded
+/// failure where appropriate.
+///
+/// When both trampoline and phantom secrets are present, the trampoline secret takes priority
+/// for error encryption.
+fn get_htlc_forward_failure(
+	blinded_failure: &Option<BlindedFailure>, onion_error: &HTLCFailReason,
+	incoming_packet_shared_secret: &[u8; 32], trampoline_shared_secret: &Option<[u8; 32]>,
+	phantom_shared_secret: &Option<[u8; 32]>, htlc_id: u64,
+) -> HTLCForwardInfo {
+	// TODO: Correctly wrap the error packet twice if failing back a trampoline + phantom HTLC.
+	let secondary_shared_secret = trampoline_shared_secret.or(*phantom_shared_secret);
+	match blinded_failure {
+		Some(BlindedFailure::FromIntroductionNode) => {
+			let blinded_onion_error =
+				HTLCFailReason::reason(LocalHTLCFailureReason::InvalidOnionBlinding, vec![0; 32]);
+			let err_packet = blinded_onion_error.get_encrypted_failure_packet(
+				incoming_packet_shared_secret,
+				&secondary_shared_secret,
+			);
+			HTLCForwardInfo::FailHTLC { htlc_id, err_packet }
+		},
+		Some(BlindedFailure::FromBlindedNode) => HTLCForwardInfo::FailMalformedHTLC {
+			htlc_id,
+			failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(),
+			sha256_of_onion: [0; 32],
+		},
+		None => {
+			let err_packet = onion_error.get_encrypted_failure_packet(
+				incoming_packet_shared_secret,
+				&secondary_shared_secret,
+			);
+			HTLCForwardInfo::FailHTLC { htlc_id, err_packet }
+		},
+	}
+}
+
 /// Parameters used with [`create_bolt11_invoice`].
 ///
 /// [`create_bolt11_invoice`]: ChannelManager::create_bolt11_invoice

From 606330e292f39dbf08f9c0213fe1bed27b9b157f Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Mon, 1 Dec 2025 15:50:46 -0500
Subject: [PATCH 19/23] ln: add trampoline routing failure handling

Implement failure propagation for `HTLCSource::TrampolineForward` by
iterating through previous hop data and failing each HTLC with
`TemporaryTrampolineFailure`.

Note that tests should be added once trampoline forwarding is complete.

Co-authored-by: Arik Sosman
Co-authored-by: Maurice Poirrier
---
 lightning/src/ln/channelmanager.rs | 59 +++++++++++++++++++++++++++++-
 1 file changed, 58 insertions(+), 1 deletion(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index f2e2e2d56ad..adc6447837e 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -8825,7 +8825,64 @@ where
 					None,
 				));
 			},
-			HTLCSource::TrampolineForward { .. } => todo!(),
+			HTLCSource::TrampolineForward {
+				previous_hop_data,
+				incoming_trampoline_shared_secret,
+				..
+			} => {
+				// TODO: what do we want to do with this given we do not wish to propagate it directly?
+				let _decoded_onion_failure =
+					onion_error.decode_onion_failure(&self.secp_ctx, &self.logger, &source);
+				let incoming_trampoline_shared_secret = Some(*incoming_trampoline_shared_secret);
+
+				// TODO: when we receive a failure from a single outgoing trampoline HTLC, we don't
+				// necessarily want to fail all of our incoming HTLCs back yet. We may have other
+				// outgoing HTLCs that need to resolve first. This will be tracked in our
+				// pending_outbound_payments in a followup.
+				for current_hop_data in previous_hop_data {
+					let incoming_packet_shared_secret =
+						&current_hop_data.incoming_packet_shared_secret;
+					let channel_id = &current_hop_data.channel_id;
+					let short_channel_id = &current_hop_data.prev_outbound_scid_alias;
+					let htlc_id = &current_hop_data.htlc_id;
+					let blinded_failure = &current_hop_data.blinded_failure;
+					log_trace!(
+						WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)),
+						"Failing {}HTLC with payment_hash {} backwards from us following Trampoline forwarding failure: {:?}",
+						if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
+					);
+					let onion_error = HTLCFailReason::reason(
+						LocalHTLCFailureReason::TemporaryTrampolineFailure,
+						Vec::new(),
+					);
+					push_forward_htlcs_failure(
+						*short_channel_id,
+						get_htlc_forward_failure(
+							blinded_failure,
+							&onion_error,
+							incoming_packet_shared_secret,
+							&incoming_trampoline_shared_secret,
+							&None,
+							*htlc_id,
+						),
+					);
+				}
+
+				// We only want to emit a single event for trampoline failures, so we do it once
+				// we've failed back all of our incoming HTLCs.
+				let mut pending_events = self.pending_events.lock().unwrap();
+				pending_events.push_back((
+					events::Event::HTLCHandlingFailed {
+						prev_channel_ids: previous_hop_data
+							.iter()
+							.map(|prev| prev.channel_id)
+							.collect(),
+						failure_type,
+						failure_reason: Some(onion_error.into()),
+					},
+					None,
+				));
+			},
 		}
 	}

From c3512391c4249875aab7de056ddfdc6b0cad6cd5 Mon Sep 17 00:00:00 2001
From: Maurice
Date: Mon, 25 Aug 2025 15:33:44 -0400
Subject: [PATCH 20/23] ln/refactor: extract channelmonitor recovery to external helper

Move recovery logic for `HTLCSource::PreviousHopData` into
`reconcile_pending_htlcs_with_monitor` to prepare for trampoline forward
reuse.
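For reference, the core of the logic being extracted is a nested retain over
the pending-forwards map: drop the forwards that the monitor has taken over,
then drop any per-channel queue left empty. A minimal standalone sketch, using
plain u64 stand-ins for the real SCID and HTLC types:

    use std::collections::HashMap;

    // Sketch of the retain-within-retain pruning performed by the helper;
    // keys are SCIDs and values are queues of pending HTLC ids (stand-ins).
    fn prune_forwards(forwards: &mut HashMap<u64, Vec<u64>>, matches: impl Fn(u64) -> bool) {
        forwards.retain(|_scid, htlcs| {
            // Remove the HTLCs the monitor is now responsible for...
            htlcs.retain(|&htlc_id| !matches(htlc_id));
            // ...then keep the channel's entry only if something remains.
            !htlcs.is_empty()
        });
    }

    fn main() {
        let mut forwards: HashMap<u64, Vec<u64>> = HashMap::new();
        forwards.insert(42, vec![1, 2]);
        forwards.insert(43, vec![2]);
        prune_forwards(&mut forwards, |htlc_id| htlc_id == 2);
        assert_eq!(forwards[&42], vec![1]);
        assert!(!forwards.contains_key(&43)); // emptied entries are dropped
    }

Returning !htlcs.is_empty() from the outer retain is what keeps the map free
of empty per-channel queues.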
Co-authored-by: Arik Sosman
Co-authored-by: Maurice Poirrier
---
 lightning/src/ln/channelmanager.rs | 108 ++++++++++++++++-----------
 1 file changed, 66 insertions(+), 42 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index adc6447837e..c086f41cf81 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -18244,51 +18244,17 @@ where
 						let htlc_id = SentHTLCId::from_source(&htlc_source);
 						match htlc_source {
 							HTLCSource::PreviousHopData(prev_hop_data) => {
-								let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
-									info.prev_funding_outpoint == prev_hop_data.outpoint
-										&& info.prev_htlc_id == prev_hop_data.htlc_id
-								};
-								// The ChannelMonitor is now responsible for this HTLC's
-								// failure/success and will let us know what its outcome is. If we
-								// still have an entry for this HTLC in `forward_htlcs`,
-								// `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not
-								// persisted after the monitor was when forwarding the payment.
-								dedup_decode_update_add_htlcs(
+								reconcile_pending_htlcs_with_monitor(
+									&mut forward_htlcs_legacy,
+									&mut pending_events_read,
+									&mut pending_intercepted_htlcs_legacy,
 									&mut decode_update_add_htlcs,
-									&prev_hop_data,
-									"HTLC was forwarded to the closed channel",
-									&args.logger,
-								);
-								dedup_decode_update_add_htlcs(
 									&mut decode_update_add_htlcs_legacy,
-									&prev_hop_data,
-									"HTLC was forwarded to the closed channel",
-									&args.logger,
+									prev_hop_data,
+									&logger,
+									htlc.payment_hash,
+									monitor.channel_id(),
 								);
-								forward_htlcs_legacy.retain(|_, forwards| {
-									forwards.retain(|forward| {
-										if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
-											if pending_forward_matches_htlc(&htlc_info) {
-												log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
-													&htlc.payment_hash, &monitor.channel_id());
-												false
-											} else { true }
-										} else { true }
-									});
-									!forwards.is_empty()
-								});
-								pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| {
-									if pending_forward_matches_htlc(&htlc_info) {
-										log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
-											&htlc.payment_hash, &monitor.channel_id());
-										pending_events_read.retain(|(event, _)| {
-											if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
-												intercepted_id != ev_id
-											} else { true }
-										});
-										false
-									} else { true }
-								});
 							},
 							HTLCSource::TrampolineForward { .. } => todo!(),
 							HTLCSource::OutboundRoute {
@@ -19205,6 +19171,64 @@ where
 	}
 }

+/// Removes pending HTLC entries that the ChannelMonitor has already taken responsibility for,
+/// cleaning up state mismatches that can occur during restart.
+fn reconcile_pending_htlcs_with_monitor(
+	forward_htlcs_legacy: &mut HashMap<u64, Vec<HTLCForwardInfo>>,
+	pending_events_read: &mut VecDeque<(Event, Option<EventCompletionAction>)>,
+	pending_intercepted_htlcs_legacy: &mut HashMap<InterceptId, PendingAddHTLCInfo>,
+	decode_update_add_htlcs: &mut HashMap<u64, Vec<msgs::UpdateAddHTLC>>,
+	decode_update_add_htlcs_legacy: &mut HashMap<u64, Vec<msgs::UpdateAddHTLC>>,
+	prev_hop_data: HTLCPreviousHopData, logger: &impl Logger, payment_hash: PaymentHash,
+	channel_id: ChannelId,
+) {
+	let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
+		info.prev_funding_outpoint == prev_hop_data.outpoint
+			&& info.prev_htlc_id == prev_hop_data.htlc_id
+	};
+	// The ChannelMonitor is now responsible for this HTLC's
+	// failure/success and will let us know what its outcome is. If we
If we + // still have an entry for this HTLC in `forward_htlcs`, + // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not + // persisted after the monitor was when forwarding the payment. + dedup_decode_update_add_htlcs( + decode_update_add_htlcs, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &logger, + ); + dedup_decode_update_add_htlcs( + decode_update_add_htlcs_legacy, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &logger, + ); + forward_htlcs_legacy.retain(|_, forwards| { + forwards.retain(|forward| { + if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", + &payment_hash, &channel_id); + false + } else { true } + } else { true } + }); + !forwards.is_empty() + }); + pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", + &payment_hash, &channel_id); + pending_events_read.retain(|(event, _)| { + if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { + intercepted_id != ev_id + } else { true } + }); + false + } else { true } + }); +} + #[cfg(test)] mod tests { use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; From 17f432a14b792d4ab83fb5fe01141ebe0c33d116 Mon Sep 17 00:00:00 2001 From: Maurice Date: Mon, 25 Aug 2025 15:38:34 -0400 Subject: [PATCH 21/23] ln: add channel monitor recovery for trampoline forwards Implement channel monitor recovery for trampoline forwards iterating over all hop data and updating pending forwards. --- lightning/src/ln/channelmanager.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index c086f41cf81..740563f2844 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18256,7 +18256,21 @@ where monitor.channel_id(), ); }, - HTLCSource::TrampolineForward { .. } => todo!(), + HTLCSource::TrampolineForward { previous_hop_data, .. } => { + for prev_hop_data in previous_hop_data { + reconcile_pending_htlcs_with_monitor( + &mut forward_htlcs_legacy, + &mut pending_events_read, + &mut pending_intercepted_htlcs_legacy, + &mut decode_update_add_htlcs, + &mut decode_update_add_htlcs_legacy, + prev_hop_data, + &logger, + htlc.payment_hash, + monitor.channel_id(), + ); + } + }, HTLCSource::OutboundRoute { payment_id, session_priv, From 6a2421f5c08c6c4e8b0422c5e6ac8bd7b0a7b245 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 15 Jan 2026 16:27:50 -0500 Subject: [PATCH 22/23] ln/refactor: move outgoing payment replay code into helper function --- lightning/src/ln/channelmanager.rs | 223 +++++++++++++++++------------ 1 file changed, 130 insertions(+), 93 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 740563f2844..d1dd7829a69 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17357,6 +17357,91 @@ fn dedup_decode_update_add_htlcs( }); } +/// Checks if a forwarded HTLC claim needs to be replayed on startup, returning None if it doesn't +/// need to be replayed. 
When the HTLC needs to be claimed, it returns a bool indicating whether +/// deserialization of should be failed due to missing information. +fn prev_hop_needs_claim_replay( + prev_hop: &HTLCPreviousHopData, payment_preimage: PaymentPreimage, + inbound_edge_monitor: Option<&ChannelMonitor>, + short_to_chan_info: &HashMap, logger: &L, +) -> Option +where + L::Target: Logger, +{ + // Note that for channels which have gone to chain, `get_all_current_outbound_htlcs` is never + // pruned and always returns a constant set until the monitor is removed/archived. Thus, we + // want to skip replaying claims that have definitely been resolved on-chain. + + // If the inbound monitor is not present, we assume it was fully resolved and properly archived, + // implying this payment had plenty of time to get claimed and we can safely skip any further + // attempts to claim it (they wouldn't succeed anyway as we don't have a monitor against which + // to do so). + let inbound_edge_monitor = inbound_edge_monitor?; + + // Second, if the inbound edge of the payment's monitor has been fully claimed we've had at + // least `ANTI_REORG_DELAY` blocks to get any PaymentForwarded event(s) to the user and assume + // that there's no need to try to replay the claim just for that. + let inbound_edge_balances = inbound_edge_monitor.get_claimable_balances(); + if inbound_edge_balances.is_empty() { + return None; + } + + let mut fail_read = false; + if prev_hop.counterparty_node_id.is_none() { + // We no longer support claiming an HTLC where we don't have the counterparty_node_id + // available if the claim has to go to a closed channel. Its possible we can get away with + // it if the channel is not yet closed, but its by no means a guarantee. + + // Thus, in this case we are a bit more aggressive with our pruning - if we have no use for + // the claim (because the inbound edge of the payment's monitor has already claimed the + // HTLC) we skip trying to replay the claim. + let htlc_payment_hash: PaymentHash = payment_preimage.into(); + let balance_could_incl_htlc = |bal| match bal { + &Balance::ClaimableOnChannelClose { .. } => { + // The channel is still open, assume we can still claim against it + true + }, + &Balance::MaybePreimageClaimableHTLC { payment_hash, .. } => { + payment_hash == htlc_payment_hash + }, + _ => false, + }; + let htlc_may_be_in_balances = inbound_edge_balances.iter().any(balance_could_incl_htlc); + if !htlc_may_be_in_balances { + return None; + } + + // First check if we're absolutely going to fail - if we need to replay this claim to get + // the preimage into the inbound edge monitor but the channel is closed (and thus we'll + // immediately panic if we call claim_funds_from_hop). + if short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).is_none() { + log_error!(logger, + "We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124. \ + All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1", + htlc_payment_hash, + payment_preimage, + ); + fail_read = true; + } + + // At this point we're confident we need the claim, but the inbound edge channel is still + // live. As long as this remains the case, we can conceivably proceed, but we run some risk + // of panicking at runtime. The user ideally should have read the release notes and we + // wouldn't be here, but we go ahead and let things run in the hope that it'll all just + // work out. 
+ log_error!(logger, + "We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably. \ + As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime! \ + All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1. \ + Continuing anyway, though panics may occur!", + htlc_payment_hash, + payment_preimage, + ); + } + + Some(fail_read) +} + // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the // SipmleArcChannelManager type: impl< @@ -18369,107 +18454,59 @@ where // preimages from it which may be needed in upstream channels for forwarded // payments. let mut fail_read = false; - let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs() + let outbound_claimed_htlcs_iter = monitor + .get_all_current_outbound_htlcs() .into_iter() .filter_map(|(htlc_source, (htlc, preimage_opt))| { - if let HTLCSource::PreviousHopData(prev_hop) = &htlc_source { - if let Some(payment_preimage) = preimage_opt { - let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.channel_id); - // Note that for channels which have gone to chain, - // `get_all_current_outbound_htlcs` is never pruned and always returns - // a constant set until the monitor is removed/archived. Thus, we - // want to skip replaying claims that have definitely been resolved - // on-chain. - - // If the inbound monitor is not present, we assume it was fully - // resolved and properly archived, implying this payment had plenty - // of time to get claimed and we can safely skip any further - // attempts to claim it (they wouldn't succeed anyway as we don't - // have a monitor against which to do so). - let inbound_edge_monitor = if let Some(monitor) = inbound_edge_monitor { - monitor - } else { - return None; - }; - // Second, if the inbound edge of the payment's monitor has been - // fully claimed we've had at least `ANTI_REORG_DELAY` blocks to - // get any PaymentForwarded event(s) to the user and assume that - // there's no need to try to replay the claim just for that. - let inbound_edge_balances = inbound_edge_monitor.get_claimable_balances(); - if inbound_edge_balances.is_empty() { - return None; - } + let payment_preimage = preimage_opt?; - if prev_hop.counterparty_node_id.is_none() { - // We no longer support claiming an HTLC where we don't have - // the counterparty_node_id available if the claim has to go to - // a closed channel. Its possible we can get away with it if - // the channel is not yet closed, but its by no means a - // guarantee. - - // Thus, in this case we are a bit more aggressive with our - // pruning - if we have no use for the claim (because the - // inbound edge of the payment's monitor has already claimed - // the HTLC) we skip trying to replay the claim. - let htlc_payment_hash: PaymentHash = payment_preimage.into(); - let balance_could_incl_htlc = |bal| match bal { - &Balance::ClaimableOnChannelClose { .. } => { - // The channel is still open, assume we can still - // claim against it - true - }, - &Balance::MaybePreimageClaimableHTLC { payment_hash, .. 
} => { - payment_hash == htlc_payment_hash - }, - _ => false, - }; - let htlc_may_be_in_balances = - inbound_edge_balances.iter().any(balance_could_incl_htlc); - if !htlc_may_be_in_balances { - return None; - } + let prev_htlcs = match &htlc_source { + HTLCSource::PreviousHopData(prev_hop) => vec![prev_hop], + // If it was an outbound payment, we've handled it above - if a preimage + // came in and we persisted the `ChannelManager` we either handled it + // and are good to go or the channel force-closed - we don't have to + // handle the channel still live case here. + _ => vec![], + }; - // First check if we're absolutely going to fail - if we need - // to replay this claim to get the preimage into the inbound - // edge monitor but the channel is closed (and thus we'll - // immediately panic if we call claim_funds_from_hop). - if short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).is_none() { - log_error!(args.logger, - "We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124.\ - All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1", - htlc_payment_hash, - payment_preimage, - ); - fail_read = true; - } + let prev_htlcs_count = prev_htlcs.len(); + if prev_htlcs_count == 0 { + return None; + } - // At this point we're confident we need the claim, but the - // inbound edge channel is still live. As long as this remains - // the case, we can conceivably proceed, but we run some risk - // of panicking at runtime. The user ideally should have read - // the release notes and we wouldn't be here, but we go ahead - // and let things run in the hope that it'll all just work out. - log_error!(args.logger, - "We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably.\ - As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\ - All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\ - Continuing anyway, though panics may occur!", - htlc_payment_hash, - payment_preimage, - ); + for prev_hop in prev_htlcs { + let inbound_edge_monitor = + args.channel_monitors.get(&prev_hop.channel_id).copied(); + if let Some(fail_claim_read) = prev_hop_needs_claim_replay( + prev_hop, + payment_preimage, + inbound_edge_monitor, + &short_to_chan_info, + &args.logger, + ) { + // We can only fail to read from disk for legacy HTLCs that have + // a single prev_htlc. If we could fail_claim_read for multiple + // prev_htlcs, it wouldn't be correct to exit early on our first + // claimable prev_hop (because a subsequent one may + // fail_claim_read). + if fail_claim_read { + debug_assert!(prev_htlcs_count == 1); } - Some((htlc_source, payment_preimage, htlc.amount_msat, - is_channel_closed, monitor.get_counterparty_node_id(), - monitor.get_funding_txo(), monitor.channel_id())) - } else { None } - } else { - // If it was an outbound payment, we've handled it above - if a preimage - // came in and we persisted the `ChannelManager` we either handled it and - // are good to go or the channel force-closed - we don't have to handle the - // channel still live case here. 
-								None
+							fail_read |= fail_claim_read;
+							return Some((
+								htlc_source,
+								payment_preimage,
+								htlc.amount_msat,
+								is_channel_closed,
+								monitor.get_counterparty_node_id(),
+								monitor.get_funding_txo(),
+								monitor.channel_id(),
+							));
+						}
+					}
+
+					None
 				});
 			for tuple in outbound_claimed_htlcs_iter {
 				pending_claims_to_replay.push(tuple);

From 2c5269034aaca0edf7fa1eafd3883a7a72a159f9 Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Fri, 16 Jan 2026 13:03:55 -0500
Subject: [PATCH 23/23] ln: handle trampoline claims on restart

This commit uses the existing outbound payment claims replay logic to restore
trampoline claims.

If any single previous hop in an htlc source with multiple previous hops
requires a claim, we represent this with a single outbound claimed htlc,
because we assume that *all* of the incoming htlcs are represented in the
source and will be appropriately claimed (rather than submitting multiple
claims, which will end up being duplicates of each other). This is the case
for trampoline payments, where the htlc_source stores all previous hops.
---
 lightning/src/ln/channelmanager.rs | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index d1dd7829a69..ce75b7b54f3 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -18462,6 +18462,9 @@ where
 					let prev_htlcs = match &htlc_source {
 						HTLCSource::PreviousHopData(prev_hop) => vec![prev_hop],
+						HTLCSource::TrampolineForward { previous_hop_data, .. } => {
+							previous_hop_data.iter().collect()
+						},
 						// If it was an outbound payment, we've handled it above - if a preimage
 						// came in and we persisted the `ChannelManager` we either handled it
@@ -18495,6 +18498,10 @@ where
 								fail_read |= fail_claim_read;
 								return Some((
+									// When we have multiple prev_htlcs we assume that they all
+									// share the same htlc_source which contains all previous hops,
+									// so we can exit on the first claimable prev_hop because this
+									// will result in all prev_hops being claimed.
 									htlc_source,
 									payment_preimage,
 									htlc.amount_msat,