Skip to content

Commit 58edcb2

Browse files
Test restart-claim of two MPP holding cell HTLCs
Test that if, prior to shutdown, we had two inbound MPP-part HTLCs received over the same channel with their claims in the holding cell, and the holding cell was lost before restart, those HTLCs will still be claimed backwards after the restart. Test largely written by Claude.
1 parent 94cb95d commit 58edcb2

3 files changed

Lines changed: 161 additions & 9 deletions

File tree

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3519,8 +3519,9 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode
35193519
.node
35203520
.handle_commitment_signed_batch_test(node_a_id, &as_htlc_fulfill.commitment_signed);
35213521
check_added_monitors(&nodes[1], 1);
3522-
let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
3522+
let (a, raa, holding_cell) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
35233523
assert!(a.is_none());
3524+
assert!(holding_cell.is_empty());
35243525

35253526
nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
35263527
check_added_monitors(&nodes[1], 1);

lightning/src/ln/functional_test_utils.rs

Lines changed: 14 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -2672,28 +2672,33 @@ pub fn commitment_signed_dance_through_cp_raa(
26722672
node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool,
26732673
includes_claim: bool,
26742674
) -> Option<MessageSendEvent> {
2675-
let (extra_msg_option, bs_revoke_and_ack) =
2675+
let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) =
26762676
do_main_commitment_signed_dance(node_a, node_b, fail_backwards);
2677+
assert!(node_b_holding_cell_htlcs.is_empty());
26772678
node_a.node.handle_revoke_and_ack(node_b.node.get_our_node_id(), &bs_revoke_and_ack);
26782679
check_added_monitors(node_a, if includes_claim { 0 } else { 1 });
26792680
extra_msg_option
26802681
}
26812682

26822683
/// Does the main logic in the commitment_signed dance. After the first `commitment_signed` has
2683-
/// been delivered, this method picks up and delivers the response `revoke_and_ack` and
2684-
/// `commitment_signed`, returning the recipient's `revoke_and_ack` and any extra message it may
2685-
/// have included.
2684+
/// been delivered, delivers the response `revoke_and_ack` and `commitment_signed`, and returns:
2685+
/// - The recipient's `revoke_and_ack`
2686+
/// - The recipient's extra message (if any) after handling the commitment_signed
2687+
/// - Any messages released from the initiator's holding cell after handling the `revoke_and_ack`
2688+
/// (e.g., a second HTLC on the same channel)
26862689
pub fn do_main_commitment_signed_dance(
26872690
node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool,
2688-
) -> (Option<MessageSendEvent>, msgs::RevokeAndACK) {
2691+
) -> (Option<MessageSendEvent>, msgs::RevokeAndACK, Vec<MessageSendEvent>) {
26892692
let node_a_id = node_a.node.get_our_node_id();
26902693
let node_b_id = node_b.node.get_our_node_id();
26912694

26922695
let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs(node_a, &node_b_id);
26932696
check_added_monitors(&node_b, 0);
26942697
assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
26952698
node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack);
2696-
assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
2699+
// Handling the RAA may release HTLCs from node_b's holding cell (e.g., if multiple HTLCs
2700+
// were sent over the same channel and the second was queued behind the first).
2701+
let node_b_holding_cell_htlcs = node_b.node.get_and_clear_pending_msg_events();
26972702
check_added_monitors(&node_b, 1);
26982703
node_b.node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed);
26992704
let (bs_revoke_and_ack, extra_msg_option) = {
@@ -2716,7 +2721,7 @@ pub fn do_main_commitment_signed_dance(
27162721
assert!(node_a.node.get_and_clear_pending_events().is_empty());
27172722
assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
27182723
}
2719-
(extra_msg_option, bs_revoke_and_ack)
2724+
(extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs)
27202725
}
27212726

27222727
/// Runs the commitment_signed dance by delivering the commitment_signed and handling the
@@ -2733,9 +2738,10 @@ pub fn commitment_signed_dance_return_raa(
27332738
.node
27342739
.handle_commitment_signed_batch_test(node_b.node.get_our_node_id(), commitment_signed);
27352740
check_added_monitors(&node_a, 1);
2736-
let (extra_msg_option, bs_revoke_and_ack) =
2741+
let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) =
27372742
do_main_commitment_signed_dance(&node_a, &node_b, fail_backwards);
27382743
assert!(extra_msg_option.is_none());
2744+
assert!(node_b_holding_cell_htlcs.is_empty());
27392745
bs_revoke_and_ack
27402746
}
27412747

lightning/src/ln/reload_tests.rs

Lines changed: 145 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2092,3 +2092,148 @@ fn test_reload_node_without_preimage_fails_htlc() {
20922092
// nodes[0] should now have received the failure and generate PaymentFailed.
20932093
expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new());
20942094
}
2095+
2096+
#[test]
fn test_reload_with_mpp_claims_on_same_channel() {
	// Test that if a forwarding node has two HTLCs for the same MPP payment that were both
	// irrevocably removed on the outbound edge via claim but are still forwarded-and-unresolved
	// on the inbound edge, both HTLCs will be claimed backwards on restart.
	//
	// Topology:
	// nodes[0] ----chan_0_1----> nodes[1] ----chan_1_2_a----> nodes[2]
	//                                 \----chan_1_2_b----/
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let persister;
	let new_chain_monitor;
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes_1_deserialized;
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let node_0_id = nodes[0].node.get_our_node_id();
	let node_1_id = nodes[1].node.get_our_node_id();
	let node_2_id = nodes[2].node.get_our_node_id();

	let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 2_000_000, 0);
	let chan_1_2_a = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
	let chan_1_2_b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);

	let chan_id_0_1 = chan_0_1.2;
	let chan_id_1_2_a = chan_1_2_a.2;
	let chan_id_1_2_b = chan_1_2_b.2;

	// Send an MPP payment large enough that the router must split it across both outbound
	// channels. Each 1M sat outbound channel has 100M msat max in-flight, so 150M msat
	// requires splitting.
	let amt_msat = 150_000_000;
	let (route, payment_hash, payment_preimage, payment_secret) =
		get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);

	let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
	let onion = RecipientOnionFields::secret_only(payment_secret);
	nodes[0].node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap();
	check_added_monitors(&nodes[0], 1);

	// Forward the first HTLC nodes[0] -> nodes[1] -> nodes[2]. Note that the second HTLC is
	// released from the holding cell during the first HTLC's commitment_signed dance.
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event_1 = SendEvent::from_event(events.remove(0));

	nodes[1].node.handle_update_add_htlc(node_0_id, &payment_event_1.msgs[0]);
	check_added_monitors(&nodes[1], 0);
	nodes[1].node.handle_commitment_signed_batch_test(node_0_id, &payment_event_1.commitment_msg);
	check_added_monitors(&nodes[1], 1);
	let (_, raa, holding_cell_htlcs) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
	// The second MPP part was queued behind the first and is released by the RAA handling.
	assert_eq!(holding_cell_htlcs.len(), 1);
	let payment_event_2 = holding_cell_htlcs.into_iter().next().unwrap();
	nodes[1].node.handle_revoke_and_ack(node_0_id, &raa);
	check_added_monitors(&nodes[1], 1);

	nodes[1].node.process_pending_htlc_forwards();
	check_added_monitors(&nodes[1], 1);
	let mut events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let ev_1_2 = events.remove(0);
	pass_along_path(
		&nodes[1], &[&nodes[2]], amt_msat, payment_hash, Some(payment_secret), ev_1_2, false, None,
	);

	// Second HTLC: full path nodes[0] -> nodes[1] -> nodes[2]. PaymentClaimable expected at end.
	pass_along_path(
		&nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, Some(payment_secret),
		payment_event_2, true, None,
	);

	// Claim the HTLCs such that they're fully removed from the outbound edge, but disconnect
	// node_0<>node_1 so that they can't be claimed backwards by node_1.
	nodes[2].node.claim_funds(payment_preimage);
	check_added_monitors(&nodes[2], 2);
	expect_payment_claimed!(nodes[2], payment_hash, amt_msat);

	nodes[0].node.peer_disconnected(node_1_id);
	nodes[1].node.peer_disconnected(node_0_id);

	// Deliver both fulfills (one per outbound channel) from nodes[2] to nodes[1].
	let events = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 2);
	for ev in events {
		match ev {
			MessageSendEvent::UpdateHTLCs { ref node_id, ref updates, .. } => {
				assert_eq!(*node_id, node_1_id);
				assert_eq!(updates.update_fulfill_htlcs.len(), 1);
				nodes[1]
					.node
					.handle_update_fulfill_htlc(node_2_id, updates.update_fulfill_htlcs[0].clone());
				check_added_monitors(&nodes[1], 1);
				do_commitment_signed_dance(
					&nodes[1], &nodes[2], &updates.commitment_signed, false, false,
				);
			},
			_ => panic!("Unexpected event"),
		}
	}

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	for event in events {
		expect_payment_forwarded(
			event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false,
		);
	}

	// Clear the holding cell's claim entries on chan_0_1 before serialization.
	// This simulates a crash where both HTLCs were fully removed on the outbound edges but are
	// still present on the inbound edge without a resolution.
	nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1);

	let node_1_serialized = nodes[1].node.encode();
	let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode();
	let mon_1_2_a_serialized = get_monitor!(nodes[1], chan_id_1_2_a).encode();
	let mon_1_2_b_serialized = get_monitor!(nodes[1], chan_id_1_2_b).encode();

	reload_node!(
		nodes[1],
		node_1_serialized,
		&[&mon_0_1_serialized, &mon_1_2_a_serialized, &mon_1_2_b_serialized],
		persister,
		new_chain_monitor,
		nodes_1_deserialized,
		Some(true)
	);

	// When the claims are reconstructed during reload, PaymentForwarded events are regenerated.
	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	for event in events {
		expect_payment_forwarded(
			event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false,
		);
	}
	// Fetching events triggers the pending monitor updates (one per HTLC preimage) to be applied.
	check_added_monitors(&nodes[1], 2);

	// Reconnect nodes[1] to nodes[0]. Both claims should be in nodes[1]'s holding cell.
	let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]);
	reconnect_args.pending_cell_htlc_claims = (0, 2);
	reconnect_nodes(reconnect_args);

	// nodes[0] should now have received both fulfills and generate PaymentSent.
	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
}

0 commit comments

Comments (0)