Enable decoding new incoming HTLC onions when fully committed
This commit ensures that all new incoming HTLCs will only have their
onion decoded once they become fully committed, at which point we
decide how to proceed with each one. As a result, we'll obtain
`HTLCHandlingFailed` events for _any_ failed HTLC that comes across a
channel.
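
For context, a minimal sketch of how a consumer might react to these events. The event-loop wiring and the reactions are illustrative only, not part of this commit; the variant names are taken from the diffs below.

    use lightning::events::{Event, HTLCDestination};

    // Sketch: dispatch on the event now emitted for every fully committed
    // inbound HTLC that we ultimately failed to handle.
    fn on_event(event: Event) {
        if let Event::HTLCHandlingFailed { prev_channel_id, failed_next_destination } = event {
            match failed_next_destination {
                // The onion failed to decode once the HTLC was committed.
                HTLCDestination::InvalidOnion => { /* log and move on */ },
                // The forward failed on the next-hop channel.
                HTLCDestination::NextHopChannel { node_id: _, channel_id: _ } => {},
                // The claim failed at the final hop.
                HTLCDestination::FailedPayment { payment_hash: _ } => {},
                // e.g. UnknownNextHop { requested_forward_scid }
                _ => {},
            }
            let _ = prev_channel_id;
        }
    }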

We will now start writing channels with the new serialization version
(4), and we will still be able to downgrade back to the commit that
introduced it, since that commit already supports reading version 4.
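
Roughly, the version-prefix contract this relies on looks like the sketch below. The helper names and the MIN_SERIALIZATION_VERSION value are assumptions for illustration; the real logic is the write_ver_prefix!/read_ver_prefix! macro pair in LDK's serialization utilities.

    const SERIALIZATION_VERSION: u8 = 4; // written from this commit onward
    const MIN_SERIALIZATION_VERSION: u8 = 3; // assumed floor, for illustration

    // Two bytes prefix the serialized channel: the version it was written
    // with, and the minimum version a reader must understand.
    fn write_ver_prefix(w: &mut Vec<u8>) {
        w.push(SERIALIZATION_VERSION);
        w.push(MIN_SERIALIZATION_VERSION);
    }

    fn read_ver_prefix(r: &[u8], our_max_supported: u8) -> Result<u8, &'static str> {
        let (ver, min_ver) = (r[0], r[1]);
        // A downgrade keeps working as long as the older reader supports at
        // least `min_ver`; readers since version-4 support was added do.
        if min_ver > our_max_supported {
            return Err("channel data requires a newer reader");
        }
        Ok(ver)
    }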

Note that existing pending inbound HTLCs may already have their
resolution computed if they were received on a previous version of
LDK. We must continue to support those until we no longer allow
downgrading beyond this commit.
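
The two resolution shapes in question appear in the channel.rs hunks below; distilled here with stub field types so the sketch stands alone:

    // Stubs standing in for the real lightning types.
    struct PendingHTLCStatus;
    struct UpdateAddHTLC;

    // Per-HTLC resolution state for inbound HTLCs.
    enum InboundHTLCResolution {
        // Legacy (serialization version <= 3): the onion was decoded as soon
        // as update_add_htlc arrived, so the decision is stored up front.
        Resolved { pending_htlc_status: PendingHTLCStatus },
        // New (serialization version 4): hold the raw message and decode the
        // onion only once the HTLC is fully committed.
        Pending { update_add_htlc: UpdateAddHTLC },
    }
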
wpaulino committed Mar 29, 2024
1 parent da7391f commit 11c18ec
Showing 11 changed files with 327 additions and 273 deletions.
lightning/src/events/mod.rs: 6 changes (4 additions & 2 deletions)
@@ -1021,9 +1021,11 @@ pub enum Event {
/// * When an unknown SCID is requested for forwarding a payment.
/// * Expected MPP amount has already been reached
/// * The HTLC has timed out
+/// * The HTLC failed to meet the forwarding requirements (i.e. insufficient fees paid, or a
+/// CLTV that is too soon)
///
-/// This event, however, does not get generated if an HTLC fails to meet the forwarding
-/// requirements (i.e. insufficient fees paid, or a CLTV that is too soon).
+/// The list above is not meant to cover all scenarios. This event should be expected for each
+/// incoming HTLC that has been fully committed but we failed to handle.
HTLCHandlingFailed {
/// The channel over which the HTLC was received.
prev_channel_id: ChannelId,
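
The test hunks below all gain the same three-step shape now that failures only surface once the HTLC is committed. Excerpted from the hunks that follow (test macros from LDK's functional-test framework; not standalone code):

    // 1. Deliver update_add_htlc and complete the commitment_signed dance;
    //    the HTLC is accepted into the commitment without decoding its onion.
    // 2. Process pending forwards; only now is the onion decoded and the
    //    HTLC failed back, surfacing an HTLCHandlingFailed event.
    expect_pending_htlcs_forwardable!(nodes[2]);
    expect_htlc_handling_failed_destinations!(
        nodes[2].node.get_and_clear_pending_events(),
        &[HTLCDestination::InvalidOnion]
    );
    // 3. Failing the HTLC backwards produces a channel monitor update.
    check_added_monitors(&nodes[2], 1);
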
lightning/src/ln/blinded_payment_tests.rs: 64 changes (54 additions & 10 deletions)
@@ -274,8 +274,10 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) {
// We need the session priv to construct a bogus onion packet later.
*nodes[0].keys_manager.override_random_bytes.lock().unwrap() = Some([3; 32]);
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
-let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
-let chan_upd_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents;
+let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+let chan_upd_1_2 = chan_1_2.0.contents;
+let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
+let chan_upd_2_3 = chan_2_3.0.contents;

let amt_msat = 5000;
let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
@@ -327,18 +329,27 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) {
check_added_monitors!(nodes[1], 0);
do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_0_1.commitment_signed, true, true);

+expect_pending_htlcs_forwardable!(nodes[1]);
+check_added_monitors!(nodes[1], 1);
+
if intro_fails {
let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
+let failed_destination = match check {
+ForwardCheckFail::InboundOnionCheck => HTLCDestination::InvalidOnion,
+ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::FailedPayment { payment_hash },
+ForwardCheckFail::OutboundChannelCheck =>
+HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 },
+};
+expect_htlc_handling_failed_destinations!(
+nodes[1].node.get_and_clear_pending_events(), &[failed_destination.clone()]
+);
expect_payment_failed_conditions(&nodes[0], payment_hash, false,
PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
return
}

-expect_pending_htlcs_forwardable!(nodes[1]);
-check_added_monitors!(nodes[1], 1);
-
let mut updates_1_2 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
let mut update_add = &mut updates_1_2.update_add_htlcs[0];

@@ -348,6 +359,17 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) {
check_added_monitors!(nodes[2], 0);
do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true);

+expect_pending_htlcs_forwardable!(nodes[2]);
+let failed_destination = match check {
+ForwardCheckFail::InboundOnionCheck|ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::InvalidOnion,
+ForwardCheckFail::OutboundChannelCheck =>
+HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 },
+};
+expect_htlc_handling_failed_destinations!(
+nodes[2].node.get_and_clear_pending_events(), &[failed_destination.clone()]
+);
+check_added_monitors!(nodes[2], 1);
+
let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
let update_malformed = &mut updates.update_fail_malformed_htlcs[0];
assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
@@ -407,7 +429,10 @@ fn failed_backwards_to_intro_node() {
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[2], 0);
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true);
-nodes[2].node.process_pending_htlc_forwards();
+
+expect_pending_htlcs_forwardable!(nodes[2]);
+expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]);
+check_added_monitors(&nodes[2], 1);

let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
let mut update_malformed = &mut updates.update_fail_malformed_htlcs[0];
@@ -483,7 +508,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck,
// intro node will error backwards.
$curr_node.node.peer_disconnected(&$next_node.node.get_our_node_id());
expect_pending_htlcs_forwardable!($curr_node);
-expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node,
+expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(),
vec![HTLCDestination::NextHopChannel { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]);
},
ProcessPendingHTLCsCheck::FwdChannelClosed => {
@@ -499,12 +524,12 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck,
crate::events::Event::ChannelClosed { .. } => {},
_ => panic!("Unexpected event {:?}", events),
}
-check_closed_broadcast(&$curr_node, 1, true);
-check_added_monitors!($curr_node, 1);

$curr_node.node.process_pending_htlc_forwards();
-expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node,
+expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(),
vec![HTLCDestination::UnknownNextHop { requested_forward_scid: $failed_scid }]);
+check_closed_broadcast(&$curr_node, 1, true);
+check_added_monitors!($curr_node, 1);
$curr_node.node.process_pending_htlc_forwards();
},
}
@@ -590,6 +615,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) {
};
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true);
+expect_pending_htlcs_forwardable!(nodes[1]);

let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
@@ -891,13 +917,19 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) {
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update_add);
check_added_monitors!(nodes[2], 0);
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
+expect_pending_htlcs_forwardable!(nodes[2]);
+expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]);
+check_added_monitors(&nodes[2], 1);
},
ReceiveCheckFail::ReceiveRequirements => {
let update_add = &mut payment_event_1_2.msgs[0];
update_add.amount_msat -= 1;
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update_add);
check_added_monitors!(nodes[2], 0);
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
+expect_pending_htlcs_forwardable!(nodes[2]);
+expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]);
+check_added_monitors(&nodes[2], 1);
},
ReceiveCheckFail::ChannelCheck => {
nodes[2].node.close_channel(&chan_id_1_2, &nodes[1].node.get_our_node_id()).unwrap();
@@ -911,6 +943,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) {

nodes[2].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
commitment_signed_dance!(nodes[2], nodes[1], (), false, true, false, false);
+expect_pending_htlcs_forwardable!(nodes[2]);
+expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]);
+check_added_monitors(&nodes[2], 1);
},
ReceiveCheckFail::ProcessPendingHTLCsCheck => {
assert_eq!(payment_event_1_2.msgs[0].cltv_expiry, nodes[0].best_block_info().1 + 1 + excess_final_cltv_delta_opt.unwrap() as u32 + TEST_FINAL_CLTV);
@@ -926,6 +961,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) {
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]);
check_added_monitors!(nodes[2], 0);
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
+expect_pending_htlcs_forwardable!(nodes[2]);
+expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]);
+check_added_monitors(&nodes[2], 1);
}
}

@@ -1126,6 +1164,12 @@ fn min_htlc() {
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]);
check_added_monitors!(nodes[1], 0);
do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, true, true);
+expect_pending_htlcs_forwardable!(nodes[1]);
+expect_htlc_handling_failed_destinations!(
+nodes[1].node.get_and_clear_pending_events(),
+&[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }]
+);
+check_added_monitors(&nodes[1], 1);
let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
lightning/src/ln/channel.rs: 55 changes (10 additions & 45 deletions)
@@ -4121,9 +4121,7 @@ impl<SP: Deref> Channel<SP> where
Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger))
}

-pub fn update_add_htlc(
-&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
-) -> Result<(), ChannelError> {
+pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC) -> Result<(), ChannelError> {
if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
}
@@ -4221,21 +4219,15 @@ impl<SP: Deref> Channel<SP> where
return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
}

-if self.context.channel_state.is_local_shutdown_sent() {
-if let PendingHTLCStatus::Forward(_) = pending_forward_status {
-panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
-}
-}
-
// Now update local state:
self.context.next_counterparty_htlc_id += 1;
self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
htlc_id: msg.htlc_id,
amount_msat: msg.amount_msat,
payment_hash: msg.payment_hash,
cltv_expiry: msg.cltv_expiry,
-state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved {
-pending_htlc_status: pending_forward_status
+state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Pending {
+update_add_htlc: msg.clone(),
}),
});
Ok(())
@@ -6118,7 +6110,7 @@ impl<SP: Deref> Channel<SP> where
};
let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
-let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
+let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
@@ -6128,7 +6120,7 @@ impl<SP: Deref> Channel<SP> where

let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
-let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
+let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
@@ -6162,11 +6154,11 @@ impl<SP: Deref> Channel<SP> where
// side, only on the sender's. Note that with anchor outputs we are no longer as
// sensitive to fee spikes, so we need to account for them.
let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
-let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
+let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
}
-if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
+if pending_remote_value_msat.saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
return Err(("Fee spike buffer violation", 0x1000|7));
}
@@ -8370,18 +8362,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
// called.

-let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state {
-InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)|
-InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
-matches!(htlc_resolution, InboundHTLCResolution::Pending { .. })
-},
-_ => false,
-}) {
-SERIALIZATION_VERSION
-} else {
-MIN_SERIALIZATION_VERSION
-};
-write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION);
+write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

// `user_id` used to be a single u64 value. In order to remain backwards compatible with
// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
@@ -8439,27 +8420,11 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
&InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
&InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => {
1u8.write(writer)?;
-if version_to_write <= 3 {
-if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
-pending_htlc_status.write(writer)?;
-} else {
-panic!();
-}
-} else {
-htlc_resolution.write(writer)?;
-}
+htlc_resolution.write(writer)?;
},
&InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
2u8.write(writer)?;
-if version_to_write <= 3 {
-if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
-pending_htlc_status.write(writer)?;
-} else {
-panic!();
-}
-} else {
-htlc_resolution.write(writer)?;
-}
+htlc_resolution.write(writer)?;
},
&InboundHTLCState::Committed => {
3u8.write(writer)?;
(Diffs for the remaining 8 changed files are not shown.)