Enable decoding new incoming HTLC onions when fully committed
This commit ensures that, going forward, all new incoming HTLCs will have
their onion decoded once they become fully committed, so that we can decide how
to proceed with each one. As a result, we'll obtain `HTLCHandlingFailed` events
for _any_ failed HTLC that comes across a channel.
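
For context, a node that wants to react to these failure events might handle them roughly as follows. This is a hedged sketch against recent LDK releases, not code from this commit; the field and variant names (`prev_channel_id`, `failed_next_destination`, and the `HTLCDestination` variants) are assumed from current APIs and may differ in your version.

	use lightning::events::{Event, HTLCDestination};

	// Minimal sketch of an event-handler branch for the `HTLCHandlingFailed`
	// events that are now generated for any failed HTLC on a channel.
	fn log_htlc_failure(event: &Event) {
		if let Event::HTLCHandlingFailed { prev_channel_id, failed_next_destination } = event {
			match failed_next_destination {
				HTLCDestination::NextHopChannel { node_id, channel_id } =>
					println!("HTLC from {:?} failed forwarding to {:?} over channel {:?}",
						prev_channel_id, node_id, channel_id),
				HTLCDestination::UnknownNextHop { requested_forward_scid } =>
					println!("HTLC from {:?} requested unknown SCID {}",
						prev_channel_id, requested_forward_scid),
				HTLCDestination::InvalidOnion =>
					println!("HTLC from {:?} carried an undecodable onion", prev_channel_id),
				HTLCDestination::FailedPayment { payment_hash } =>
					println!("Inbound payment {:?} failed", payment_hash),
				_ => {},
			}
		}
	}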

We will now start writing channels with the new serialization version (4), and
we will still be able to downgrade back to the commit that introduced that
version, since that commit already supports reading version 4.
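
The downgrade guarantee comes from the usual version-prefix pattern: the writer records both the version it actually wrote and the minimum version a reader must understand, and a reader rejects anything whose minimum exceeds what it supports. Below is a self-contained sketch of that pattern with illustrative constants, not LDK's actual `write_ver_prefix!` machinery.

	use std::io::{self, Read, Write};

	// Illustrative values only; the real constants live in lightning/src/ln/channel.rs.
	const SERIALIZATION_VERSION: u8 = 4;
	const MIN_SERIALIZATION_VERSION: u8 = 3;

	// Prefix the stream with (version written, minimum version required to read it).
	fn write_ver_prefix<W: Write>(w: &mut W, this_ver: u8, min_ver_required: u8) -> io::Result<()> {
		w.write_all(&[this_ver, min_ver_required])
	}

	// A reader accepts the object as long as it understands the "minimum required"
	// byte, which is what lets a node downgrade to any release that can read version 4.
	fn read_ver_prefix<R: Read>(r: &mut R, our_max_supported: u8) -> io::Result<u8> {
		let mut vers = [0u8; 2];
		r.read_exact(&mut vers)?;
		let (written_ver, min_ver_required) = (vers[0], vers[1]);
		if min_ver_required > our_max_supported {
			return Err(io::Error::new(io::ErrorKind::InvalidData,
				"object written by an incompatible newer version"));
		}
		Ok(written_ver)
	}

	// After this commit, writing a channel always uses the new version:
	fn write_channel_prefix<W: Write>(w: &mut W) -> io::Result<()> {
		write_ver_prefix(w, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION)
	}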

Note that existing pending inbound HTLCs may already have their resolution
computed if they were received on a previous version of LDK. We must continue
to support those until we no longer allow downgrading past this commit.
wpaulino committed Sep 12, 2024
1 parent 19ef855 commit 0562830
Showing 12 changed files with 337 additions and 282 deletions.
24 changes: 12 additions & 12 deletions fuzz/src/full_stack.rs
@@ -1344,8 +1344,8 @@ mod tests {
// end of update_add_htlc from 0 to 1 via client and mac
ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);

// Two feerate requests to check dust exposure
ext_from_hex("00fd00fd", &mut test);
// One feerate request to check dust exposure
ext_from_hex("00fd", &mut test);

// inbound read from peer id 0 of len 18
ext_from_hex("030012", &mut test);
@@ -1368,8 +1368,8 @@ mod tests {

// process the now-pending HTLC forward
ext_from_hex("07", &mut test);
// Three feerate requests to check dust exposure
ext_from_hex("00fd00fd00fd", &mut test);
// Four feerate requests to check dust exposure while forwarding the HTLC
ext_from_hex("00fd00fd00fd00fd", &mut test);
// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: UpdateHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000)

// we respond with commitment_signed then revoke_and_ack (a weird, but valid, order)
@@ -1445,8 +1445,8 @@ mod tests {
// end of update_add_htlc from 0 to 1 via client and mac
ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);

// Two feerate requests to check dust exposure
ext_from_hex("00fd00fd", &mut test);
// One feerate request to check dust exposure
ext_from_hex("00fd", &mut test);

// now respond to the update_fulfill_htlc+commitment_signed messages the client sent to peer 0
// inbound read from peer id 0 of len 18
@@ -1480,8 +1480,8 @@ mod tests {
// process the now-pending HTLC forward
ext_from_hex("07", &mut test);

// Three feerate requests to check dust exposure
ext_from_hex("00fd00fd00fd", &mut test);
// Four feerate requests to check dust exposure while forwarding the HTLC
ext_from_hex("00fd00fd00fd00fd", &mut test);

// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
// we respond with revoke_and_ack, then commitment_signed, then update_fail_htlc
@@ -1580,8 +1580,8 @@ mod tests {
// end of update_add_htlc from 0 to 1 via client and mac
ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff 5300000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);

// Two feerate requests to check dust exposure
ext_from_hex("00fd00fd", &mut test);
// One feerate request to check dust exposure
ext_from_hex("00fd", &mut test);

// inbound read from peer id 0 of len 18
ext_from_hex("030012", &mut test);
Expand All @@ -1604,8 +1604,8 @@ mod tests {

// process the now-pending HTLC forward
ext_from_hex("07", &mut test);
// Three feerate requests to check dust exposure
ext_from_hex("00fd00fd00fd", &mut test);
// Four feerate requests to check dust exposure while forwarding the HTLC
ext_from_hex("00fd00fd00fd00fd", &mut test);
// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)

// connect a block with one transaction of len 125
2 changes: 2 additions & 0 deletions lightning/src/events/mod.rs
@@ -1361,6 +1361,8 @@ pub enum Event {
/// * When an unknown SCID is requested for forwarding a payment.
/// * Expected MPP amount has already been reached
/// * The HTLC has timed out
/// * The HTLC failed to meet the forwarding requirements (i.e. insufficient fees paid, or a
/// CLTV that is too soon)
///
/// This event, however, does not get generated if an HTLC fails to meet the forwarding
/// requirements (i.e. insufficient fees paid, or a CLTV that is too soon).
64 changes: 54 additions & 10 deletions lightning/src/ln/blinded_payment_tests.rs
@@ -290,8 +290,10 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) {
// We need the session priv to construct a bogus onion packet later.
*nodes[0].keys_manager.override_random_bytes.lock().unwrap() = Some([3; 32]);
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
let chan_upd_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents;
let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
let chan_upd_1_2 = chan_1_2.0.contents;
let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
let chan_upd_2_3 = chan_2_3.0.contents;

let amt_msat = 5000;
let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
@@ -345,18 +347,27 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) {
check_added_monitors!(nodes[1], 0);
do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_0_1.commitment_signed, true, true);

expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);

if intro_fails {
let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
let failed_destination = match check {
ForwardCheckFail::InboundOnionCheck => HTLCDestination::InvalidOnion,
ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::FailedPayment { payment_hash },
ForwardCheckFail::OutboundChannelCheck =>
HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 },
};
expect_htlc_handling_failed_destinations!(
nodes[1].node.get_and_clear_pending_events(), &[failed_destination.clone()]
);
expect_payment_failed_conditions(&nodes[0], payment_hash, false,
PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
return
}

expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);

let mut updates_1_2 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
let mut update_add = &mut updates_1_2.update_add_htlcs[0];

@@ -366,6 +377,17 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) {
check_added_monitors!(nodes[2], 0);
do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true);

expect_pending_htlcs_forwardable!(nodes[2]);
let failed_destination = match check {
ForwardCheckFail::InboundOnionCheck|ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::InvalidOnion,
ForwardCheckFail::OutboundChannelCheck =>
HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 },
};
expect_htlc_handling_failed_destinations!(
nodes[2].node.get_and_clear_pending_events(), &[failed_destination.clone()]
);
check_added_monitors!(nodes[2], 1);

let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
let update_malformed = &mut updates.update_fail_malformed_htlcs[0];
assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
@@ -425,7 +447,10 @@ fn failed_backwards_to_intro_node() {
nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[2], 0);
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true);
nodes[2].node.process_pending_htlc_forwards();

expect_pending_htlcs_forwardable!(nodes[2]);
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]);
check_added_monitors(&nodes[2], 1);

let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
let mut update_malformed = &mut updates.update_fail_malformed_htlcs[0];
@@ -502,7 +527,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck,
// intro node will error backwards.
$curr_node.node.peer_disconnected($next_node.node.get_our_node_id());
expect_pending_htlcs_forwardable!($curr_node);
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node,
expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(),
vec![HTLCDestination::NextHopChannel { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]);
},
ProcessPendingHTLCsCheck::FwdChannelClosed => {
@@ -518,12 +543,12 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck,
crate::events::Event::ChannelClosed { .. } => {},
_ => panic!("Unexpected event {:?}", events),
}
check_closed_broadcast(&$curr_node, 1, true);
check_added_monitors!($curr_node, 1);

$curr_node.node.process_pending_htlc_forwards();
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node,
expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(),
vec![HTLCDestination::UnknownNextHop { requested_forward_scid: $failed_scid }]);
check_closed_broadcast(&$curr_node, 1, true);
check_added_monitors!($curr_node, 1);
$curr_node.node.process_pending_htlc_forwards();
},
}
@@ -609,6 +634,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) {
};
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true);
expect_pending_htlcs_forwardable!(nodes[1]);

let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
@@ -914,13 +940,19 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) {
nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add);
check_added_monitors!(nodes[2], 0);
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
expect_pending_htlcs_forwardable!(nodes[2]);
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]);
check_added_monitors(&nodes[2], 1);
},
ReceiveCheckFail::ReceiveRequirements => {
let update_add = &mut payment_event_1_2.msgs[0];
update_add.amount_msat -= 1;
nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add);
check_added_monitors!(nodes[2], 0);
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
expect_pending_htlcs_forwardable!(nodes[2]);
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors(&nodes[2], 1);
},
ReceiveCheckFail::ChannelCheck => {
nodes[2].node.close_channel(&chan_id_1_2, &nodes[1].node.get_our_node_id()).unwrap();
@@ -934,6 +966,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) {

nodes[2].node.handle_shutdown(nodes[1].node.get_our_node_id(), &node_1_shutdown);
commitment_signed_dance!(nodes[2], nodes[1], (), false, true, false, false);
expect_pending_htlcs_forwardable!(nodes[2]);
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors(&nodes[2], 1);
},
ReceiveCheckFail::ProcessPendingHTLCsCheck => {
assert_eq!(payment_event_1_2.msgs[0].cltv_expiry, nodes[0].best_block_info().1 + 1 + excess_final_cltv_delta_opt.unwrap() as u32 + TEST_FINAL_CLTV);
@@ -949,6 +984,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) {
nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]);
check_added_monitors!(nodes[2], 0);
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
expect_pending_htlcs_forwardable!(nodes[2]);
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors(&nodes[2], 1);
}
}

@@ -1149,6 +1187,12 @@ fn min_htlc() {
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]);
check_added_monitors!(nodes[1], 0);
do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, true, true);
expect_pending_htlcs_forwardable!(nodes[1]);
expect_htlc_handling_failed_destinations!(
nodes[1].node.get_and_clear_pending_events(),
&[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }]
);
check_added_monitors(&nodes[1], 1);
let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
54 changes: 10 additions & 44 deletions lightning/src/ln/channel.rs
@@ -4294,8 +4294,7 @@ impl<SP: Deref> Channel<SP> where
}

pub fn update_add_htlc<F: Deref>(
&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
fee_estimator: &LowerBoundedFeeEstimator<F>,
&mut self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>,
) -> Result<(), ChannelError> where F::Target: FeeEstimator {
if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::close("Got add HTLC message when channel was not in an operational state".to_owned()));
@@ -4395,21 +4394,15 @@ impl<SP: Deref> Channel<SP> where
return Err(ChannelError::close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
}

if self.context.channel_state.is_local_shutdown_sent() {
if let PendingHTLCStatus::Forward(_) = pending_forward_status {
panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
}
}

// Now update local state:
self.context.next_counterparty_htlc_id += 1;
self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
htlc_id: msg.htlc_id,
amount_msat: msg.amount_msat,
payment_hash: msg.payment_hash,
cltv_expiry: msg.cltv_expiry,
state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved {
pending_htlc_status: pending_forward_status
state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Pending {
update_add_htlc: msg.clone(),
}),
});
Ok(())
@@ -6409,7 +6402,7 @@ impl<SP: Deref> Channel<SP> where
};
let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat;
if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
@@ -6429,7 +6422,7 @@ impl<SP: Deref> Channel<SP> where

let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat;
if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
@@ -6463,11 +6456,11 @@ impl<SP: Deref> Channel<SP> where
// side, only on the sender's. Note that with anchor outputs we are no longer as
// sensitive to fee spikes, so we need to account for them.
let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
}
if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
if pending_remote_value_msat.saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
return Err(("Fee spike buffer violation", 0x1000|7));
}
@@ -8620,18 +8613,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
// called.

let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state {
InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)|
InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
matches!(htlc_resolution, InboundHTLCResolution::Pending { .. })
},
_ => false,
}) {
SERIALIZATION_VERSION
} else {
MIN_SERIALIZATION_VERSION
};
write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION);
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

// `user_id` used to be a single u64 value. In order to remain backwards compatible with
// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
@@ -8689,27 +8671,11 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
&InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
&InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => {
1u8.write(writer)?;
if version_to_write <= 3 {
if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
pending_htlc_status.write(writer)?;
} else {
panic!();
}
} else {
htlc_resolution.write(writer)?;
}
htlc_resolution.write(writer)?;
},
&InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
2u8.write(writer)?;
if version_to_write <= 3 {
if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
pending_htlc_status.write(writer)?;
} else {
panic!();
}
} else {
htlc_resolution.write(writer)?;
}
htlc_resolution.write(writer)?;
},
&InboundHTLCState::Committed => {
3u8.write(writer)?;
[Diffs for the remaining 8 changed files are not shown here.]
