Decode fully committed incoming HTLC onions upon channel resumption
Decoding is currently done as soon as we receive the `UpdateAddHTLC`
message, but that early decoding will be phased out as we transition to
decoding incoming HTLC onions only once the HTLC has been fully committed
to by both sides. Doing so within the monitor update completion path meshes
well, as that path already handles forwarding and failing back HTLCs – the
two possible outcomes of decoding an incoming HTLC onion.
wpaulino committed Jan 23, 2024
1 parent 0951b7f commit 98be4e8
Showing 2 changed files with 64 additions and 13 deletions.
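
As a rough illustration of the intended flow, here is a minimal, self-contained sketch (the types and names below are simplified stand-ins invented for this example, not LDK's actual API): committed HTLCs are buffered per channel and decoded only once the monitor update completes, with each onion resolving to either a forward or a fail-back.

// Minimal sketch of the deferred-decoding flow this commit moves toward; all
// names here are hypothetical stand-ins, not LDK's actual API.

/// Stand-in for a committed-but-not-yet-decoded `update_add_htlc`.
struct PendingAddHtlc {
    htlc_id: u64,
}

/// The two possible outcomes of decoding an incoming HTLC onion.
enum OnionOutcome {
    Forward { next_hop_scid: u64 },
    FailBack { reason: &'static str },
}

/// Stand-in for the per-channel buffer the patch adds
/// (`monitor_pending_htlc_status` in the real diff).
struct ChannelState {
    monitor_pending_htlc_status: Vec<PendingAddHtlc>,
}

/// Pretend onion decode: forwards even ids, fails odd ones.
fn decode_onion(htlc: &PendingAddHtlc) -> OnionOutcome {
    if htlc.htlc_id % 2 == 0 {
        OnionOutcome::Forward { next_hop_scid: 42 }
    } else {
        OnionOutcome::FailBack { reason: "invalid onion" }
    }
}

/// Mirrors the shape of the resumption path after this commit: the buffered
/// HTLCs are drained on monitor-update completion and split into forwards and
/// fails, instead of being decoded at `update_add_htlc` receipt.
fn resume_channel(chan: &mut ChannelState) -> (Vec<(u64, u64)>, Vec<(u64, &'static str)>) {
    let mut forwards = Vec::new(); // (htlc_id, next_hop_scid)
    let mut fails = Vec::new();    // (htlc_id, failure reason)
    for htlc in chan.monitor_pending_htlc_status.drain(..) {
        match decode_onion(&htlc) {
            OnionOutcome::Forward { next_hop_scid } => forwards.push((htlc.htlc_id, next_hop_scid)),
            OnionOutcome::FailBack { reason } => fails.push((htlc.htlc_id, reason)),
        }
    }
    (forwards, fails)
}

fn main() {
    let mut chan = ChannelState {
        monitor_pending_htlc_status: vec![PendingAddHtlc { htlc_id: 0 }, PendingAddHtlc { htlc_id: 1 }],
    };
    let (forwards, fails) = resume_channel(&mut chan);
    println!("forward: {:?}, fail back: {:?}", forwards, fails);
}

In the real patch, this split is what `handle_channel_resumption` now returns as the `(htlc_forwards, htlc_fails)` pair consumed by `handle_monitor_update_completion!`, as shown in the diff below.
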
lightning/src/ln/channel.rs (6 changes: 4 additions & 2 deletions)
@@ -4240,13 +4240,15 @@ impl<SP: Deref> Channel<SP> where
mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
let mut finalized_claimed_htlcs = Vec::new();
mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
let mut pending_htlc_status = Vec::new();
mem::swap(&mut pending_htlc_status, &mut self.context.monitor_pending_htlc_status);

if self.context.channel_state.is_peer_disconnected() {
self.context.monitor_pending_revoke_and_ack = false;
self.context.monitor_pending_commitment_signed = false;
return MonitorRestoreUpdates {
raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_htlc_status, funding_broadcastable, channel_ready, announcement_sigs
};
}

@@ -4268,7 +4270,7 @@ impl<SP: Deref> Channel<SP> where
if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
MonitorRestoreUpdates {
raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_htlc_status, funding_broadcastable, channel_ready, announcement_sigs
}
}

lightning/src/ln/channelmanager.rs (71 changes: 60 additions & 11 deletions)
@@ -2189,16 +2189,17 @@ macro_rules! handle_monitor_update_completion {
let update_actions = $peer_state.monitor_update_blocked_actions
.remove(&$chan.context.channel_id()).unwrap_or(Vec::new());

let htlc_forwards = $self.handle_channel_resumption(
let (htlc_forwards, mut htlc_fails) = $self.handle_channel_resumption(
&mut $peer_state.pending_msg_events, $chan, updates.raa,
updates.commitment_update, updates.order, updates.accepted_htlcs,
updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_htlc_status,
updates.funding_broadcastable, updates.channel_ready,
updates.announcement_sigs);
if let Some(upd) = channel_update {
$peer_state.pending_msg_events.push(upd);
}

let channel_id = $chan.context.channel_id();
let scid = $chan.context.get_short_channel_id().or($chan.context.latest_inbound_scid_alias());
let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid();
core::mem::drop($peer_state_lock);
core::mem::drop($per_peer_state_lock);
@@ -2253,6 +2254,37 @@ macro_rules! handle_monitor_update_completion {
$self.forward_htlcs(&mut [forwards][..]);
}
$self.finalize_claims(updates.finalized_claimed_htlcs);
for (htlc_fail, outgoing_scid) in htlc_fails.drain(..) {
if let Some(scid) = scid {
let (channel_id, failure) = match htlc_fail {
HTLCFailureMsg::Relay(fail_htlc) => (fail_htlc.channel_id, HTLCForwardInfo::FailHTLC {
htlc_id: fail_htlc.htlc_id,
err_packet: fail_htlc.reason,
}),
HTLCFailureMsg::Malformed(fail_malformed_htlc) => (fail_malformed_htlc.channel_id, HTLCForwardInfo::FailMalformedHTLC {
htlc_id: fail_malformed_htlc.htlc_id,
sha256_of_onion: fail_malformed_htlc.sha256_of_onion,
failure_code: fail_malformed_htlc.failure_code,
}),
};
let destination = if let Some(outgoing_scid) = outgoing_scid {
match $self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
Some((_, outgoing_channel_id)) => HTLCDestination::NextHopChannel {
node_id: Some(counterparty_node_id),
channel_id: *outgoing_channel_id,
},
None => HTLCDestination::UnknownNextHop {
requested_forward_scid: outgoing_scid,
},
}
} else {
HTLCDestination::InvalidOnion
};
$self.push_htlc_failure(scid, channel_id, failure, destination);
} else {
debug_assert!(false, "Channel with failed HTLC should have a real/alias short_channel_id")
}
}
for failure in updates.failed_htlcs.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
@@ -5871,9 +5903,10 @@ where
fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
mut pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_htlc_status: Vec<msgs::UpdateAddHTLC>,
funding_broadcastable: Option<Transaction>,
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
-> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
-> (Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)>, Vec<(HTLCFailureMsg, Option<u64>)>) {
let logger = WithChannelContext::from(&self.logger, &channel.context);
log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
&channel.context.channel_id(),
@@ -5884,10 +5917,26 @@ where
if announcement_sigs.is_some() { "sending" } else { "without" });

let mut htlc_forwards = None;

let mut htlc_fails = Vec::new();
let counterparty_node_id = channel.context.get_counterparty_node_id();
let short_channel_id = channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias());
for update_add_htlc in pending_htlc_status {
let decoded_hop_res = self.decode_update_add_htlc_onion(&update_add_htlc, &counterparty_node_id, Some(channel));
match decoded_hop_res {
Ok((next_hop, shared_secret, next_packet_pk_opt)) => {
match self.construct_pending_htlc_status(
&update_add_htlc, &counterparty_node_id, shared_secret, next_hop,
channel.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
) {
PendingHTLCStatus::Forward(htlc_forward) => pending_forwards.push((htlc_forward, update_add_htlc.htlc_id)),
PendingHTLCStatus::Fail(htlc_fail) => htlc_fails.push((htlc_fail, None)),
}
},
Err(e) => htlc_fails.push(e),
};
}
if !pending_forwards.is_empty() {
htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
htlc_forwards = Some((short_channel_id,
channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
}

@@ -5939,7 +5988,7 @@ where
emit_channel_ready_event!(pending_events, channel);
}

htlc_forwards
(htlc_forwards, htlc_fails)
}

fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
@@ -6687,7 +6736,7 @@ where
msg, counterparty_node_id, shared_secret, next_hop,
chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
),
Err(e) => PendingHTLCStatus::Fail(e)
Err((fail_msg, _)) => PendingHTLCStatus::Fail(fail_msg)
};
let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
if msg.blinding_point.is_some() {
@@ -7198,10 +7247,10 @@ where
}
}
let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
let htlc_forwards = self.handle_channel_resumption(
let (htlc_forwards, htlc_fails) = self.handle_channel_resumption(
&mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
debug_assert!(htlc_forwards.is_none());
Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
debug_assert!(htlc_forwards.is_none() && htlc_fails.is_empty());
if let Some(upd) = channel_update {
peer_state.pending_msg_events.push(upd);
}