diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 375beb6d66c..efafe56d548 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -2543,26 +2543,24 @@ impl FailHTLCContents for msgs::OnionErrorPacket {
 		HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
 	}
 }
-impl FailHTLCContents for (u16, [u8; 32]) {
-	type Message = msgs::UpdateFailMalformedHTLC; // (failure_code, sha256_of_onion)
+impl FailHTLCContents for ([u8; 32], u16) {
+	type Message = msgs::UpdateFailMalformedHTLC;
 	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
 		msgs::UpdateFailMalformedHTLC {
 			htlc_id,
 			channel_id,
-			failure_code: self.0,
-			sha256_of_onion: self.1
+			sha256_of_onion: self.0,
+			failure_code: self.1
 		}
 	}
 	fn to_inbound_htlc_state(self) -> InboundHTLCState {
-		InboundHTLCState::LocalRemoved(
-			InboundHTLCRemovalReason::FailMalformed((self.1, self.0))
-		)
+		InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
	}
 	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
 		HTLCUpdateAwaitingACK::FailMalformedHTLC {
 			htlc_id,
-			failure_code: self.0,
-			sha256_of_onion: self.1
+			sha256_of_onion: self.0,
+			failure_code: self.1
 		}
 	}
 }
@@ -2888,7 +2886,7 @@ impl<SP: Deref> Channel<SP> where
 	pub fn queue_fail_malformed_htlc<L: Deref>(
 		&mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
 	) -> Result<(), ChannelError> where L::Target: Logger {
-		self.fail_htlc(htlc_id_arg, (failure_code, sha256_of_onion), true, logger)
+		self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
 			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
 	}
 
@@ -2901,7 +2899,7 @@ impl<SP: Deref> Channel<SP> where
 	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
 	/// [`ChannelError::Ignore`].
 	fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
-		&mut self, htlc_id_arg: u64, err_packet: E, mut force_holding_cell: bool,
+		&mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
 		logger: &L
 	) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
 		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
@@ -2968,7 +2966,7 @@ impl<SP: Deref> Channel<SP> where
 				}
 			}
 			log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
-			self.context.holding_cell_htlc_updates.push(err_packet.to_htlc_update_awaiting_ack(htlc_id_arg));
+			self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
 			return Ok(None);
 		}
 
@@ -2976,10 +2974,10 @@ impl<SP: Deref> Channel<SP> where
 			E::Message::name(), &self.context.channel_id());
 		{
 			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
-			htlc.state = err_packet.clone().to_inbound_htlc_state();
+			htlc.state = err_contents.clone().to_inbound_htlc_state();
 		}
 
-		Ok(Some(err_packet.to_message(htlc_id_arg, self.context.channel_id())))
+		Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
 	}
 
 	// Message handlers:
@@ -3576,7 +3574,7 @@ impl<SP: Deref> Channel<SP> where
 			// the limit. In case it's less rare than I anticipate, we may want to revisit
 			// handling this case better and maybe fulfilling some of the HTLCs while attempting
 			// to rebalance channels.
-			match &htlc_update {
+			let fail_htlc_res = match &htlc_update {
 				&HTLCUpdateAwaitingACK::AddHTLC {
 					amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
 					skimmed_fee_msat, blinding_point, ..
@@ -3604,6 +3602,7 @@ impl<SP: Deref> Channel<SP> where
 							}
 						}
 					}
+					None
 				},
 				&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
 					// If an HTLC claim was previously added to the holding cell (via
@@ -3617,40 +3616,33 @@ impl<SP: Deref> Channel<SP> where
 							{ monitor_update } else { unreachable!() };
 					update_fulfill_count += 1;
 					monitor_update.updates.append(&mut additional_monitor_update.updates);
+					None
 				},
 				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
-					match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
-						Ok(update_fail_msg_option) => {
-							// If an HTLC failure was previously added to the holding cell (via
-							// `queue_fail_htlc`) then generating the fail message itself must
-							// not fail - we should never end up in a state where we double-fail
-							// an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
-							// for a full revocation before failing.
-							debug_assert!(update_fail_msg_option.is_some());
-							update_fail_count += 1;
-						},
-						Err(e) => {
-							if let ChannelError::Ignore(_) = e {}
-							else {
-								panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
-							}
-						}
-					}
+					Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
+						.map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
 				},
 				&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
-					match self.fail_htlc(htlc_id, (failure_code, sha256_of_onion), false, logger) {
-						Ok(update_fail_malformed_opt) => {
-							debug_assert!(update_fail_malformed_opt.is_some()); // See above comment
-							update_fail_count += 1;
-						},
-						Err(e) => {
-							if let ChannelError::Ignore(_) = e {}
-							else {
-								panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
-							}
-						}
-					}
-				},
-			}
+					Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
+						.map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
+				}
+			};
+			if let Some(res) = fail_htlc_res {
+				match res {
+					Ok(fail_msg_opt) => {
+						// If an HTLC failure was previously added to the holding cell (via
+						// `queue_fail_{malformed_}htlc`) then generating the fail message itself must
+						// not fail - we should never end up in a state where we double-fail
+						// an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
+						// for a full revocation before failing.
+						debug_assert!(fail_msg_opt.is_some());
+						update_fail_count += 1;
+					},
+					Err(ChannelError::Ignore(_)) => {},
+					Err(_) => {
+						panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+					},
+				}
+			}
 		}
 		if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index f08096426ff..01352ffa387 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -4344,7 +4344,7 @@ where
 			if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
 				let logger = WithChannelContext::from(&self.logger, &chan.context);
 				for forward_info in pending_forwards.drain(..) {
-					match forward_info {
+					let queue_fail_htlc_res = match forward_info {
 						HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 							prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
 							forward_info: PendingHTLCInfo {
@@ -4390,40 +4390,35 @@ where
 								));
 								continue;
 							}
+							None
 						},
 						HTLCForwardInfo::AddHTLC { .. } => {
 							panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
 						},
 						HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
 							log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-							if let Err(e) = chan.queue_fail_htlc(
-								htlc_id, err_packet, &&logger
-							) {
-								if let ChannelError::Ignore(msg) = e {
-									log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-								} else {
-									panic!("Stated return value requirements in queue_fail_htlc() were not met");
-								}
-								// fail-backs are best-effort, we probably already have one
-								// pending, and if not that's OK, if not, the channel is on
-								// the chain and sending the HTLC-Timeout is their problem.
-								continue;
-							}
+							Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
 						},
 						HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
-							log_trace!(self.logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-							if let Err(e) = chan.queue_fail_malformed_htlc(htlc_id, failure_code, sha256_of_onion, &self.logger) {
-								if let ChannelError::Ignore(msg) = e {
-									log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-								} else {
-									panic!("Stated return value requirements in queue_fail_malformed_htlc() were not met");
-								}
-								// fail-backs are best-effort, we probably already have one
-								// pending, and if not that's OK, if not, the channel is on
-								// the chain and sending the HTLC-Timeout is their problem.
-								continue;
-							}
+							log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+							let res = chan.queue_fail_malformed_htlc(
+								htlc_id, failure_code, sha256_of_onion, &&logger
+							);
+							Some((res, htlc_id))
 						},
-					}
+					};
+					if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+						if let Err(e) = queue_fail_htlc_res {
+							if let ChannelError::Ignore(msg) = e {
+								log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+							} else {
+								panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
+							}
+							// fail-backs are best-effort, we probably already have one
+							// pending, and if not that's OK, if not, the channel is on
+							// the chain and sending the HTLC-Timeout is their problem.
+							continue;
+						}
+					}
 				}
 			} else {
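
Both files apply the same shape of refactor: each match arm now evaluates to an `Option` carrying the fallible fail-back result (`None` for arms that have nothing to fail), and a single block after the match applies the Ignore-versus-panic policy that was previously duplicated in every arm. Below is a minimal, self-contained sketch of that pattern; `HoldingCellUpdate`, `free_one`, and this `fail_htlc` are simplified stand-ins for illustration, not the LDK definitions.

// Illustrative reduction of the pattern above; all names here are simplified
// stand-ins rather than the real LDK types.

#[allow(dead_code)]
enum ChannelError {
	Ignore(String),
	Close(String),
}

#[allow(dead_code)]
enum HoldingCellUpdate {
	Add,
	Claim,
	Fail { htlc_id: u64 },
	FailMalformed { htlc_id: u64, failure_code: u16 },
}

// Stand-in for Channel::fail_htlc: Ok(Some(())) models "a fail message was
// generated", Ok(None) models "nothing to send", Err(Ignore) is best-effort.
fn fail_htlc(_htlc_id: u64) -> Result<Option<()>, ChannelError> {
	Ok(Some(()))
}

fn free_one(update: &HoldingCellUpdate, update_fail_count: &mut usize) {
	// Arms produce a value instead of handling errors inline...
	let fail_htlc_res = match update {
		HoldingCellUpdate::Add | HoldingCellUpdate::Claim => None,
		HoldingCellUpdate::Fail { htlc_id } => Some(fail_htlc(*htlc_id)),
		HoldingCellUpdate::FailMalformed { htlc_id, .. } => Some(fail_htlc(*htlc_id)),
	};
	// ...so the Ignore-vs-panic policy lives in exactly one place.
	if let Some(res) = fail_htlc_res {
		match res {
			Ok(fail_msg_opt) => {
				// A queued fail-back must produce a message; see the comment in
				// the patch about double-fail/fail-then-claim.
				debug_assert!(fail_msg_opt.is_some());
				*update_fail_count += 1;
			},
			Err(ChannelError::Ignore(_)) => {},
			Err(_) => panic!("Got a non-IgnoreError action trying to fail holding cell HTLC"),
		}
	}
}

fn main() {
	let mut update_fail_count = 0;
	for upd in [
		HoldingCellUpdate::Add,
		HoldingCellUpdate::Fail { htlc_id: 1 },
		HoldingCellUpdate::FailMalformed { htlc_id: 2, failure_code: 0x8000 | 22 },
	] {
		free_one(&upd, &mut update_fail_count);
	}
	assert_eq!(update_fail_count, 2);
}

In the channelmanager.rs hunks the `Option` additionally carries the `htlc_id`, so the shared error path can log which HTLC failed to fail back; the control flow is otherwise identical.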