diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index 90449248e32..bcdfd53f697 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -1344,8 +1344,8 @@ mod tests { // end of update_add_htlc from 0 to 1 via client and mac ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); - // Two feerate requests to check dust exposure - ext_from_hex("00fd00fd", &mut test); + // One feerate request to check dust exposure + ext_from_hex("00fd", &mut test); // inbound read from peer id 0 of len 18 ext_from_hex("030012", &mut test); @@ -1368,8 +1368,8 @@ mod tests { // process the now-pending HTLC forward ext_from_hex("07", &mut test); - // Three feerate requests to check dust exposure - ext_from_hex("00fd00fd00fd", &mut test); + // Four feerate requests to check dust exposure while forwarding the HTLC + ext_from_hex("00fd00fd00fd00fd", &mut test); // client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: UpdateHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000) // we respond with commitment_signed then revoke_and_ack (a weird, but valid, order) @@ -1445,8 +1445,8 @@ mod tests { // end of update_add_htlc from 0 to 1 via client and mac ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); - // Two feerate requests to check dust exposure - ext_from_hex("00fd00fd", &mut test); + // One feerate request to check dust exposure + ext_from_hex("00fd", &mut test); // now respond to the update_fulfill_htlc+commitment_signed messages the client sent to peer 0 // inbound read from peer id 0 of len 18 @@ -1480,8 +1480,8 @@ mod tests { // process the now-pending HTLC forward ext_from_hex("07", &mut test); - // Three feerate requests to check dust exposure - ext_from_hex("00fd00fd00fd", &mut test); + // Four feerate requests to check dust exposure while forwarding the HTLC + ext_from_hex("00fd00fd00fd00fd", &mut test); // client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate) // we respond with revoke_and_ack, then commitment_signed, then update_fail_htlc @@ -1580,8 +1580,8 @@ mod tests { // end of update_add_htlc from 0 to 1 via client and mac ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff 5300000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); - // Two feerate requests to check dust exposure - ext_from_hex("00fd00fd", &mut test); + // One feerate request to check dust exposure + ext_from_hex("00fd", &mut test); // inbound read from peer id 0 of len 18 ext_from_hex("030012", &mut test); @@ -1604,8 +1604,8 @@ mod tests { 
// process the now-pending HTLC forward ext_from_hex("07", &mut test); - // Three feerate requests to check dust exposure - ext_from_hex("00fd00fd00fd", &mut test); + // Four feerate requests to check dust exposure while forwarding the HTLC + ext_from_hex("00fd00fd00fd00fd", &mut test); // client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate) // connect a block with one transaction of len 125 diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index a7cf68dcc1c..2b81ef89f39 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -1361,6 +1361,6 @@ pub enum Event { /// * When an unknown SCID is requested for forwarding a payment. /// * Expected MPP amount has already been reached /// * The HTLC has timed out + /// * The HTLC failed to meet the forwarding requirements (i.e. insufficient fees paid, or a + /// CLTV that is too soon) /// - /// This event, however, does not get generated if an HTLC fails to meet the forwarding - /// requirements (i.e. insufficient fees paid, or a CLTV that is too soon). diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index d099e439ae5..f070da3710d 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -290,8 +290,10 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { // We need the session priv to construct a bogus onion packet later. *nodes[0].keys_manager.override_random_bytes.lock().unwrap() = Some([3; 32]); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); - let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents; - let chan_upd_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents; + let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + let chan_upd_1_2 = chan_1_2.0.contents; + let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0); + let chan_upd_2_3 = chan_2_3.0.contents; let amt_msat = 5000; let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None); @@ -345,18 +347,27 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { check_added_monitors!(nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_0_1.commitment_signed, true, true); + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + if intro_fails { let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); + let failed_destination = match check { + ForwardCheckFail::InboundOnionCheck => HTLCDestination::InvalidOnion, + ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::FailedPayment { payment_hash }, + ForwardCheckFail::OutboundChannelCheck => + HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }, + }; + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), &[failed_destination.clone()] + ); expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); return } - 
expect_pending_htlcs_forwardable!(nodes[1]); - check_added_monitors!(nodes[1], 1); - let mut updates_1_2 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); let mut update_add = &mut updates_1_2.update_add_htlcs[0]; @@ -366,6 +377,17 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true); + expect_pending_htlcs_forwardable!(nodes[2]); + let failed_destination = match check { + ForwardCheckFail::InboundOnionCheck|ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::InvalidOnion, + ForwardCheckFail::OutboundChannelCheck => + HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }, + }; + expect_htlc_handling_failed_destinations!( + nodes[2].node.get_and_clear_pending_events(), &[failed_destination.clone()] + ); + check_added_monitors!(nodes[2], 1); + let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); @@ -425,7 +447,10 @@ fn failed_backwards_to_intro_node() { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true); - nodes[2].node.process_pending_htlc_forwards(); + + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + check_added_monitors(&nodes[2], 1); let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); let mut update_malformed = &mut updates.update_fail_malformed_htlcs[0]; @@ -502,7 +527,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, // intro node will error backwards. $curr_node.node.peer_disconnected($next_node.node.get_our_node_id()); expect_pending_htlcs_forwardable!($curr_node); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node, + expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), vec![HTLCDestination::NextHopChannel { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]); }, ProcessPendingHTLCsCheck::FwdChannelClosed => { @@ -518,12 +543,12 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, crate::events::Event::ChannelClosed { .. 
} => {}, _ => panic!("Unexpected event {:?}", events), } + check_closed_broadcast(&$curr_node, 1, true); + check_added_monitors!($curr_node, 1); $curr_node.node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node, + expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), vec![HTLCDestination::UnknownNextHop { requested_forward_scid: $failed_scid }]); - check_closed_broadcast(&$curr_node, 1, true); - check_added_monitors!($curr_node, 1); $curr_node.node.process_pending_htlc_forwards(); }, } @@ -609,6 +634,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -914,6 +940,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ReceiveRequirements => { let update_add = &mut payment_event_1_2.msgs[0]; @@ -921,6 +950,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ChannelCheck => { nodes[2].node.close_channel(&chan_id_1_2, &nodes[1].node.get_our_node_id()).unwrap(); @@ -934,6 +966,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_shutdown(nodes[1].node.get_our_node_id(), &node_1_shutdown); commitment_signed_dance!(nodes[2], nodes[1], (), false, true, false, false); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ProcessPendingHTLCsCheck => { assert_eq!(payment_event_1_2.msgs[0].cltv_expiry, nodes[0].best_block_info().1 + 1 + excess_final_cltv_delta_opt.unwrap() as u32 + TEST_FINAL_CLTV); @@ -949,6 +984,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + check_added_monitors(&nodes[2], 1); } } @@ -1149,6 +1187,12 @@ fn min_htlc() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), 
&payment_event_0_1.msgs[0]); check_added_monitors!(nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, true, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }] + ); + check_added_monitors(&nodes[1], 1); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 8fe8a33400b..1249b37a95d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -4294,8 +4294,7 @@ impl Channel where } pub fn update_add_htlc( - &mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus, - fee_estimator: &LowerBoundedFeeEstimator, + &mut self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator, ) -> Result<(), ChannelError> where F::Target: FeeEstimator { if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { return Err(ChannelError::close("Got add HTLC message when channel was not in an operational state".to_owned())); @@ -4395,12 +4394,6 @@ impl Channel where return Err(ChannelError::close("Remote provided CLTV expiry in seconds instead of block height".to_owned())); } - if self.context.channel_state.is_local_shutdown_sent() { - if let PendingHTLCStatus::Forward(_) = pending_forward_status { - panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing"); - } - } - // Now update local state: self.context.next_counterparty_htlc_id += 1; self.context.pending_inbound_htlcs.push(InboundHTLCOutput { @@ -4408,8 +4401,8 @@ impl Channel where amount_msat: msg.amount_msat, payment_hash: msg.payment_hash, cltv_expiry: msg.cltv_expiry, - state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved { - pending_htlc_status: pending_forward_status + state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Pending { + update_add_htlc: msg.clone(), }), }); Ok(()) @@ -6409,7 +6402,7 @@ impl Channel where }; let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { - let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; + let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat; if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); @@ -6429,7 +6422,7 @@ impl Channel where let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { - let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; + let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat; if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { 
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); @@ -6463,11 +6456,11 @@ impl Channel where // side, only on the sender's. Note that with anchor outputs we are no longer as // sensitive to fee spikes, so we need to account for them. let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); + let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None); if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; } - if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat { + if pending_remote_value_msat.saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat { log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id()); return Err(("Fee spike buffer violation", 0x1000|7)); } @@ -8559,7 +8552,7 @@ fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) } const SERIALIZATION_VERSION: u8 = 4; -const MIN_SERIALIZATION_VERSION: u8 = 3; +const MIN_SERIALIZATION_VERSION: u8 = 4; impl_writeable_tlv_based_enum_legacy!(InboundHTLCRemovalReason,; (0, FailRelay), @@ -8620,18 +8613,7 @@ impl Writeable for Channel where SP::Target: SignerProvider { // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been // called. - let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state { - InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)| - InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => { - matches!(htlc_resolution, InboundHTLCResolution::Pending { .. }) - }, - _ => false, - }) { - SERIALIZATION_VERSION - } else { - MIN_SERIALIZATION_VERSION - }; - write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION); + write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); // `user_id` used to be a single u64 value. In order to remain backwards compatible with // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. 
We write @@ -8689,27 +8671,11 @@ impl Writeable for Channel where SP::Target: SignerProvider { &InboundHTLCState::RemoteAnnounced(_) => unreachable!(), &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => { 1u8.write(writer)?; - if version_to_write <= 3 { - if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution { - pending_htlc_status.write(writer)?; - } else { - panic!(); - } - } else { - htlc_resolution.write(writer)?; - } + htlc_resolution.write(writer)?; }, &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => { 2u8.write(writer)?; - if version_to_write <= 3 { - if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution { - pending_htlc_status.write(writer)?; - } else { - panic!(); - } - } else { - htlc_resolution.write(writer)?; - } + htlc_resolution.write(writer)?; }, &InboundHTLCState::Committed => { 3u8.write(writer)?; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5f8dc1e5541..60a78496951 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3886,34 +3886,6 @@ where }) } - fn decode_update_add_htlc_onion( - &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, - ) -> Result< - (onion_utils::Hop, [u8; 32], Option>), HTLCFailureMsg - > { - let (next_hop, shared_secret, next_packet_details_opt) = decode_incoming_update_add_htlc_onion( - msg, &*self.node_signer, &*self.logger, &self.secp_ctx - )?; - - let next_packet_details = match next_packet_details_opt { - Some(next_packet_details) => next_packet_details, - // it is a receive, so no need for outbound checks - None => return Ok((next_hop, shared_secret, None)), - }; - - // Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we - // can't hold the outbound peer state lock at the same time as the inbound peer state lock. - self.can_forward_htlc(&msg, &next_packet_details).map_err(|e| { - let (err_msg, err_code, chan_update_opt) = e; - self.htlc_failure_from_update_add_err( - msg, counterparty_node_id, err_msg, err_code, chan_update_opt, - next_hop.is_intro_node_blinded_forward(), &shared_secret - ) - })?; - - Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey))) - } - fn construct_pending_htlc_status<'a>( &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32], decoded_hop: onion_utils::Hop, allow_underpay: bool, @@ -5065,7 +5037,7 @@ where Ok(()) } - fn process_pending_update_add_htlcs(&self) { + pub(crate) fn process_pending_update_add_htlcs(&self) { let mut decode_update_add_htlcs = new_hash_map(); mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap()); @@ -7917,7 +7889,6 @@ where // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error // closing a channel), so any changes are likely to be lost on restart! 
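The channel.rs and channelmanager.rs hunks above are the core of this diff: `Channel::update_add_htlc` no longer receives a precomputed `PendingHTLCStatus`. Instead it stores the raw `update_add_htlc` message in `InboundHTLCResolution::Pending`, and the onion decode plus forwarding checks run later in `ChannelManager::process_pending_update_add_htlcs` (now `pub(crate)` so tests can drive it directly). Because the `Pending` variant cannot be expressed in the old on-disk format, `MIN_SERIALIZATION_VERSION` is bumped to 4 and the dual-format `Writeable` paths are dropped. A minimal standalone sketch of the idea, using simplified stand-in types rather than LDK's actual ones:

```rust
struct UpdateAddHtlc {
    amount_msat: u64,
}

enum InboundHtlcResolution {
    // Legacy form: the HTLC's fate was decided inside the message handler
    // (the only form serialization version 3 could write).
    Resolved { accept: bool },
    // New form: keep the message and decide during batched forward
    // processing; needs version 4, hence the MIN_SERIALIZATION_VERSION bump.
    Pending { update_add_htlc: UpdateAddHtlc },
}

// Stand-in for the checks that used to run in `update_add_htlc` and now run
// during `process_pending_update_add_htlcs`.
fn resolve(res: InboundHtlcResolution, min_amount_msat: u64) -> bool {
    match res {
        InboundHtlcResolution::Resolved { accept } => accept,
        InboundHtlcResolution::Pending { update_add_htlc } =>
            update_add_htlc.amount_msat >= min_amount_msat,
    }
}

fn main() {
    let msg = UpdateAddHtlc { amount_msat: 5_000 };
    assert!(resolve(InboundHtlcResolution::Pending { update_add_htlc: msg }, 1_000));
    assert!(!resolve(InboundHtlcResolution::Resolved { accept: false }, 1_000));
}
```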
- let decoded_hop_res = self.decode_update_add_htlc_onion(msg, counterparty_node_id); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { @@ -7929,53 +7900,7 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { - let mut pending_forward_info = match decoded_hop_res { - Ok((next_hop, shared_secret, next_packet_pk_opt)) => - self.construct_pending_htlc_status( - msg, counterparty_node_id, shared_secret, next_hop, - chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt, - ), - Err(e) => PendingHTLCStatus::Fail(e) - }; - let logger = WithChannelContext::from(&self.logger, &chan.context, Some(msg.payment_hash)); - // If the update_add is completely bogus, the call will Err and we will close, - // but if we've sent a shutdown and they haven't acknowledged it yet, we just - // want to reject the new HTLC and fail it backwards instead of forwarding. - if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) { - if msg.blinding_point.is_some() { - pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed( - msgs::UpdateFailMalformedHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - sha256_of_onion: [0; 32], - failure_code: INVALID_ONION_BLINDING, - } - )) - } else { - match pending_forward_info { - PendingHTLCStatus::Forward(PendingHTLCInfo { - ref incoming_shared_secret, ref routing, .. - }) => { - let reason = if routing.blinded_failure().is_some() { - HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]) - } else if (error_code & 0x1000) != 0 { - let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan); - HTLCFailReason::reason(real_code, error_data) - } else { - HTLCFailReason::from_failure_code(error_code) - }.get_encrypted_failure_packet(incoming_shared_secret, &None); - let msg = msgs::UpdateFailHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - reason - }; - pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg)); - }, - _ => {}, - } - } - } - try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry); + try_chan_phase_entry!(self, chan.update_add_htlc(&msg, &self.fee_estimator), chan_phase_entry); } else { return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry); @@ -13278,6 +13203,11 @@ mod tests { assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash: mismatch_payment_hash }]); + check_added_monitors(&nodes[1], 1); + let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1); } @@ -13325,6 +13255,11 @@ mod tests { assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); 
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + check_added_monitors(&nodes[1], 1); + let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "We don't support MPP keysend payments", 1); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 7e011e65efd..a41c8efcc9b 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1870,15 +1870,18 @@ macro_rules! expect_pending_htlcs_forwardable_conditions { #[macro_export] macro_rules! expect_htlc_handling_failed_destinations { ($events: expr, $expected_failures: expr) => {{ + let mut num_expected_failures = $expected_failures.len(); for event in $events { match event { $crate::events::Event::PendingHTLCsForwardable { .. } => { }, $crate::events::Event::HTLCHandlingFailed { ref failed_next_destination, .. } => { - assert!($expected_failures.contains(&failed_next_destination)) + assert!($expected_failures.contains(&failed_next_destination)); + num_expected_failures -= 1; }, _ => panic!("Unexpected destination"), } } + assert_eq!(num_expected_failures, 0); }} } @@ -2633,6 +2636,8 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option if is_last_hop && is_probe { commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, true, true); + expect_pending_htlcs_forwardable!(node); + check_added_monitors(node, 1); } else { commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(node); @@ -2711,22 +2716,26 @@ pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path do_pass_along_path(args) } -pub fn send_probe_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&[&Node<'a, 'b, 'c>]]) { +pub fn send_probe_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[(&[&Node<'a, 'b, 'c>], PaymentHash)]) { let mut events = origin_node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), expected_route.len()); check_added_monitors!(origin_node, expected_route.len()); - for path in expected_route.iter() { + for (path, payment_hash) in expected_route.iter() { let ev = remove_first_msg_event_to_node(&path[0].node.get_our_node_id(), &mut events); - do_pass_along_path(PassAlongPathArgs::new(origin_node, path, 0, PaymentHash([0_u8; 32]), ev) + do_pass_along_path(PassAlongPathArgs::new(origin_node, path, 0, *payment_hash, ev) .is_probe() .without_clearing_recipient_events()); let nodes_to_fail_payment: Vec<_> = vec![origin_node].into_iter().chain(path.iter().cloned()).collect(); fail_payment_along_path(nodes_to_fail_payment.as_slice()); + expect_htlc_handling_failed_destinations!( + path.last().unwrap().node.get_and_clear_pending_events(), + &[HTLCDestination::FailedPayment { payment_hash: *payment_hash }] + ); } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index efd2fc9e9d6..dac12b3a6c5 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1551,6 +1551,8 @@ fn test_fee_spike_violation_fails_htlc() { 
next_local_nonce: None, }; nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa_msg); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1565,7 +1567,7 @@ fn test_fee_spike_violation_fails_htlc() { nodes[1].logger.assert_log("lightning::ln::channel", format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1); - check_added_monitors!(nodes[1], 2); + check_added_monitors!(nodes[1], 3); } #[test] @@ -6869,6 +6871,9 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -6933,6 +6938,9 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + check_added_monitors(&nodes[2], 1); let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events_3.len(), 1); @@ -7004,6 +7012,9 @@ fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + check_added_monitors(&nodes[2], 1); let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events_3.len(), 1); @@ -10064,9 +10075,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e // Outbound dust balance: 4372 sats // Note, we need sent payment to be above outbound dust threshold on counterparty_tx of 2132 sats for _ in 0..dust_outbound_htlc_on_holder_tx { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + route_payment(&nodes[0], &[&nodes[1]], dust_outbound_htlc_on_holder_tx_msat); } } else { // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`) @@ -10081,9 +10090,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * 
HTLC_TIMEOUT_TX_WEIGHT / 1000 + counteparty's `dust_limit_satoshis`) // Outbound dust balance: 5000 sats for _ in 0..dust_htlc_on_counterparty_tx - 1 { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + route_payment(&nodes[0], &[&nodes[1]], dust_htlc_on_counterparty_tx_msat); } } else { // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counteparty's `dust_limit_satoshis`) @@ -10116,6 +10123,9 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.remove(0)); nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); + expect_pending_htlcs_forwardable!(nodes[0]); + expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); // With default dust exposure: 5000 sats if on_holder_tx { // Outbound dust balance: 6399 sats diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index b163968792d..d296f83d810 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -105,6 +105,9 @@ fn run_onion_failure_test_with_fail_intercept( let update_1_0 = match test_case { 0|100 => { // intermediate node failure; fail backward to 0 + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[expected_htlc_destination.clone().unwrap()]); + check_added_monitors(&nodes[1], 1); let update_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(update_1_0.update_fail_htlcs.len()+update_1_0.update_fail_malformed_htlcs.len()==1 && (update_1_0.update_fail_htlcs.len()==1 || update_1_0.update_fail_malformed_htlcs.len()==1)); update_1_0 @@ -135,12 +138,13 @@ fn run_onion_failure_test_with_fail_intercept( expect_event!(&nodes[2], Event::PaymentClaimable); callback_node(); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]); + } else if test_case == 1 || test_case == 3 { + expect_htlc_forward!(&nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), vec![expected_htlc_destination.clone().unwrap()]); } + check_added_monitors!(&nodes[2], 1); let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - if test_case == 2 || test_case == 200 { - check_added_monitors!(&nodes[2], 1); - } assert!(update_2_1.update_fail_htlcs.len() == 1); let mut fail_msg = update_2_1.update_fail_htlcs[0].clone(); @@ -301,9 +305,10 @@ fn test_fee_failures() { // because we ignore channel update contents, we will still blame the 2nd channel. 
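All of these updated expectations lean on the `expect_htlc_handling_failed_destinations!` change in the functional_test_utils.rs hunk above, which now requires exactly one `HTLCHandlingFailed` event per expected destination rather than mere membership. A self-contained model of that counting logic (types here are illustrative stand-ins, not LDK's):

```rust
#[derive(Debug, Clone, PartialEq)]
enum Destination {
    NextHopChannel { scid: u64 },
    UnknownNextHop { scid: u64 },
}

enum Event {
    PendingHtlcsForwardable,
    HtlcHandlingFailed { failed_next_destination: Destination },
}

fn assert_failed_destinations(events: Vec<Event>, expected: &[Destination]) {
    let mut remaining = expected.len();
    for event in events {
        match event {
            Event::PendingHtlcsForwardable => {},
            Event::HtlcHandlingFailed { failed_next_destination } => {
                assert!(expected.contains(&failed_next_destination));
                remaining -= 1; // an extra failure underflows (panics in debug builds)
            },
        }
    }
    assert_eq!(remaining, 0); // fails if an expected failure never happened
}

fn main() {
    let events = vec![Event::HtlcHandlingFailed {
        failed_next_destination: Destination::NextHopChannel { scid: 42 },
    }];
    assert_failed_destinations(events, &[Destination::NextHopChannel { scid: 42 }]);
    let _ = Destination::UnknownNextHop { scid: 0 };
}
```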
let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); let short_channel_id = channels[1].0.contents.short_channel_id; - run_onion_failure_test("fee_insufficient", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { + run_onion_failure_test("fee_insufficient", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; - }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), None); + }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), + Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 })); // In an earlier version, we spuriously failed to forward payments if the expected feerate // changed between the channel open and the payment. @@ -349,6 +354,8 @@ fn test_onion_failure() { // positive case send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 40000); + let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 }; + // intermediate node failure let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("invalid_realm", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -366,7 +373,7 @@ fn test_onion_failure() { // describing a length-1 TLV payload, which is obviously bogus. new_payloads[0].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, true, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), None); + }, ||{}, true, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); // final node failure let short_channel_id = channels[1].0.contents.short_channel_id; @@ -385,7 +392,7 @@ fn test_onion_failure() { // length-1 TLV payload, which is obviously bogus. 
new_payloads[1].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, false, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), None); + }, ||{}, false, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); // the following three with run_onion_failure_test_with_fail_intercept() test only the origin node // receiving simulated fail messages @@ -398,7 +405,7 @@ fn test_onion_failure() { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), NODE|2, &[0;0]); - }, ||{}, true, Some(NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: false}), Some(route.paths[0].hops[0].short_channel_id), None); + }, ||{}, true, Some(NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: false}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); // final node failure run_onion_failure_test_with_fail_intercept("temporary_node_failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { @@ -418,7 +425,7 @@ fn test_onion_failure() { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|NODE|2, &[0;0]); - }, ||{}, true, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), None); + }, ||{}, true, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); // final node failure run_onion_failure_test_with_fail_intercept("permanent_node_failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { @@ -439,7 +446,7 @@ fn test_onion_failure() { msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|NODE|3, &[0;0]); }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), None); + }, true, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); // final node failure run_onion_failure_test_with_fail_intercept("required_node_feature_missing", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { @@ -455,13 +462,13 @@ fn test_onion_failure() { // the UpdateAddHTLC that we sent. 
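The numeric codes threaded through these tests (`NODE|2`, `PERM|NODE|2`, `UPDATE|7`, `BADONION|PERM|4`, ...) are BOLT 4 failure codes: a flag half OR'd with a code number. A quick sanity check of the combinations used in this file (flag values are from the BOLT 4 spec, not defined in this diff):

```rust
const BADONION: u16 = 0x8000;
const PERM: u16 = 0x4000;
const NODE: u16 = 0x2000;
const UPDATE: u16 = 0x1000;

fn main() {
    assert_eq!(NODE | 2, 0x2002);            // temporary_node_failure
    assert_eq!(PERM | NODE | 2, 0x6002);     // permanent_node_failure
    assert_eq!(UPDATE | 7, 0x1007);          // temporary_channel_failure
    assert_eq!(PERM | 10, 0x400a);           // unknown_next_peer
    assert_eq!(BADONION | PERM | 4, 0xc004); // invalid_onion_version
    assert_eq!(PERM | 15, 0x400f);           // incorrect_or_unknown_payment_details
}
```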
let short_channel_id = channels[0].0.contents.short_channel_id; run_onion_failure_test("invalid_onion_version", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.version = 1; }, ||{}, true, - Some(BADONION|PERM|4), None, Some(short_channel_id), None); + Some(BADONION|PERM|4), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); run_onion_failure_test("invalid_onion_hmac", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.hmac = [3; 32]; }, ||{}, true, - Some(BADONION|PERM|5), None, Some(short_channel_id), None); + Some(BADONION|PERM|5), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); run_onion_failure_test("invalid_onion_key", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.public_key = Err(secp256k1::Error::InvalidPublicKey);}, ||{}, true, - Some(BADONION|PERM|6), None, Some(short_channel_id), None); + Some(BADONION|PERM|6), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); let short_channel_id = channels[1].0.contents.short_channel_id; let chan_update = ChannelUpdate::dummy(short_channel_id); @@ -478,7 +485,7 @@ fn test_onion_failure() { msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data); }, ||{}, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), None); + Some(short_channel_id), Some(next_hop_failure.clone())); // Check we can still handle onion failures that include channel updates without a type prefix let err_data_without_type = chan_update.encode_with_len(); @@ -490,7 +497,7 @@ fn test_onion_failure() { msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data_without_type); }, ||{}, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), None); + Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test_with_fail_intercept("permanent_channel_failure", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -500,7 +507,7 @@ fn test_onion_failure() { let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|8, &[0;0]); // short_channel_id from the processing node - }, ||{}, true, Some(PERM|8), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), None); + }, ||{}, true, Some(PERM|8), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test_with_fail_intercept("required_channel_feature_missing", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -510,13 +517,13 @@ fn test_onion_failure() { let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|9, &[0;0]); // short_channel_id from the processing node - }, ||{}, true, Some(PERM|9), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), None); + }, ||{}, true, 
Some(PERM|9), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); let mut bogus_route = route.clone(); bogus_route.paths[0].hops[1].short_channel_id -= 1; let short_channel_id = bogus_route.paths[0].hops[1].short_channel_id; - run_onion_failure_test("unknown_next_peer", 0, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(PERM|10), - Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id), None); + run_onion_failure_test("unknown_next_peer", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(PERM|10), + Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id), Some(HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id })); let short_channel_id = channels[1].0.contents.short_channel_id; let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) @@ -525,9 +532,9 @@ fn test_onion_failure() { let mut bogus_route = route.clone(); let route_len = bogus_route.paths[0].hops.len(); bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward; - run_onion_failure_test("amount_below_minimum", 0, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(UPDATE|11), + run_onion_failure_test("amount_below_minimum", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(UPDATE|11), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), None); + Some(short_channel_id), Some(next_hop_failure.clone())); // Clear pending payments so that the following positive test has the correct payment hash. for node in nodes.iter() { @@ -542,25 +549,25 @@ fn test_onion_failure() { // We ignore channel update contents in onion errors, so will blame the 2nd channel even though // the first node is the one that messed up. 
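The `unknown_next_peer` case above is produced by cloning a good route and decrementing the second hop's SCID, so the forwarding node finds no outbound channel; with forwarding checks now running in `process_pending_htlc_forwards`, the test also asserts `HTLCDestination::UnknownNextHop { requested_forward_scid }`. A toy illustration of the SCID-corruption trick (all types hypothetical):

```rust
use std::collections::HashMap;

#[derive(Clone)]
struct Hop { short_channel_id: u64 }

#[derive(Clone)]
struct Route { hops: Vec<Hop> }

// Stand-in for the forwarding node's lookup of its next outbound channel.
fn next_hop_known(channels_by_scid: &HashMap<u64, &'static str>, route: &Route) -> bool {
    channels_by_scid.contains_key(&route.hops[1].short_channel_id)
}

fn main() {
    let mut channels_by_scid = HashMap::new();
    channels_by_scid.insert(100, "chan_0_1");
    channels_by_scid.insert(200, "chan_1_2");

    let route = Route { hops: vec![Hop { short_channel_id: 100 }, Hop { short_channel_id: 200 }] };
    assert!(next_hop_known(&channels_by_scid, &route));

    // The same mutation the test performs: an off-by-one SCID that no longer
    // maps to any channel, yielding unknown_next_peer (PERM|10).
    let mut bogus_route = route.clone();
    bogus_route.hops[1].short_channel_id -= 1;
    assert!(!next_hop_known(&channels_by_scid, &bogus_route));
}
```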
let short_channel_id = channels[1].0.contents.short_channel_id; - run_onion_failure_test("fee_insufficient", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { + run_onion_failure_test("fee_insufficient", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; - }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), None); + }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; - run_onion_failure_test("incorrect_cltv_expiry", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { + run_onion_failure_test("incorrect_cltv_expiry", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { // need to violate: cltv_expiry - cltv_expiry_delta >= outgoing_cltv_value msg.cltv_expiry -= 1; - }, || {}, true, Some(UPDATE|13), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), None); + }, || {}, true, Some(UPDATE|13), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; - run_onion_failure_test("expiry_too_soon", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { + run_onion_failure_test("expiry_too_soon", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS + 1; connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); }, ||{}, true, Some(UPDATE|14), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), None); + Some(short_channel_id), Some(next_hop_failure.clone())); run_onion_failure_test("unknown_payment_hash", 2, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[2].node.fail_htlc_backwards(&payment_hash); @@ -572,9 +579,10 @@ fn test_onion_failure() { connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); - }, || {}, false, Some(0x4000 | 15), None, None, None); + }, || {}, false, Some(0x4000 | 15), None, None, Some(HTLCDestination::FailedPayment { payment_hash })); run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { + nodes[1].node.process_pending_update_add_htlcs(); for (_, pending_forwards) in nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() { for f in pending_forwards.iter_mut() { match f { @@ -584,9 +592,10 @@ fn test_onion_failure() { } } } - }, true, Some(18), None, Some(channels[1].0.contents.short_channel_id), None); + }, true, Some(18), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); run_onion_failure_test("final_incorrect_htlc_amount", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { + nodes[1].node.process_pending_update_add_htlcs(); // violate amt_to_forward > msg.amount_msat for (_, pending_forwards) in nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() { for f in pending_forwards.iter_mut() { @@ -597,17 
+606,17 @@ fn test_onion_failure() { } } } - }, true, Some(19), None, Some(channels[1].0.contents.short_channel_id), None); + }, true, Some(19), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); let short_channel_id = channels[1].0.contents.short_channel_id; - run_onion_failure_test("channel_disabled", 0, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { + run_onion_failure_test("channel_disabled", 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { // disconnect event to the channel between nodes[1] ~ nodes[2] nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id()); nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); }, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), None); - run_onion_failure_test("channel_disabled", 0, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { + Some(short_channel_id), Some(next_hop_failure.clone())); + run_onion_failure_test("channel_disabled", 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { // disconnect event to the channel between nodes[1] ~ nodes[2] for _ in 0..DISABLE_GOSSIP_TICKS + 1 { nodes[1].node.timer_tick_occurred(); @@ -617,10 +626,10 @@ fn test_onion_failure() { nodes[2].node.get_and_clear_pending_msg_events(); }, true, Some(UPDATE|20), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), None); + Some(short_channel_id), Some(next_hop_failure.clone())); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); - run_onion_failure_test("expiry_too_far", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { + run_onion_failure_test("expiry_too_far", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let mut route = route.clone(); let height = nodes[2].best_block_info().1; @@ -632,7 +641,7 @@ fn test_onion_failure() { let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); msg.cltv_expiry = htlc_cltv; msg.onion_routing_packet = onion_packet; - }, ||{}, true, Some(21), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), None); + }, ||{}, true, Some(21), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); run_onion_failure_test_with_fail_intercept("mpp_timeout", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { // Tamper returning error message @@ -686,7 +695,7 @@ fn test_onion_failure() { short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: false, }), - Some(channels[1].0.contents.short_channel_id), None); + Some(channels[1].0.contents.short_channel_id), Some(next_hop_failure.clone())); run_onion_failure_test_with_fail_intercept("0-length channel update in final node UPDATE onion failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); @@ -860,9 +869,9 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { let short_channel_id = channel_to_update.1; let network_update = NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }; run_onion_failure_test( - name, 0, &nodes, &route, 
&payment_hash, &payment_secret, |_| {}, || {}, true, + name, 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {}, true, Some(error_code), Some(network_update), Some(short_channel_id), - None, + Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_to_update.0 }), ); }; @@ -1143,6 +1152,8 @@ fn test_phantom_onion_hmac_failure() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); + expect_pending_htlcs_forwardable_ignore!(nodes[1]); + nodes[1].node.process_pending_update_add_htlcs(); // Modify the payload so the phantom hop's HMAC is bogus. let sha256_of_onion = { @@ -1161,7 +1172,6 @@ fn test_phantom_onion_hmac_failure() { _ => panic!("Unexpected forward"), } }; - expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); @@ -1205,6 +1215,8 @@ fn test_phantom_invalid_onion_payload() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); + expect_pending_htlcs_forwardable_ignore!(nodes[1]); + nodes[1].node.process_pending_update_add_htlcs(); // Modify the onion packet to have an invalid payment amount. for (_, pending_forwards) in nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() { @@ -1237,7 +1249,6 @@ fn test_phantom_invalid_onion_payload() { } } } - expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); @@ -1280,6 +1291,8 @@ fn test_phantom_final_incorrect_cltv_expiry() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); + expect_pending_htlcs_forwardable_ignore!(nodes[1]); + nodes[1].node.process_pending_update_add_htlcs(); // Modify the payload so the phantom hop's HMAC is bogus. 
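The phantom-node tests above now call `process_pending_update_add_htlcs` explicitly before poking at `forward_htlcs`, because an incoming `update_add_htlc` first lands in the `decode_update_add_htlcs` queue and only reaches the forward map after that pass runs. A rough standalone model of the two-stage pipeline, using the same `mem::swap` pattern as the channelmanager.rs hunk but with simplified types:

```rust
use std::collections::HashMap;
use std::mem;
use std::sync::Mutex;

struct Node {
    decode_update_add_htlcs: Mutex<HashMap<u64, Vec<String>>>,
    forward_htlcs: Mutex<HashMap<u64, Vec<String>>>,
}

impl Node {
    fn process_pending_update_add_htlcs(&self) {
        // Same pattern as the diff: swap the queue out, then process it.
        let mut pending = HashMap::new();
        mem::swap(&mut pending, &mut *self.decode_update_add_htlcs.lock().unwrap());
        for (scid, htlcs) in pending {
            // Onion-decode and forwarding checks would run here; survivors
            // become entries in the forward map the tests tamper with.
            self.forward_htlcs.lock().unwrap().entry(scid).or_default().extend(htlcs);
        }
    }
}

fn main() {
    let node = Node {
        decode_update_add_htlcs: Mutex::new(HashMap::from([(42, vec!["htlc".to_string()])])),
        forward_htlcs: Mutex::new(HashMap::new()),
    };
    node.process_pending_update_add_htlcs();
    assert_eq!(node.forward_htlcs.lock().unwrap()[&42].len(), 1);
}
```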
for (_, pending_forwards) in nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() { @@ -1294,7 +1307,6 @@ fn test_phantom_final_incorrect_cltv_expiry() { } } } - expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); @@ -1391,6 +1403,12 @@ fn test_phantom_failure_modified_cltv() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::UnknownNextHop { requested_forward_scid: phantom_scid }] + ); + check_added_monitors(&nodes[1], 1); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(update_1.update_fail_htlcs.len() == 1); @@ -1431,6 +1449,12 @@ fn test_phantom_failure_expires_too_soon() { connect_blocks(&nodes[1], CLTV_FAR_FAR_AWAY); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::UnknownNextHop { requested_forward_scid: phantom_scid }] + ); + check_added_monitors(&nodes[1], 1); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(update_1.update_fail_htlcs.len() == 1); @@ -1518,7 +1542,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { // Get the route with an amount exceeding the dust exposure threshold of nodes[1]. let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1], Some(max_dust_exposure + 1)); - let (mut route, _) = get_phantom_route!(nodes, max_dust_exposure + 1, channel); + let (mut route, phantom_scid) = get_phantom_route!(nodes, max_dust_exposure + 1, channel); // Route the HTLC through to the destination. 
@@ -1518,7 +1542,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) {
     // Get the route with an amount exceeding the dust exposure threshold of nodes[1].
     let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1], Some(max_dust_exposure + 1));
-    let (mut route, _) = get_phantom_route!(nodes, max_dust_exposure + 1, channel);
+    let (mut route, phantom_scid) = get_phantom_route!(nodes, max_dust_exposure + 1, channel);

     // Route the HTLC through to the destination.
     nodes[0].node.send_payment_with_route(route.clone(), payment_hash,
@@ -1529,6 +1553,12 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) {
     nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add);
     commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true);
+    expect_pending_htlcs_forwardable!(nodes[1]);
+    expect_htlc_handling_failed_destinations!(
+        nodes[1].node.get_and_clear_pending_events(),
+        &[HTLCDestination::UnknownNextHop { requested_forward_scid: phantom_scid }]
+    );
+    check_added_monitors(&nodes[1], 1);

     let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
     assert!(update_1.update_fail_htlcs.len() == 1);
@@ -1543,9 +1573,8 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) {
     err_data.extend_from_slice(&channel.1.encode());

     let mut fail_conditions = PaymentFailedConditions::new()
-        .blamed_scid(route.paths[0].hops.last().as_ref().unwrap().short_channel_id)
-        .blamed_chan_closed(false)
-        .expected_htlc_error_data(0x1000 | 7, &err_data);
+        .blamed_scid(phantom_scid)
+        .expected_htlc_error_data(0x2000 | 2, &[]);
     expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions);
 }
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index eac2a35c6a1..c758d3b3159 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -519,6 +519,8 @@ fn test_reject_mpp_keysend_htlc() {
     let update_add_1 = update_1.update_add_htlcs[0].clone();
     nodes[3].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_1);
     commitment_signed_dance!(nodes[3], nodes[1], update_1.commitment_signed, false, true);
+    expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+    nodes[3].node.process_pending_update_add_htlcs();

     assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
     for (_, pending_forwards) in nodes[3].node.forward_htlcs.lock().unwrap().iter_mut() {
@@ -539,7 +541,7 @@ fn test_reject_mpp_keysend_htlc() {
             }
         }
     }
-    expect_pending_htlcs_forwardable!(nodes[3]);
+    nodes[3].node.process_pending_htlc_forwards();

     // Pay along nodes[2]
     route.paths[0].hops[0].pubkey = nodes[2].node.get_our_node_id();
@@ -561,6 +563,8 @@ fn test_reject_mpp_keysend_htlc() {
     let update_add_3 = update_3.update_add_htlcs[0].clone();
     nodes[3].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &update_add_3);
     commitment_signed_dance!(nodes[3], nodes[2], update_3.commitment_signed, false, true);
+    expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+    nodes[3].node.process_pending_update_add_htlcs();

     assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
     for (_, pending_forwards) in nodes[3].node.forward_htlcs.lock().unwrap().iter_mut() {
@@ -581,7 +585,8 @@ fn test_reject_mpp_keysend_htlc() {
             }
         }
     }
-    expect_pending_htlcs_forwardable!(nodes[3]);
+    nodes[3].node.process_pending_htlc_forwards();
+    expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]);
     check_added_monitors!(nodes[3], 1);

     // Fail back along nodes[2]
@@ -596,7 +601,6 @@ fn test_reject_mpp_keysend_htlc() {
     commitment_signed_dance!(nodes[0], nodes[2], update_fail_1.commitment_signed, false);

     expect_payment_failed_conditions(&nodes[0], payment_hash, true, PaymentFailedConditions::new());
-    expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]);
 }
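[Reviewer note] The hunks that follow repeat one assertion shape many times: after the commitment dance, the forwarding node must be prodded to process the forward, it then surfaces an HTLCHandlingFailed event, and the resulting fail-back adds exactly one monitor update. The recurring pattern, as a sketch (node indices, channel id, and destination vary per test):

    expect_pending_htlcs_forwardable!(nodes[1]);
    expect_htlc_handling_failed_destinations!(
        nodes[1].node.get_and_clear_pending_events(),
        &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]
    );
    check_added_monitors(&nodes[1], 1);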
@@ -677,6 +681,12 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
     nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
     commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
+    expect_pending_htlcs_forwardable!(nodes[1]);
+    expect_htlc_handling_failed_destinations!(
+        nodes[1].node.get_and_clear_pending_events(),
+        &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2}]
+    );
+    check_added_monitors(&nodes[1], 1);

     // nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected
     let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -1305,7 +1315,7 @@ fn successful_probe_yields_event() {
     let res = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();

-    let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]];
+    let expected_route: &[(&[&Node], PaymentHash)] = &[(&[&nodes[1], &nodes[2]], res.0)];

     send_probe_along_route(&nodes[0], expected_route);
@@ -1446,7 +1456,7 @@ fn preflight_probes_yield_event_skip_private_hop() {
     let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value);
     let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap();

-    let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[2], &nodes[3]]];
+    let expected_route: &[(&[&Node], PaymentHash)] = &[(&[&nodes[1], &nodes[2], &nodes[3]], res[0].0)];

     assert_eq!(res.len(), expected_route.len());
@@ -1492,7 +1502,7 @@ fn preflight_probes_yield_event() {
     let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value);
     let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap();

-    let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
+    let expected_route: &[(&[&Node], PaymentHash)] = &[(&[&nodes[1], &nodes[3]], res[0].0), (&[&nodes[2], &nodes[3]], res[1].0)];

     assert_eq!(res.len(), expected_route.len());
@@ -1539,7 +1549,7 @@ fn preflight_probes_yield_event_and_skip() {
     let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value);
     let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap();

-    let expected_route : &[&[&Node]] = &[&[&nodes[1], &nodes[2], &nodes[4]]];
+    let expected_route: &[(&[&Node], PaymentHash)] = &[(&[&nodes[1], &nodes[2], &nodes[4]], res[0].0)];

     // We check that only one probe was sent, the other one was skipped due to limited liquidity.
     assert_eq!(res.len(), 1);
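[Reviewer note] With probes now identified by hash, each expected path is paired with the PaymentHash its send call returned (res.0 for a single probe, res[i].0 per preflight probe). For many probes the pairing could equally be built programmatically; a hypothetical sketch, with `paths` and `res` standing in for a test's own values:

    // Hypothetical generalization of the literal tuples above; `paths` must line up 1:1
    // with the (PaymentHash, PaymentId) entries in `res`.
    let paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
    let expected_route: Vec<(&[&Node], PaymentHash)> =
        paths.iter().zip(res.iter()).map(|(path, probe)| (*path, probe.0)).collect();
    assert_eq!(res.len(), expected_route.len());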
@@ -1922,6 +1932,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
     };
     nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
     commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true);
+    expect_pending_htlcs_forwardable!(nodes[1]);

     // Check that we generate the PaymentIntercepted event when an intercept forward is detected.
     let events = nodes[1].node.get_and_clear_pending_events();
@@ -2106,6 +2117,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
     for (idx, ev) in events.into_iter().enumerate() {
         nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &ev.msgs[0]);
         do_commitment_signed_dance(&nodes[1], &nodes[0], &ev.commitment_msg, false, true);
+        expect_pending_htlcs_forwardable!(nodes[1]);

         let events = nodes[1].node.get_and_clear_pending_events();
         assert_eq!(events.len(), 1);
@@ -2914,8 +2926,10 @@ fn no_extra_retries_on_back_to_back_fail() {
     let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
     let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

-    let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id;
-    let chan_2_scid = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0).0.contents.short_channel_id;
+    let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0);
+    let chan_1_scid = chan_1.0.contents.short_channel_id;
+    let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0);
+    let chan_2_scid = chan_2.0.contents.short_channel_id;

     let amt_msat = 200_000_000;
     let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
@@ -2984,66 +2998,58 @@ fn no_extra_retries_on_back_to_back_fail() {
     route.route_params = Some(retry_params.clone());
     nodes[0].router.expect_find_route(retry_params, Ok(route.clone()));

+    // We can't use the commitment_signed_dance macro helper because in this test we'll be sending
+    // two HTLCs back-to-back on the same channel, and the macro only expects to handle one at a
+    // time.
     nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
         PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
-    let htlc_updates = SendEvent::from_node(&nodes[0]);
+
+    let first_htlc_updates = SendEvent::from_node(&nodes[0]);
     check_added_monitors!(nodes[0], 1);
-    assert_eq!(htlc_updates.msgs.len(), 1);
+    assert_eq!(first_htlc_updates.msgs.len(), 1);

-    nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &htlc_updates.msgs[0]);
-    nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &htlc_updates.commitment_msg);
+    nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &first_htlc_updates.msgs[0]);
+    nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &first_htlc_updates.commitment_msg);
     check_added_monitors!(nodes[1], 1);
-    let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

+    let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
     nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa);
     check_added_monitors!(nodes[0], 1);
+
     let second_htlc_updates = SendEvent::from_node(&nodes[0]);
+    assert_eq!(second_htlc_updates.msgs.len(), 1);

     nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_cs);
     check_added_monitors!(nodes[0], 1);
+
     let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+    nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa);
+    check_added_monitors!(nodes[1], 1);

     nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]);
     nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg);
     check_added_monitors!(nodes[1], 1);
-    let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
-
-    nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa);
-    check_added_monitors!(nodes[1], 1);
-    let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+    let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

     nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa);
     check_added_monitors!(nodes[0], 1);
-
-    nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
-    nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_fail_update.commitment_signed);
+    nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_cs);
     check_added_monitors!(nodes[0], 1);
-    let (as_second_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+    let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

     nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa);
     check_added_monitors!(nodes[1], 1);
-    let bs_second_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-
-    nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_third_cs);
-    check_added_monitors!(nodes[1], 1);
-    let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
-    nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_second_fail_update.update_fail_htlcs[0]);
-    nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_fail_update.commitment_signed);
-    check_added_monitors!(nodes[0], 1);
-
-    nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa);
-    check_added_monitors!(nodes[0], 1);
-    let (as_third_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
-
-    nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_third_raa);
-    check_added_monitors!(nodes[1], 1);
-    nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_fourth_cs);
-    check_added_monitors!(nodes[1], 1);
-    let bs_fourth_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+    expect_pending_htlcs_forwardable!(nodes[1]);
+    let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 };
+    expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone(), next_hop_failure.clone()]);
+    check_added_monitors(&nodes[1], 1);

-    nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_fourth_raa);
-    check_added_monitors!(nodes[0], 1);
+    let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+    assert_eq!(bs_fail_update.update_fail_htlcs.len(), 2);
+    nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
+    nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[1]);
+    commitment_signed_dance!(nodes[0], nodes[1], bs_fail_update.commitment_signed, false);
     // At this point A has sent two HTLCs which both failed due to lack of fee. It now has two
     // pending `PaymentPathFailed` events, one with `all_paths_failed` unset, and the second
@@ -3084,6 +3090,10 @@ fn no_extra_retries_on_back_to_back_fail() {
     nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &retry_htlc_updates.msgs[0]);
     commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true);
+    expect_pending_htlcs_forwardable!(nodes[1]);
+    expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]);
+    check_added_monitors(&nodes[1], 1);
+
     let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
     nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
     commitment_signed_dance!(nodes[0], nodes[1], &bs_fail_update.commitment_signed, false, true);
@@ -3119,8 +3129,10 @@ fn test_simple_partial_retry() {
     let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
     let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

-    let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id;
-    let chan_2_scid = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0).0.contents.short_channel_id;
+    let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0);
+    let chan_1_scid = chan_1.0.contents.short_channel_id;
+    let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0);
+    let chan_2_scid = chan_2.0.contents.short_channel_id;

     let amt_msat = 200_000_000;
     let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat);
@@ -3189,52 +3201,64 @@ fn test_simple_partial_retry() {
     route.route_params = Some(retry_params.clone());
     nodes[0].router.expect_find_route(retry_params, Ok(route.clone()));

+    // We can't use the commitment_signed_dance macro helper because in this test we'll be sending
+    // two HTLCs back-to-back on the same channel, and the macro only expects to handle one at a
+    // time.
     nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
         PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
-    let htlc_updates = SendEvent::from_node(&nodes[0]);
+    let first_htlc_updates = SendEvent::from_node(&nodes[0]);
     check_added_monitors!(nodes[0], 1);
-    assert_eq!(htlc_updates.msgs.len(), 1);
+    assert_eq!(first_htlc_updates.msgs.len(), 1);

-    nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &htlc_updates.msgs[0]);
-    nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &htlc_updates.commitment_msg);
+    nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &first_htlc_updates.msgs[0]);
+    nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &first_htlc_updates.commitment_msg);
     check_added_monitors!(nodes[1], 1);
-    let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

+    let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
     nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa);
     check_added_monitors!(nodes[0], 1);
+
     let second_htlc_updates = SendEvent::from_node(&nodes[0]);
+    assert_eq!(second_htlc_updates.msgs.len(), 1);

     nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_cs);
     check_added_monitors!(nodes[0], 1);
-    let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-
-    nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]);
-    nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg);
-    check_added_monitors!(nodes[1], 1);
-    let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+    let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

     nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa);
     check_added_monitors!(nodes[1], 1);
-    let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-
-    nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa);
-    check_added_monitors!(nodes[0], 1);
-
-    nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
-    nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_fail_update.commitment_signed);
-    check_added_monitors!(nodes[0], 1);
-    let (as_second_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
-    nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa);
-    check_added_monitors!(nodes[1], 1);
-
-    nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_third_cs);
-    check_added_monitors!(nodes[1], 1);
+    nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]);
+    commitment_signed_dance!(nodes[1], nodes[0], second_htlc_updates.commitment_msg, false);

-    let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+    expect_pending_htlcs_forwardable!(nodes[1]);
+    let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 };
+    expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]);
+    check_added_monitors(&nodes[1], 2);

-    nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa);
-    check_added_monitors!(nodes[0], 1);
+    {
+        let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+        assert_eq!(msg_events.len(), 2);
+        let mut handle_update_htlcs = |event: MessageSendEvent| {
+            if let MessageSendEvent::UpdateHTLCs { node_id, updates } = event {
+                if node_id == nodes[0].node.get_our_node_id() {
+                    assert_eq!(updates.update_fail_htlcs.len(), 1);
+                    nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+                    commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
+                } else if node_id == nodes[2].node.get_our_node_id() {
+                    assert_eq!(updates.update_add_htlcs.len(), 1);
+                    nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+                    commitment_signed_dance!(nodes[2], nodes[1], &updates.commitment_signed, false);
+                } else {
+                    panic!("Unexpected node_id for UpdateHTLCs send");
+                }
+            } else {
+                panic!("Unexpected event");
+            }
+        };
+        handle_update_htlcs(msg_events.remove(0));
+        handle_update_htlcs(msg_events.remove(0));
+    }

     let mut events = nodes[0].node.get_and_clear_pending_events();
     assert_eq!(events.len(), 2);
@@ -3260,10 +3284,9 @@ fn test_simple_partial_retry() {
     expect_pending_htlcs_forwardable!(nodes[1]);
     check_added_monitors!(nodes[1], 1);

-    let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
-    nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
-    nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
-    commitment_signed_dance!(nodes[2], nodes[1], &bs_forward_update.commitment_signed, false);
+    let bs_second_forward_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+    nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_second_forward_update.update_add_htlcs[0]);
+    commitment_signed_dance!(nodes[2], nodes[1], &bs_second_forward_update.commitment_signed, false);

     expect_pending_htlcs_forwardable!(nodes[2]);
     expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat);
@@ -3398,6 +3421,13 @@ fn test_threaded_payment_retries() {
     nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
     commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
+    expect_pending_htlcs_forwardable!(nodes[1]);
+    nodes[1].node.process_pending_htlc_forwards();
+    expect_htlc_handling_failed_destinations!(
+        nodes[1].node.get_and_clear_pending_events(),
+        &[HTLCDestination::UnknownNextHop { requested_forward_scid: route.paths[0].hops[1].short_channel_id }]
+    );
+    check_added_monitors(&nodes[1], 1);

     // Note that we only push one route into `expect_find_route` at a time, because that's all
     // the retries (should) need. If the bug is reintroduced "real" routes may be selected, but
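[Reviewer note] One detail worth flagging in the test_simple_partial_retry rework above: once nodes[1] owes messages to two different peers, get_and_clear_pending_msg_events returns them in no guaranteed order, so the test dispatches on the node_id inside each UpdateHTLCs rather than indexing the vector. A stripped-down sketch of that dispatch idea (handling bodies elided):

    // Sketch; assumes exactly one UpdateHTLCs per peer, as asserted above.
    for event in msg_events.drain(..) {
        match event {
            MessageSendEvent::UpdateHTLCs { node_id, .. } if node_id == nodes[0].node.get_our_node_id() => {
                // deliver the fail-back to the payer here
            },
            MessageSendEvent::UpdateHTLCs { node_id, .. } if node_id == nodes[2].node.get_our_node_id() => {
                // deliver the forward to the next hop here
            },
            _ => panic!("Unexpected event"),
        }
    }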
@@ -4073,6 +4103,12 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) {
     nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &c_recv_ev.msgs[0]);
     commitment_signed_dance!(nodes[2], nodes[0], c_recv_ev.commitment_msg, false, true);
+    expect_pending_htlcs_forwardable!(nodes[2]);
+    expect_htlc_handling_failed_destinations!(
+        nodes[2].node.get_and_clear_pending_events(),
+        &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd }]
+    );
+    check_added_monitors(&nodes[2], 1);

     let cs_fail = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id());
     nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &cs_fail.update_fail_htlcs[0]);
diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs
index 248f2dfb0ac..27937dd337a 100644
--- a/lightning/src/ln/priv_short_conf_tests.rs
+++ b/lightning/src/ln/priv_short_conf_tests.rs
@@ -77,6 +77,12 @@ fn test_priv_forwarding_rejection() {
     let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
     nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
     commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
+    expect_pending_htlcs_forwardable!(nodes[1]);
+    expect_htlc_handling_failed_destinations!(
+        nodes[1].node.get_and_clear_pending_events(),
+        &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]
+    );
+    check_added_monitors(&nodes[1], 1);

     let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
     assert!(htlc_fail_updates.update_add_htlcs.is_empty());
@@ -441,6 +447,12 @@ fn test_inbound_scid_privacy() {
     assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
     nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
     commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, true, true);
+    expect_pending_htlcs_forwardable!(nodes[1]);
+    expect_htlc_handling_failed_destinations!(
+        nodes[1].node.get_and_clear_pending_events(),
+        &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: last_hop[0].channel_id }]
+    );
+    check_added_monitors(&nodes[1], 1);

     nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", regex::Regex::new(r"Refusing to forward over real channel SCID as our counterparty requested").unwrap(), 1);
@@ -541,6 +553,13 @@ fn test_scid_alias_returned() {
     nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
     commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true);
+    expect_pending_htlcs_forwardable!(nodes[1]);
+    expect_htlc_handling_failed_destinations!(
+        nodes[1].node.get_and_clear_pending_events(),
+        &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }]
+    );
+    check_added_monitors(&nodes[1], 1);
+
     let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
     nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
     commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
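[Reviewer note] Since nearly every hunk in this diff asserts on an HTLCDestination, a quick map of the variants as they are used here may help review (see lightning::events::HTLCDestination for the authoritative docs):

    // As exercised in this diff:
    // - HTLCDestination::NextHopChannel { node_id, channel_id }: the forward failed on a known
    //   outbound channel (disconnected peer, SCID-alias policy refusal, dust limits, shutdown).
    // - HTLCDestination::UnknownNextHop { requested_forward_scid }: no usable channel for the
    //   requested SCID (the phantom-node and threaded-retry cases).
    // - HTLCDestination::InvalidOnion and HTLCDestination::FailedPayment { payment_hash }:
    //   onion-level and receive-side failures (blinded paths, MPP keysend rejection).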
129b83eae71..bd779a18da7 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -742,10 +742,6 @@ fn test_forwardable_regen() {
     nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
     commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);

-    // There is already a PendingHTLCsForwardable event "pending" so another one will not be
-    // generated
-    assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
-
     // Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable
     nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
     nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id());
@@ -998,9 +994,12 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
     // present when we serialized.
     let node_encoded = nodes[1].node.encode();

+    expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+
     let mut intercept_id = None;
     let mut expected_outbound_amount_msat = None;
     if use_intercept {
+        nodes[1].node.process_pending_update_add_htlcs();
         let events = nodes[1].node.get_and_clear_pending_events();
         assert_eq!(events.len(), 1);
         match events[0] {
@@ -1014,7 +1013,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
             nodes[2].node.get_our_node_id(), expected_outbound_amount_msat.unwrap()).unwrap();
     }

-    expect_pending_htlcs_forwardable!(nodes[1]);
+    nodes[1].node.process_pending_htlc_forwards();

     let payment_event = SendEvent::from_node(&nodes[1]);
     nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index 9fd428329af..ba682f68375 100644
--- a/lightning/src/ln/shutdown_tests.rs
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -465,6 +465,12 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) {
     check_added_monitors!(nodes[1], 1);
     nodes[1].node.handle_shutdown(nodes[0].node.get_our_node_id(), &node_0_shutdown);
     commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false);
+    expect_pending_htlcs_forwardable!(nodes[1]);
+    expect_htlc_handling_failed_destinations!(
+        nodes[1].node.get_and_clear_pending_events(),
+        &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]
+    );
+    check_added_monitors(&nodes[1], 1);

     let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
     assert!(updates_2.update_add_htlcs.is_empty());
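[Reviewer note] A final note on the reworked no_extra_retries_on_back_to_back_fail earlier in this diff: because both HTLCs now fail inside a single forwarding batch, one UpdateHTLCs message carries two update_fail_htlcs, and both must be delivered before the single commitment dance. A loop makes that intent explicit; a sketch assuming the same `bs_fail_update` shape used in that test:

    // Equivalent to the two explicit handle_update_fail_htlc calls in the test.
    for fail_msg in bs_fail_update.update_fail_htlcs.iter() {
        nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), fail_msg);
    }
    commitment_signed_dance!(nodes[0], nodes[1], bs_fail_update.commitment_signed, false);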