From 883afb38d47cb5d8d8721a356302b1e41b7c476c Mon Sep 17 00:00:00 2001
From: Duncan Dean
Date: Tue, 7 Mar 2023 21:56:01 +0200
Subject: [PATCH 01/25] Move `Channel` fields into `ChannelContext` struct

This is a first step for simplifying the channel state and introducing new
unfunded channel types that hold similar state before being promoted to
funded channels.

Essentially, we want the outer `Channel` type (and upcoming channel types) to
wrap the context so we can apply typestate patterns to that wrapper while
also deduplicating code for common state and other internal fields.
---
 lightning/src/ln/chanmon_update_fail_tests.rs |    4 +-
 lightning/src/ln/channel.rs                   | 2623 +++++++++--------
 lightning/src/ln/channelmanager.rs            |    2 +-
 lightning/src/ln/functional_tests.rs          |   20 +-
 lightning/src/ln/shutdown_tests.rs            |    2 +-
 5 files changed, 1331 insertions(+), 1320 deletions(-)

diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 015650cb823..707c27908ed 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -1456,12 +1456,12 @@ fn monitor_failed_no_reestablish_response() {
 	{
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+		get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
 	}
 	{
 		let mut node_1_per_peer_lock;
 		let mut node_1_peer_state_lock;
-		get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+		get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
 	}
 
 	// Route the payment and deliver the initial commitment_signed (with a monitor update failure
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 8cb73241562..195e261fe6f 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -491,7 +491,7 @@ pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
 /// them.
 ///
-/// See [`Channel::sent_message_awaiting_response`] for more information.
+/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
 
 struct PendingChannelMonitorUpdate {
@@ -509,14 +509,8 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
 	(2, blocked, required),
 });
 
-// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
-// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
-// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
-// inbound channel.
-//
-// Holder designates channel data owned for the benefice of the user client.
-// Counterparty designates channel data owned by the another channel participant entity.
-pub(super) struct Channel {
+/// Contains everything about the channel including state, and various flags.
+pub(super) struct ChannelContext { config: LegacyChannelConfig, // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were @@ -790,6 +784,17 @@ pub(super) struct Channel { pending_monitor_updates: Vec, } +// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking +// has been completed, and then turn into a Channel to get compiler-time enforcement of things like +// calling channel_id() before we're set up or things like get_outbound_funding_signed on an +// inbound channel. +// +// Holder designates channel data owned for the benefit of the user client. +// Counterparty designates channel data owned by the another channel participant entity. +pub(super) struct Channel { + pub context: ChannelContext, +} + #[cfg(any(test, fuzzing))] struct CommitmentTxInfoCached { fee: u64, @@ -920,7 +925,7 @@ impl Channel { } pub(crate) fn opt_anchors(&self) -> bool { - self.channel_transaction_parameters.opt_anchors.is_some() + self.context.channel_transaction_parameters.opt_anchors.is_some() } fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures { @@ -953,8 +958,8 @@ impl Channel { /// not of our ability to open any channel at all. Thus, on error, we should first call this /// and see if we get a new `OpenChannel` message, otherwise the channel is failed. pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result { - if !self.is_outbound() || self.channel_state != ChannelState::OurInitSent as u32 { return Err(()); } - if self.channel_type == ChannelTypeFeatures::only_static_remote_key() { + if !self.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); } + if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() { // We've exhausted our options return Err(()); } @@ -967,14 +972,14 @@ impl Channel { // checks whether the counterparty supports every feature, this would only happen if the // counterparty is advertising the feature, but rejecting channels proposing the feature for // whatever reason. 
- if self.channel_type.supports_anchors_zero_fee_htlc_tx() { - self.channel_type.clear_anchors_zero_fee_htlc_tx(); - assert!(self.channel_transaction_parameters.opt_non_zero_fee_anchors.is_none()); - self.channel_transaction_parameters.opt_anchors = None; - } else if self.channel_type.supports_scid_privacy() { - self.channel_type.clear_scid_privacy(); + if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { + self.context.channel_type.clear_anchors_zero_fee_htlc_tx(); + assert!(self.context.channel_transaction_parameters.opt_non_zero_fee_anchors.is_none()); + self.context.channel_transaction_parameters.opt_anchors = None; + } else if self.context.channel_type.supports_scid_privacy() { + self.context.channel_type.clear_scid_privacy(); } else { - self.channel_type = ChannelTypeFeatures::only_static_remote_key(); + self.context.channel_type = ChannelTypeFeatures::only_static_remote_key(); } Ok(self.get_open_channel(chain_hash)) } @@ -1049,130 +1054,132 @@ impl Channel { let temporary_channel_id = entropy_source.get_secure_random_bytes(); Ok(Channel { - user_id, + context: ChannelContext { + user_id, - config: LegacyChannelConfig { - options: config.channel_config.clone(), - announced_channel: config.channel_handshake_config.announced_channel, - commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, - }, + config: LegacyChannelConfig { + options: config.channel_config.clone(), + announced_channel: config.channel_handshake_config.announced_channel, + commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, + }, - prev_config: None, - - inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()), - - channel_id: temporary_channel_id, - temporary_channel_id: Some(temporary_channel_id), - channel_state: ChannelState::OurInitSent as u32, - announcement_sigs_state: AnnouncementSigsState::NotSent, - secp_ctx, - channel_value_satoshis, - - latest_monitor_update_id: 0, - - holder_signer, - shutdown_scriptpubkey, - destination_script, - - cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - value_to_self_msat, - - pending_inbound_htlcs: Vec::new(), - pending_outbound_htlcs: Vec::new(), - holding_cell_htlc_updates: Vec::new(), - pending_update_fee: None, - holding_cell_update_fee: None, - next_holder_htlc_id: 0, - next_counterparty_htlc_id: 0, - update_time_counter: 1, - - resend_order: RAACommitmentOrder::CommitmentFirst, - - monitor_pending_channel_ready: false, - monitor_pending_revoke_and_ack: false, - monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), - monitor_pending_failures: Vec::new(), - monitor_pending_finalized_fulfills: Vec::new(), - - #[cfg(debug_assertions)] - holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), - #[cfg(debug_assertions)] - counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), - - last_sent_closing_fee: None, - pending_counterparty_closing_signed: None, - closing_fee_limits: None, - target_closing_feerate_sats_per_kw: None, - - inbound_awaiting_accept: false, - - funding_tx_confirmed_in: None, - funding_tx_confirmation_height: 0, - short_channel_id: None, - channel_creation_height: current_chain_height, - - feerate_per_kw: feerate, - counterparty_dust_limit_satoshis: 0, - holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, - 
counterparty_max_htlc_value_in_flight_msat: 0, - holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), - counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel - holder_selected_channel_reserve_satoshis, - counterparty_htlc_minimum_msat: 0, - holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, - counterparty_max_accepted_htlcs: 0, - holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), - minimum_depth: None, // Filled in in accept_channel - - counterparty_forwarding_info: None, - - channel_transaction_parameters: ChannelTransactionParameters { - holder_pubkeys: pubkeys, - holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, - is_outbound_from_holder: true, - counterparty_parameters: None, - funding_outpoint: None, - opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None }, - opt_non_zero_fee_anchors: None - }, - funding_transaction: None, + prev_config: None, + + inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()), + + channel_id: temporary_channel_id, + temporary_channel_id: Some(temporary_channel_id), + channel_state: ChannelState::OurInitSent as u32, + announcement_sigs_state: AnnouncementSigsState::NotSent, + secp_ctx, + channel_value_satoshis, + + latest_monitor_update_id: 0, + + holder_signer, + shutdown_scriptpubkey, + destination_script, + + cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + value_to_self_msat, + + pending_inbound_htlcs: Vec::new(), + pending_outbound_htlcs: Vec::new(), + holding_cell_htlc_updates: Vec::new(), + pending_update_fee: None, + holding_cell_update_fee: None, + next_holder_htlc_id: 0, + next_counterparty_htlc_id: 0, + update_time_counter: 1, + + resend_order: RAACommitmentOrder::CommitmentFirst, + + monitor_pending_channel_ready: false, + monitor_pending_revoke_and_ack: false, + monitor_pending_commitment_signed: false, + monitor_pending_forwards: Vec::new(), + monitor_pending_failures: Vec::new(), + monitor_pending_finalized_fulfills: Vec::new(), + + #[cfg(debug_assertions)] + holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), + #[cfg(debug_assertions)] + counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), + + last_sent_closing_fee: None, + pending_counterparty_closing_signed: None, + closing_fee_limits: None, + target_closing_feerate_sats_per_kw: None, + + inbound_awaiting_accept: false, + + funding_tx_confirmed_in: None, + funding_tx_confirmation_height: 0, + short_channel_id: None, + channel_creation_height: current_chain_height, + + feerate_per_kw: feerate, + counterparty_dust_limit_satoshis: 0, + holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, + counterparty_max_htlc_value_in_flight_msat: 0, + holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), + counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel + holder_selected_channel_reserve_satoshis, + counterparty_htlc_minimum_msat: 0, + holder_htlc_minimum_msat: if 
config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, + counterparty_max_accepted_htlcs: 0, + holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), + minimum_depth: None, // Filled in in accept_channel + + counterparty_forwarding_info: None, + + channel_transaction_parameters: ChannelTransactionParameters { + holder_pubkeys: pubkeys, + holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, + is_outbound_from_holder: true, + counterparty_parameters: None, + funding_outpoint: None, + opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None }, + opt_non_zero_fee_anchors: None + }, + funding_transaction: None, - counterparty_cur_commitment_point: None, - counterparty_prev_commitment_point: None, - counterparty_node_id, + counterparty_cur_commitment_point: None, + counterparty_prev_commitment_point: None, + counterparty_node_id, - counterparty_shutdown_scriptpubkey: None, + counterparty_shutdown_scriptpubkey: None, - commitment_secrets: CounterpartyCommitmentSecrets::new(), + commitment_secrets: CounterpartyCommitmentSecrets::new(), - channel_update_status: ChannelUpdateStatus::Enabled, - closing_signed_in_flight: false, + channel_update_status: ChannelUpdateStatus::Enabled, + closing_signed_in_flight: false, - announcement_sigs: None, + announcement_sigs: None, - #[cfg(any(test, fuzzing))] - next_local_commitment_tx_fee_info_cached: Mutex::new(None), - #[cfg(any(test, fuzzing))] - next_remote_commitment_tx_fee_info_cached: Mutex::new(None), + #[cfg(any(test, fuzzing))] + next_local_commitment_tx_fee_info_cached: Mutex::new(None), + #[cfg(any(test, fuzzing))] + next_remote_commitment_tx_fee_info_cached: Mutex::new(None), - workaround_lnd_bug_4006: None, - sent_message_awaiting_response: None, + workaround_lnd_bug_4006: None, + sent_message_awaiting_response: None, - latest_inbound_scid_alias: None, - outbound_scid_alias, + latest_inbound_scid_alias: None, + outbound_scid_alias, - channel_pending_event_emitted: false, - channel_ready_event_emitted: false, + channel_pending_event_emitted: false, + channel_ready_event_emitted: false, - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills: HashSet::new(), + #[cfg(any(test, fuzzing))] + historical_inbound_htlc_fulfills: HashSet::new(), - channel_type, - channel_keys_id, + channel_type, + channel_keys_id, - pending_monitor_updates: Vec::new(), + pending_monitor_updates: Vec::new(), + } }) } @@ -1406,133 +1413,135 @@ impl Channel { secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); let chan = Channel { - user_id, + context: ChannelContext { + user_id, - config: LegacyChannelConfig { - options: config.channel_config.clone(), - announced_channel, - commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, - }, + config: LegacyChannelConfig { + options: config.channel_config.clone(), + announced_channel, + commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, + }, - prev_config: None, - - inbound_handshake_limits_override: None, - - channel_id: msg.temporary_channel_id, - temporary_channel_id: Some(msg.temporary_channel_id), - channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32), - announcement_sigs_state: AnnouncementSigsState::NotSent, - secp_ctx, - - latest_monitor_update_id: 0, - - holder_signer, - shutdown_scriptpubkey, - 
destination_script, - - cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - value_to_self_msat: msg.push_msat, - - pending_inbound_htlcs: Vec::new(), - pending_outbound_htlcs: Vec::new(), - holding_cell_htlc_updates: Vec::new(), - pending_update_fee: None, - holding_cell_update_fee: None, - next_holder_htlc_id: 0, - next_counterparty_htlc_id: 0, - update_time_counter: 1, - - resend_order: RAACommitmentOrder::CommitmentFirst, - - monitor_pending_channel_ready: false, - monitor_pending_revoke_and_ack: false, - monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), - monitor_pending_failures: Vec::new(), - monitor_pending_finalized_fulfills: Vec::new(), - - #[cfg(debug_assertions)] - holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), - #[cfg(debug_assertions)] - counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), - - last_sent_closing_fee: None, - pending_counterparty_closing_signed: None, - closing_fee_limits: None, - target_closing_feerate_sats_per_kw: None, - - inbound_awaiting_accept: true, - - funding_tx_confirmed_in: None, - funding_tx_confirmation_height: 0, - short_channel_id: None, - channel_creation_height: current_chain_height, - - feerate_per_kw: msg.feerate_per_kw, - channel_value_satoshis: msg.funding_satoshis, - counterparty_dust_limit_satoshis: msg.dust_limit_satoshis, - holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, - counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000), - holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config), - counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis), - holder_selected_channel_reserve_satoshis, - counterparty_htlc_minimum_msat: msg.htlc_minimum_msat, - holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, - counterparty_max_accepted_htlcs: msg.max_accepted_htlcs, - holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), - minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)), - - counterparty_forwarding_info: None, - - channel_transaction_parameters: ChannelTransactionParameters { - holder_pubkeys: pubkeys, - holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, - is_outbound_from_holder: false, - counterparty_parameters: Some(CounterpartyChannelTransactionParameters { - selected_contest_delay: msg.to_self_delay, - pubkeys: counterparty_pubkeys, - }), - funding_outpoint: None, - opt_anchors: if opt_anchors { Some(()) } else { None }, - opt_non_zero_fee_anchors: None - }, - funding_transaction: None, + prev_config: None, + + inbound_handshake_limits_override: None, + + temporary_channel_id: Some(msg.temporary_channel_id), + channel_id: msg.temporary_channel_id, + channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32), + announcement_sigs_state: AnnouncementSigsState::NotSent, + secp_ctx, + + latest_monitor_update_id: 0, + + holder_signer, + shutdown_scriptpubkey, + destination_script, + + cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + 
cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + value_to_self_msat: msg.push_msat, + + pending_inbound_htlcs: Vec::new(), + pending_outbound_htlcs: Vec::new(), + holding_cell_htlc_updates: Vec::new(), + pending_update_fee: None, + holding_cell_update_fee: None, + next_holder_htlc_id: 0, + next_counterparty_htlc_id: 0, + update_time_counter: 1, + + resend_order: RAACommitmentOrder::CommitmentFirst, + + monitor_pending_channel_ready: false, + monitor_pending_revoke_and_ack: false, + monitor_pending_commitment_signed: false, + monitor_pending_forwards: Vec::new(), + monitor_pending_failures: Vec::new(), + monitor_pending_finalized_fulfills: Vec::new(), + + #[cfg(debug_assertions)] + holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), + #[cfg(debug_assertions)] + counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), + + last_sent_closing_fee: None, + pending_counterparty_closing_signed: None, + closing_fee_limits: None, + target_closing_feerate_sats_per_kw: None, + + inbound_awaiting_accept: true, + + funding_tx_confirmed_in: None, + funding_tx_confirmation_height: 0, + short_channel_id: None, + channel_creation_height: current_chain_height, + + feerate_per_kw: msg.feerate_per_kw, + channel_value_satoshis: msg.funding_satoshis, + counterparty_dust_limit_satoshis: msg.dust_limit_satoshis, + holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, + counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000), + holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config), + counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis), + holder_selected_channel_reserve_satoshis, + counterparty_htlc_minimum_msat: msg.htlc_minimum_msat, + holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, + counterparty_max_accepted_htlcs: msg.max_accepted_htlcs, + holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), + minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)), + + counterparty_forwarding_info: None, + + channel_transaction_parameters: ChannelTransactionParameters { + holder_pubkeys: pubkeys, + holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, + is_outbound_from_holder: false, + counterparty_parameters: Some(CounterpartyChannelTransactionParameters { + selected_contest_delay: msg.to_self_delay, + pubkeys: counterparty_pubkeys, + }), + funding_outpoint: None, + opt_anchors: if opt_anchors { Some(()) } else { None }, + opt_non_zero_fee_anchors: None + }, + funding_transaction: None, - counterparty_cur_commitment_point: Some(msg.first_per_commitment_point), - counterparty_prev_commitment_point: None, - counterparty_node_id, + counterparty_cur_commitment_point: Some(msg.first_per_commitment_point), + counterparty_prev_commitment_point: None, + counterparty_node_id, - counterparty_shutdown_scriptpubkey, + counterparty_shutdown_scriptpubkey, - commitment_secrets: CounterpartyCommitmentSecrets::new(), + commitment_secrets: CounterpartyCommitmentSecrets::new(), - channel_update_status: ChannelUpdateStatus::Enabled, - closing_signed_in_flight: false, + channel_update_status: 
ChannelUpdateStatus::Enabled, + closing_signed_in_flight: false, - announcement_sigs: None, + announcement_sigs: None, - #[cfg(any(test, fuzzing))] - next_local_commitment_tx_fee_info_cached: Mutex::new(None), - #[cfg(any(test, fuzzing))] - next_remote_commitment_tx_fee_info_cached: Mutex::new(None), + #[cfg(any(test, fuzzing))] + next_local_commitment_tx_fee_info_cached: Mutex::new(None), + #[cfg(any(test, fuzzing))] + next_remote_commitment_tx_fee_info_cached: Mutex::new(None), - workaround_lnd_bug_4006: None, - sent_message_awaiting_response: None, + workaround_lnd_bug_4006: None, + sent_message_awaiting_response: None, - latest_inbound_scid_alias: None, - outbound_scid_alias, + latest_inbound_scid_alias: None, + outbound_scid_alias, - channel_pending_event_emitted: false, - channel_ready_event_emitted: false, + channel_pending_event_emitted: false, + channel_ready_event_emitted: false, - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills: HashSet::new(), + #[cfg(any(test, fuzzing))] + historical_inbound_htlc_fulfills: HashSet::new(), - channel_type, - channel_keys_id, + channel_type, + channel_keys_id, - pending_monitor_updates: Vec::new(), + pending_monitor_updates: Vec::new(), + } }; Ok(chan) @@ -1556,16 +1565,16 @@ impl Channel { where L::Target: Logger { let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new(); - let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len(); + let num_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len(); let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs); - let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis }; + let broadcaster_dust_limit_satoshis = if local { self.context.holder_dust_limit_satoshis } else { self.context.counterparty_dust_limit_satoshis }; let mut remote_htlc_total_msat = 0; let mut local_htlc_total_msat = 0; let mut value_to_self_msat_offset = 0; - let mut feerate_per_kw = self.feerate_per_kw; - if let Some((feerate, update_state)) = self.pending_update_fee { + let mut feerate_per_kw = self.context.feerate_per_kw; + if let Some((feerate, update_state)) = self.context.pending_update_fee { if match update_state { // Note that these match the inclusion criteria when scanning // pending_inbound_htlcs below. @@ -1580,7 +1589,7 @@ impl Channel { log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), - log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); + log_bytes!(self.context.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); macro_rules! 
get_htlc_in_commitment { ($htlc: expr, $offered: expr) => { @@ -1628,7 +1637,7 @@ impl Channel { } } - for ref htlc in self.pending_inbound_htlcs.iter() { + for ref htlc in self.context.pending_inbound_htlcs.iter() { let (include, state_name) = match htlc.state { InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"), InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"), @@ -1657,7 +1666,7 @@ impl Channel { let mut preimages: Vec = Vec::new(); - for ref htlc in self.pending_outbound_htlcs.iter() { + for ref htlc in self.context.pending_outbound_htlcs.iter() { let (include, state_name) = match htlc.state { OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"), OutboundHTLCState::Committed => (true, "Committed"), @@ -1696,13 +1705,13 @@ impl Channel { } } - let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset; + let mut value_to_self_msat: i64 = (self.context.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset; assert!(value_to_self_msat >= 0); // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to // "violate" their reserve value by couting those against it. Thus, we have to convert // everything to i64 before subtracting as otherwise we can overflow. - let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset; + let mut value_to_remote_msat: i64 = (self.context.channel_value_satoshis * 1000) as i64 - (self.context.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset; assert!(value_to_remote_msat >= 0); #[cfg(debug_assertions)] @@ -1710,18 +1719,18 @@ impl Channel { // Make sure that the to_self/to_remote is always either past the appropriate // channel_reserve *or* it is making progress towards it. 
let mut broadcaster_max_commitment_tx_output = if generated_by_local { - self.holder_max_commitment_tx_output.lock().unwrap() + self.context.holder_max_commitment_tx_output.lock().unwrap() } else { - self.counterparty_max_commitment_tx_output.lock().unwrap() + self.context.counterparty_max_commitment_tx_output.lock().unwrap() }; - debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64); + debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.context.counterparty_selected_channel_reserve_satoshis.unwrap() as i64); broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64); - debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64); + debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.context.holder_selected_channel_reserve_satoshis as i64); broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64); } - let total_fee_sat = Channel::::commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.channel_transaction_parameters.opt_anchors.is_some()); - let anchors_val = if self.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64; + let total_fee_sat = Channel::::commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.context.channel_transaction_parameters.opt_anchors.is_some()); + let anchors_val = if self.context.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64; let (value_to_self, value_to_remote) = if self.is_outbound() { (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000) } else { @@ -1751,12 +1760,12 @@ impl Channel { let num_nondust_htlcs = included_non_dust_htlcs.len(); let channel_parameters = - if local { self.channel_transaction_parameters.as_holder_broadcastable() } - else { self.channel_transaction_parameters.as_counterparty_broadcastable() }; + if local { self.context.channel_transaction_parameters.as_holder_broadcastable() } + else { self.context.channel_transaction_parameters.as_counterparty_broadcastable() }; let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number, value_to_a as u64, value_to_b as u64, - self.channel_transaction_parameters.opt_anchors.is_some(), + self.context.channel_transaction_parameters.opt_anchors.is_some(), funding_pubkey_a, funding_pubkey_b, keys.clone(), @@ -1790,7 +1799,7 @@ impl Channel { // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method // outside of those situations will fail. 
- self.shutdown_scriptpubkey.clone().unwrap().into_inner() + self.context.shutdown_scriptpubkey.clone().unwrap().into_inner() } #[inline] @@ -1822,13 +1831,13 @@ impl Channel { #[inline] fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) { - assert!(self.pending_inbound_htlcs.is_empty()); - assert!(self.pending_outbound_htlcs.is_empty()); - assert!(self.pending_update_fee.is_none()); + assert!(self.context.pending_inbound_htlcs.is_empty()); + assert!(self.context.pending_outbound_htlcs.is_empty()); + assert!(self.context.pending_update_fee.is_none()); let mut total_fee_satoshis = proposed_total_fee_satoshis; - let mut value_to_holder: i64 = (self.value_to_self_msat as i64) / 1000 - if self.is_outbound() { total_fee_satoshis as i64 } else { 0 }; - let mut value_to_counterparty: i64 = ((self.channel_value_satoshis * 1000 - self.value_to_self_msat) as i64 / 1000) - if self.is_outbound() { 0 } else { total_fee_satoshis as i64 }; + let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.is_outbound() { total_fee_satoshis as i64 } else { 0 }; + let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.is_outbound() { 0 } else { total_fee_satoshis as i64 }; if value_to_holder < 0 { assert!(self.is_outbound()); @@ -1838,17 +1847,17 @@ impl Channel { total_fee_satoshis += (-value_to_counterparty) as u64; } - if skip_remote_output || value_to_counterparty as u64 <= self.holder_dust_limit_satoshis { + if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis { value_to_counterparty = 0; } - if value_to_holder as u64 <= self.holder_dust_limit_satoshis { + if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis { value_to_holder = 0; } - assert!(self.shutdown_scriptpubkey.is_some()); + assert!(self.context.shutdown_scriptpubkey.is_some()); let holder_shutdown_script = self.get_closing_scriptpubkey(); - let counterparty_shutdown_script = self.counterparty_shutdown_scriptpubkey.clone().unwrap(); + let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap(); let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint(); let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint); @@ -1856,7 +1865,7 @@ impl Channel { } fn funding_outpoint(&self) -> OutPoint { - self.channel_transaction_parameters.funding_outpoint.unwrap() + self.context.channel_transaction_parameters.funding_outpoint.unwrap() } #[inline] @@ -1866,12 +1875,12 @@ impl Channel { /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction) /// TODO Some magic rust shit to compile-time check this? 
fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys { - let per_commitment_point = self.holder_signer.get_per_commitment_point(commitment_number, &self.secp_ctx); + let per_commitment_point = self.context.holder_signer.get_per_commitment_point(commitment_number, &self.context.secp_ctx); let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint; let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; let counterparty_pubkeys = self.get_counterparty_pubkeys(); - TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint) + TxCreationKeys::derive_new(&self.context.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint) } #[inline] @@ -1885,7 +1894,7 @@ impl Channel { let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; let counterparty_pubkeys = self.get_counterparty_pubkeys(); - TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint) + TxCreationKeys::derive_new(&self.context.secp_ctx, &self.context.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint) } /// Gets the redeemscript for the funding transaction output (ie the funding transaction output @@ -1908,10 +1917,10 @@ impl Channel { where L::Target: Logger { // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc` // (see equivalent if condition there). - assert!(self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0); - let mon_update_id = self.latest_monitor_update_id; // Forget the ChannelMonitor update + assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0); + let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger); - self.latest_monitor_update_id = mon_update_id; + self.context.latest_monitor_update_id = mon_update_id; if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp { assert!(msg.is_none()); // The HTLC must have ended up in the holding cell. } @@ -1922,10 +1931,10 @@ impl Channel { // caller thought we could have something claimed (cause we wouldn't have accepted in an // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us, // either. 
- if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { panic!("Was asked to fulfill an HTLC when channel was not in an operational state"); } - assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0); + assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()); @@ -1935,7 +1944,7 @@ impl Channel { let mut pending_idx = core::usize::MAX; let mut htlc_value_msat = 0; - for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() { + for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { if htlc.htlc_id == htlc_id_arg { assert_eq!(htlc.payment_hash, payment_hash_calc); match htlc.state { @@ -1962,7 +1971,7 @@ impl Channel { #[cfg(any(test, fuzzing))] // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and // this is simply a duplicate claim, not previously failed and we lost funds. - debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); + debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); return UpdateFulfillFetch::DuplicateClaim {}; } @@ -1970,27 +1979,27 @@ impl Channel { // // We have to put the payment_preimage in the channel_monitor right away here to ensure we // can claim it even if the channel hits the chain before we see their next commitment. - self.latest_monitor_update_id += 1; + self.context.latest_monitor_update_id += 1; let monitor_update = ChannelMonitorUpdate { - update_id: self.latest_monitor_update_id, + update_id: self.context.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_arg.clone(), }], }; - if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { + if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { // Note that this condition is the same as the assertion in // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly - // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we // do not not get into this branch. - for pending_update in self.holding_cell_htlc_updates.iter() { + for pending_update in self.context.holding_cell_htlc_updates.iter() { match pending_update { &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => { if htlc_id_arg == htlc_id { // Make sure we don't leave latest_monitor_update_id incremented here: - self.latest_monitor_update_id -= 1; + self.context.latest_monitor_update_id -= 1; #[cfg(any(test, fuzzing))] - debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); + debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); return UpdateFulfillFetch::DuplicateClaim {}; } }, @@ -2006,25 +2015,25 @@ impl Channel { _ => {} } } - log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.channel_id()), self.channel_state); - self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { + log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! 
Current state: {}", log_bytes!(self.channel_id()), self.context.channel_state); + self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, }); #[cfg(any(test, fuzzing))] - self.historical_inbound_htlc_fulfills.insert(htlc_id_arg); + self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg); return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; } #[cfg(any(test, fuzzing))] - self.historical_inbound_htlc_fulfills.insert(htlc_id_arg); + self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg); { - let htlc = &mut self.pending_inbound_htlcs[pending_idx]; + let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; if let InboundHTLCState::Committed = htlc.state { } else { debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; } - log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id)); + log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id)); htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone())); } @@ -2040,7 +2049,7 @@ impl Channel { } pub fn get_update_fulfill_htlc_and_commit(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger { - let release_cs_monitor = self.pending_monitor_updates.iter().all(|upd| !upd.blocked); + let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked); match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) { UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => { // Even if we aren't supposed to let new monitor updates with commitment state @@ -2052,28 +2061,28 @@ impl Channel { let mut additional_update = self.build_commitment_no_status_check(logger); // build_commitment_no_status_check may bump latest_monitor_id but we want them // to be strictly increasing by one, so decrement it here. 
- self.latest_monitor_update_id = monitor_update.update_id; + self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); - self.pending_monitor_updates.push(PendingChannelMonitorUpdate { + self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate { update: monitor_update, blocked: false, }); - self.pending_monitor_updates.len() - 1 + self.context.pending_monitor_updates.len() - 1 } else { - let insert_pos = self.pending_monitor_updates.iter().position(|upd| upd.blocked) - .unwrap_or(self.pending_monitor_updates.len()); - let new_mon_id = self.pending_monitor_updates.get(insert_pos) + let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked) + .unwrap_or(self.context.pending_monitor_updates.len()); + let new_mon_id = self.context.pending_monitor_updates.get(insert_pos) .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id); monitor_update.update_id = new_mon_id; - self.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate { + self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate { update: monitor_update, blocked: false, }); - for held_update in self.pending_monitor_updates.iter_mut().skip(insert_pos + 1) { + for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) { held_update.update.update_id += 1; } if msg.is_some() { debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set"); let update = self.build_commitment_no_status_check(logger); - self.pending_monitor_updates.push(PendingChannelMonitorUpdate { + self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate { update, blocked: true, }); } @@ -2081,7 +2090,7 @@ impl Channel { }; self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new()); UpdateFulfillCommitFetch::NewClaim { - monitor_update: &self.pending_monitor_updates.get(unblocked_update_pos) + monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos) .expect("We just pushed the monitor update").update, htlc_value_msat, } @@ -2114,17 +2123,17 @@ impl Channel { /// [`ChannelError::Ignore`]. fn fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L) -> Result, ChannelError> where L::Target: Logger { - if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { panic!("Was asked to fail an HTLC when channel was not in an operational state"); } - assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0); + assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); // ChannelManager may generate duplicate claims/fails due to HTLC update events from // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop // these, but for now we just have to treat them as normal. 
let mut pending_idx = core::usize::MAX; - for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() { + for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { if htlc.htlc_id == htlc_id_arg { match htlc.state { InboundHTLCState::Committed => {}, @@ -2147,23 +2156,23 @@ impl Channel { #[cfg(any(test, fuzzing))] // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this // is simply a duplicate fail, not previously failed and we failed-back too early. - debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); + debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); return Ok(None); } - if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { + if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!"); force_holding_cell = true; } // Now update local state: if force_holding_cell { - for pending_update in self.holding_cell_htlc_updates.iter() { + for pending_update in self.context.holding_cell_htlc_updates.iter() { match pending_update { &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => { if htlc_id_arg == htlc_id { #[cfg(any(test, fuzzing))] - debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); + debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); return Ok(None); } }, @@ -2177,7 +2186,7 @@ impl Channel { } } log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); - self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC { + self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC { htlc_id: htlc_id_arg, err_packet, }); @@ -2186,7 +2195,7 @@ impl Channel { log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); { - let htlc = &mut self.pending_inbound_htlcs[pending_idx]; + let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone())); } @@ -2200,29 +2209,29 @@ impl Channel { // Message handlers: pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> { - let peer_limits = if let Some(ref limits) = self.inbound_handshake_limits_override { limits } else { default_limits }; + let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits }; // Check sanity of message fields: if !self.is_outbound() { return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned())); } - if self.channel_state != ChannelState::OurInitSent as u32 { + if self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned())); } if msg.dust_limit_satoshis > 21000000 * 100000000 { return Err(ChannelError::Close(format!("Peer never wants payout outputs? 
dust_limit_satoshis was {}", msg.dust_limit_satoshis))); } - if msg.channel_reserve_satoshis > self.channel_value_satoshis { - return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.channel_value_satoshis))); + if msg.channel_reserve_satoshis > self.context.channel_value_satoshis { + return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis))); } - if msg.dust_limit_satoshis > self.holder_selected_channel_reserve_satoshis { - return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.holder_selected_channel_reserve_satoshis))); + if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis { + return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis))); } - if msg.channel_reserve_satoshis > self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis { + if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis { return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})", - msg.channel_reserve_satoshis, self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis))); + msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis))); } - let full_channel_value_msat = (self.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000; + let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000; if msg.htlc_minimum_msat >= full_channel_value_msat { return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat))); } @@ -2261,7 +2270,7 @@ impl Channel { } if let Some(ty) = &msg.channel_type { - if *ty != self.channel_type { + if *ty != self.context.channel_type { return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned())); } } else if their_features.supports_channel_type() { @@ -2271,7 +2280,7 @@ impl Channel { if channel_type != ChannelTypeFeatures::only_static_remote_key() { return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); } - self.channel_type = channel_type; + self.context.channel_type = channel_type; } let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { @@ -2294,16 +2303,16 @@ impl Channel { } } else { None }; - self.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis; - self.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.channel_value_satoshis * 1000); - self.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis); - self.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat; - self.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs; + self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis; + self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000); + 
self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis); + self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat; + self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs; if peer_limits.trust_own_funding_0conf { - self.minimum_depth = Some(msg.minimum_depth); + self.context.minimum_depth = Some(msg.minimum_depth); } else { - self.minimum_depth = Some(cmp::max(1, msg.minimum_depth)); + self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth)); } let counterparty_pubkeys = ChannelPublicKeys { @@ -2314,16 +2323,16 @@ impl Channel { htlc_basepoint: msg.htlc_basepoint }; - self.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters { + self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters { selected_contest_delay: msg.to_self_delay, pubkeys: counterparty_pubkeys, }); - self.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point); - self.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey; + self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point); + self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey; - self.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32; - self.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now. + self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32; + self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now. Ok(()) } @@ -2331,29 +2340,29 @@ impl Channel { fn funding_created_signature(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger { let funding_script = self.get_funding_redeemscript(); - let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number); - let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx; + let keys = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let initial_commitment_tx = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx; { let trusted_tx = initial_commitment_tx.trust(); let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); - let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis); + let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); // They sign the holder commitment transaction... 
log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id())); - secp_check!(self.secp_ctx.verify_ecdsa(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); + secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); } let counterparty_keys = self.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); - let counterparty_signature = self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx) + let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0; // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish. @@ -2374,31 +2383,31 @@ impl Channel { if self.is_outbound() { return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned())); } - if self.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { + if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT // remember the channel, so it's safe to just send an error_message here and drop the // channel. 
return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned())); } - if self.inbound_awaiting_accept { + if self.context.inbound_awaiting_accept { return Err(ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned())); } - if self.commitment_secrets.get_min_seen_secret() != (1 << 48) || - self.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || + self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || + self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); } let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index }; - self.channel_transaction_parameters.funding_outpoint = Some(funding_txo); + self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo); // This is an externally observable change before we finish all our checks. In particular // funding_created_signature may fail. - self.holder_signer.provide_channel_parameters(&self.channel_transaction_parameters); + self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) { Ok(res) => res, Err(ChannelError::Close(e)) => { - self.channel_transaction_parameters.funding_outpoint = None; + self.context.channel_transaction_parameters.funding_outpoint = None; return Err(ChannelError::Close(e)); }, Err(e) => { @@ -2416,7 +2425,7 @@ impl Channel { self.counterparty_funding_pubkey() ); - self.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) + self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; // Now that we're past error-generating stuff, update our local state: @@ -2424,23 +2433,23 @@ impl Channel { let funding_redeemscript = self.get_funding_redeemscript(); let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()); - let shutdown_script = self.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); - let mut monitor_signer = signer_provider.derive_channel_signer(self.channel_value_satoshis, self.channel_keys_id); - monitor_signer.provide_channel_parameters(&self.channel_transaction_parameters); - let channel_monitor = ChannelMonitor::new(self.secp_ctx.clone(), monitor_signer, + let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); + let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); + monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); + let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, shutdown_script, self.get_holder_selected_contest_delay(), - &self.destination_script, (funding_txo, funding_txo_script.clone()), - &self.channel_transaction_parameters, - 
funding_redeemscript.clone(), self.channel_value_satoshis, + &self.context.destination_script, (funding_txo, funding_txo_script.clone()), + &self.context.channel_transaction_parameters, + funding_redeemscript.clone(), self.context.channel_value_satoshis, obscure_factor, - holder_commitment_tx, best_block, self.counterparty_node_id); + holder_commitment_tx, best_block, self.context.counterparty_node_id); - channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger); + channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger); - self.channel_state = ChannelState::FundingSent as u32; - self.channel_id = funding_txo.to_channel_id(); - self.cur_counterparty_commitment_transaction_number -= 1; - self.cur_holder_commitment_transaction_number -= 1; + self.context.channel_state = ChannelState::FundingSent as u32; + self.context.channel_id = funding_txo.to_channel_id(); + self.context.cur_counterparty_commitment_transaction_number -= 1; + self.context.cur_holder_commitment_transaction_number -= 1; log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id())); @@ -2448,7 +2457,7 @@ impl Channel { self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); Ok((msgs::FundingSigned { - channel_id: self.channel_id, + channel_id: self.context.channel_id, signature, #[cfg(taproot)] partial_signature_with_nonce: None, @@ -2467,33 +2476,33 @@ impl Channel { if !self.is_outbound() { return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())); } - if self.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 { + if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 { return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned())); } - if self.commitment_secrets.get_min_seen_secret() != (1 << 48) || - self.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || + self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || + self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); } let funding_script = self.get_funding_redeemscript(); let counterparty_keys = self.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", log_bytes!(self.channel_id()), 
counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); - let holder_signer = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number); - let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; + let holder_signer = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let initial_commitment_tx = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; { let trusted_tx = initial_commitment_tx.trust(); let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); - let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis); + let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); // They sign our commitment transaction, allowing us to broadcast the tx if we wish. - if let Err(_) = self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) { + if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) { return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned())); } } @@ -2506,7 +2515,7 @@ impl Channel { self.counterparty_funding_pubkey() ); - self.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) + self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; @@ -2514,23 +2523,23 @@ impl Channel { let funding_txo = self.get_funding_txo().unwrap(); let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()); - let shutdown_script = self.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); - let mut monitor_signer = signer_provider.derive_channel_signer(self.channel_value_satoshis, self.channel_keys_id); - monitor_signer.provide_channel_parameters(&self.channel_transaction_parameters); - let channel_monitor = ChannelMonitor::new(self.secp_ctx.clone(), monitor_signer, + let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); + let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); + monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); + let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, shutdown_script, self.get_holder_selected_contest_delay(), - &self.destination_script, (funding_txo, funding_txo_script), - &self.channel_transaction_parameters, - funding_redeemscript.clone(), self.channel_value_satoshis, + &self.context.destination_script, (funding_txo, funding_txo_script), + &self.context.channel_transaction_parameters, + funding_redeemscript.clone(), self.context.channel_value_satoshis, obscure_factor, - holder_commitment_tx, best_block, self.counterparty_node_id); + holder_commitment_tx, best_block, self.context.counterparty_node_id); - channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), 
self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);
+ channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
- assert_eq!(self.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
- self.channel_state = ChannelState::FundingSent as u32;
- self.cur_holder_commitment_transaction_number -= 1;
- self.cur_counterparty_commitment_transaction_number -= 1;
+ assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail update!
+ self.context.channel_state = ChannelState::FundingSent as u32;
+ self.context.cur_holder_commitment_transaction_number -= 1;
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
 log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
@@ -2550,49 +2559,49 @@ impl Channel {
 NS::Target: NodeSigner,
 L::Target: Logger
 {
- if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- self.workaround_lnd_bug_4006 = Some(msg.clone());
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ self.context.workaround_lnd_bug_4006 = Some(msg.clone());
 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
 }
 if let Some(scid_alias) = msg.short_channel_id_alias {
- if Some(scid_alias) != self.short_channel_id {
+ if Some(scid_alias) != self.context.short_channel_id {
 // The scid alias provided can be used to route payments *from* our counterparty,
 // i.e. can be used for inbound payments and provided in invoices, but is not used
 // when routing outbound payments.
- self.latest_inbound_scid_alias = Some(scid_alias); + self.context.latest_inbound_scid_alias = Some(scid_alias); } } - let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS); + let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); if non_shutdown_state == ChannelState::FundingSent as u32 { - self.channel_state |= ChannelState::TheirChannelReady as u32; + self.context.channel_state |= ChannelState::TheirChannelReady as u32; } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) { - self.channel_state = ChannelState::ChannelReady as u32 | (self.channel_state & MULTI_STATE_FLAGS); - self.update_time_counter += 1; - } else if self.channel_state & (ChannelState::ChannelReady as u32) != 0 || + self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS); + self.context.update_time_counter += 1; + } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 || // If we reconnected before sending our `channel_ready` they may still resend theirs: - (self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) == + (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32)) { // They probably disconnected/reconnected and re-sent the channel_ready, which is // required, or they're sending a fresh SCID alias. let expected_point = - if self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 { + if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 { // If they haven't ever sent an updated point, the point they send should match // the current one. - self.counterparty_cur_commitment_point - } else if self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 { + self.context.counterparty_cur_commitment_point + } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 { // If we've advanced the commitment number once, the second commitment point is // at `counterparty_prev_commitment_point`, which is not yet revoked. - debug_assert!(self.counterparty_prev_commitment_point.is_some()); - self.counterparty_prev_commitment_point + debug_assert!(self.context.counterparty_prev_commitment_point.is_some()); + self.context.counterparty_prev_commitment_point } else { // If they have sent updated points, channel_ready is always supposed to match // their "first" point, which we re-derive here. 
- Some(PublicKey::from_secret_key(&self.secp_ctx, &SecretKey::from_slice( - &self.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available") + Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice( + &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available") ).expect("We already advanced, so previous secret keys should have been validated already"))) }; if expected_point != Some(msg.next_per_commitment_point) { @@ -2603,8 +2612,8 @@ impl Channel { return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())); } - self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point; - self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); + self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point; + self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.channel_id())); @@ -2613,8 +2622,8 @@ impl Channel { /// Returns transaction if there is pending funding transaction that is yet to broadcast pub fn unbroadcasted_funding(&self) -> Option { - if self.channel_state & (ChannelState::FundingCreated as u32) != 0 { - self.funding_transaction.clone() + if self.context.channel_state & (ChannelState::FundingCreated as u32) != 0 { + self.context.funding_transaction.clone() } else { None } @@ -2623,7 +2632,7 @@ impl Channel { /// Returns a HTLCStats about inbound pending htlcs fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { let mut stats = HTLCStats { - pending_htlcs: self.pending_inbound_htlcs.len() as u32, + pending_htlcs: self.context.pending_inbound_htlcs.len() as u32, pending_htlcs_value_msat: 0, on_counterparty_tx_dust_exposure_msat: 0, on_holder_tx_dust_exposure_msat: 0, @@ -2638,9 +2647,9 @@ impl Channel { (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) }; - let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.counterparty_dust_limit_satoshis; - let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis; - for ref htlc in self.pending_inbound_htlcs.iter() { + let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; + let holder_dust_limit_success_sat = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; + for ref htlc in self.context.pending_inbound_htlcs.iter() { stats.pending_htlcs_value_msat += htlc.amount_msat; if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat { stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; @@ -2655,7 +2664,7 @@ impl Channel { /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell. 
fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { let mut stats = HTLCStats { - pending_htlcs: self.pending_outbound_htlcs.len() as u32, + pending_htlcs: self.context.pending_outbound_htlcs.len() as u32, pending_htlcs_value_msat: 0, on_counterparty_tx_dust_exposure_msat: 0, on_holder_tx_dust_exposure_msat: 0, @@ -2670,9 +2679,9 @@ impl Channel { (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) }; - let counterparty_dust_limit_success_sat = htlc_success_dust_limit + self.counterparty_dust_limit_satoshis; - let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis; - for ref htlc in self.pending_outbound_htlcs.iter() { + let counterparty_dust_limit_success_sat = htlc_success_dust_limit + self.context.counterparty_dust_limit_satoshis; + let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.context.holder_dust_limit_satoshis; + for ref htlc in self.context.pending_outbound_htlcs.iter() { stats.pending_htlcs_value_msat += htlc.amount_msat; if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat { stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; @@ -2682,7 +2691,7 @@ impl Channel { } } - for update in self.holding_cell_htlc_updates.iter() { + for update in self.context.holding_cell_htlc_updates.iter() { if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update { stats.pending_htlcs += 1; stats.pending_htlcs_value_msat += amount_msat; @@ -2709,18 +2718,18 @@ impl Channel { let inbound_stats = self.get_inbound_pending_htlc_stats(None); let outbound_stats = self.get_outbound_pending_htlc_stats(None); - let mut balance_msat = self.value_to_self_msat; - for ref htlc in self.pending_inbound_htlcs.iter() { + let mut balance_msat = self.context.value_to_self_msat; + for ref htlc in self.context.pending_inbound_htlcs.iter() { if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state { balance_msat += htlc.amount_msat; } } balance_msat -= outbound_stats.pending_htlcs_value_msat; - let outbound_capacity_msat = self.value_to_self_msat + let outbound_capacity_msat = self.context.value_to_self_msat .saturating_sub(outbound_stats.pending_htlcs_value_msat) .saturating_sub( - self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000); + self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000); let mut available_capacity_msat = outbound_capacity_msat; @@ -2732,9 +2741,9 @@ impl Channel { // and the answer will in turn change the amount itself — making it a circular // dependency. // This complicates the computation around dust-values, up to the one-htlc-value. - let mut real_dust_limit_timeout_sat = self.holder_dust_limit_satoshis; + let mut real_dust_limit_timeout_sat = self.context.holder_dust_limit_satoshis; if !self.opt_anchors() { - real_dust_limit_timeout_sat += self.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000; + real_dust_limit_timeout_sat += self.context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000; } let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered); @@ -2758,16 +2767,16 @@ impl Channel { } else { // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure // sending a new HTLC won't reduce their balance below our reserve threshold. 
- let mut real_dust_limit_success_sat = self.counterparty_dust_limit_satoshis; + let mut real_dust_limit_success_sat = self.context.counterparty_dust_limit_satoshis; if !self.opt_anchors() { - real_dust_limit_success_sat += self.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000; + real_dust_limit_success_sat += self.context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000; } let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered); let max_reserved_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_above_dust, None); - let holder_selected_chan_reserve_msat = self.holder_selected_channel_reserve_satoshis * 1000; - let remote_balance_msat = (self.channel_value_satoshis * 1000 - self.value_to_self_msat) + let holder_selected_chan_reserve_msat = self.context.holder_selected_channel_reserve_satoshis * 1000; + let remote_balance_msat = (self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) .saturating_sub(inbound_stats.pending_htlcs_value_msat); if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat { @@ -2777,7 +2786,7 @@ impl Channel { } } - let mut next_outbound_htlc_minimum_msat = self.counterparty_htlc_minimum_msat; + let mut next_outbound_htlc_minimum_msat = self.context.counterparty_htlc_minimum_msat; // If we get close to our maximum dust exposure, we end up in a situation where we can send // between zero and the remaining dust exposure limit remaining OR above the dust limit. @@ -2787,11 +2796,11 @@ impl Channel { let mut dust_exposure_dust_limit_msat = 0; let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() { - (self.counterparty_dust_limit_satoshis, self.holder_dust_limit_satoshis) + (self.context.counterparty_dust_limit_satoshis, self.context.holder_dust_limit_satoshis) } else { let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64; - (self.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000, - self.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000) + (self.context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000, + self.context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000) }; let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 { @@ -2817,17 +2826,17 @@ impl Channel { } available_capacity_msat = cmp::min(available_capacity_msat, - self.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat); + self.context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat); - if outbound_stats.pending_htlcs + 1 > self.counterparty_max_accepted_htlcs as u32 { + if outbound_stats.pending_htlcs + 1 > self.context.counterparty_max_accepted_htlcs as u32 { available_capacity_msat = 0; } AvailableBalances { - inbound_capacity_msat: cmp::max(self.channel_value_satoshis as i64 * 1000 - - self.value_to_self_msat as i64 + inbound_capacity_msat: cmp::max(self.context.channel_value_satoshis as i64 * 1000 + - self.context.value_to_self_msat as i64 - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 - - 
self.holder_selected_channel_reserve_satoshis as i64 * 1000, + - self.context.holder_selected_channel_reserve_satoshis as i64 * 1000, 0) as u64, outbound_capacity_msat, next_outbound_htlc_limit_msat: available_capacity_msat, @@ -2837,7 +2846,7 @@ impl Channel { } pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option) { - (self.holder_selected_channel_reserve_satoshis, self.counterparty_selected_channel_reserve_satoshis) + (self.context.holder_selected_channel_reserve_satoshis, self.context.counterparty_selected_channel_reserve_satoshis) } // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs. @@ -2871,11 +2880,11 @@ impl Channel { let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() { (0, 0) } else { - (self.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, - self.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) + (self.context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, + self.context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) }; - let real_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis; - let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis; + let real_dust_limit_success_sat = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; + let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.context.holder_dust_limit_satoshis; let mut addl_htlcs = 0; if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } @@ -2893,7 +2902,7 @@ impl Channel { } let mut included_htlcs = 0; - for ref htlc in self.pending_inbound_htlcs.iter() { + for ref htlc in self.context.pending_inbound_htlcs.iter() { if htlc.amount_msat / 1000 < real_dust_limit_success_sat { continue } @@ -2902,7 +2911,7 @@ impl Channel { included_htlcs += 1; } - for ref htlc in self.pending_outbound_htlcs.iter() { + for ref htlc in self.context.pending_outbound_htlcs.iter() { if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat { continue } @@ -2917,7 +2926,7 @@ impl Channel { } } - for htlc in self.holding_cell_htlc_updates.iter() { + for htlc in self.context.holding_cell_htlc_updates.iter() { match htlc { &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. 
} => { if amount_msat / 1000 < real_dust_limit_timeout_sat { @@ -2931,29 +2940,29 @@ impl Channel { } let num_htlcs = included_htlcs + addl_htlcs; - let res = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs, self.opt_anchors()); + let res = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs, self.opt_anchors()); #[cfg(any(test, fuzzing))] { let mut fee = res; if fee_spike_buffer_htlc.is_some() { - fee = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs - 1, self.opt_anchors()); + fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs - 1, self.opt_anchors()); } - let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len() - + self.holding_cell_htlc_updates.len(); + let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len() + + self.context.holding_cell_htlc_updates.len(); let commitment_tx_info = CommitmentTxInfoCached { fee, total_pending_htlcs, next_holder_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => self.next_holder_htlc_id + 1, - HTLCInitiator::RemoteOffered => self.next_holder_htlc_id, + HTLCInitiator::LocalOffered => self.context.next_holder_htlc_id + 1, + HTLCInitiator::RemoteOffered => self.context.next_holder_htlc_id, }, next_counterparty_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => self.next_counterparty_htlc_id, - HTLCInitiator::RemoteOffered => self.next_counterparty_htlc_id + 1, + HTLCInitiator::LocalOffered => self.context.next_counterparty_htlc_id, + HTLCInitiator::RemoteOffered => self.context.next_counterparty_htlc_id + 1, }, - feerate: self.feerate_per_kw, + feerate: self.context.feerate_per_kw, }; - *self.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); + *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); } res } @@ -2974,11 +2983,11 @@ impl Channel { let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() { (0, 0) } else { - (self.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, - self.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) + (self.context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, + self.context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) }; - let real_dust_limit_success_sat = htlc_success_dust_limit + self.counterparty_dust_limit_satoshis; - let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.counterparty_dust_limit_satoshis; + let real_dust_limit_success_sat = htlc_success_dust_limit + self.context.counterparty_dust_limit_satoshis; + let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; let mut addl_htlcs = 0; if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } @@ -2999,14 +3008,14 @@ impl Channel { // non-dust inbound HTLCs are included (as all states imply it will be included) and only // committed outbound HTLCs, see below. 
let mut included_htlcs = 0; - for ref htlc in self.pending_inbound_htlcs.iter() { + for ref htlc in self.context.pending_inbound_htlcs.iter() { if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat { continue } included_htlcs += 1; } - for ref htlc in self.pending_outbound_htlcs.iter() { + for ref htlc in self.context.pending_outbound_htlcs.iter() { if htlc.amount_msat / 1000 <= real_dust_limit_success_sat { continue } @@ -3021,28 +3030,28 @@ impl Channel { } let num_htlcs = included_htlcs + addl_htlcs; - let res = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs, self.opt_anchors()); + let res = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs, self.opt_anchors()); #[cfg(any(test, fuzzing))] { let mut fee = res; if fee_spike_buffer_htlc.is_some() { - fee = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs - 1, self.opt_anchors()); + fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs - 1, self.opt_anchors()); } - let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len(); + let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len(); let commitment_tx_info = CommitmentTxInfoCached { fee, total_pending_htlcs, next_holder_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => self.next_holder_htlc_id + 1, - HTLCInitiator::RemoteOffered => self.next_holder_htlc_id, + HTLCInitiator::LocalOffered => self.context.next_holder_htlc_id + 1, + HTLCInitiator::RemoteOffered => self.context.next_holder_htlc_id, }, next_counterparty_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => self.next_counterparty_htlc_id, - HTLCInitiator::RemoteOffered => self.next_counterparty_htlc_id + 1, + HTLCInitiator::LocalOffered => self.context.next_counterparty_htlc_id, + HTLCInitiator::RemoteOffered => self.context.next_counterparty_htlc_id + 1, }, - feerate: self.feerate_per_kw, + feerate: self.context.feerate_per_kw, }; - *self.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); + *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); } res } @@ -3050,35 +3059,35 @@ impl Channel { pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError> where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger { // We can't accept HTLCs sent after we've sent a shutdown. - let local_sent_shutdown = (self.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32); + let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32); if local_sent_shutdown { pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8); } // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec. 
- let remote_sent_shutdown = (self.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32); + let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32); if remote_sent_shutdown { return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned())); } - if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned())); } - if msg.amount_msat > self.channel_value_satoshis * 1000 { + if msg.amount_msat > self.context.channel_value_satoshis * 1000 { return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned())); } if msg.amount_msat == 0 { return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned())); } - if msg.amount_msat < self.holder_htlc_minimum_msat { - return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.holder_htlc_minimum_msat, msg.amount_msat))); + if msg.amount_msat < self.context.holder_htlc_minimum_msat { + return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat))); } let inbound_stats = self.get_inbound_pending_htlc_stats(None); let outbound_stats = self.get_outbound_pending_htlc_stats(None); - if inbound_stats.pending_htlcs + 1 > self.holder_max_accepted_htlcs as u32 { - return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.holder_max_accepted_htlcs))); + if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 { + return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs))); } - if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.holder_max_htlc_value_in_flight_msat { - return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.holder_max_htlc_value_in_flight_msat))); + if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat { + return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat))); } // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet // the reserve_satoshis we told them to always have as direct payment so that they lose @@ -3093,7 +3102,7 @@ impl Channel { // Channel state once they will not be present in the next received commitment // transaction). 
let mut removed_outbound_total_msat = 0; - for ref htlc in self.pending_outbound_htlcs.iter() { + for ref htlc in self.context.pending_outbound_htlcs.iter() { if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state { removed_outbound_total_msat += htlc.amount_msat; } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state { @@ -3108,7 +3117,7 @@ impl Channel { (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) }; - let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.counterparty_dust_limit_satoshis; + let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; if on_counterparty_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { @@ -3118,7 +3127,7 @@ impl Channel { } } - let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.holder_dust_limit_satoshis; + let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; if on_holder_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { @@ -3129,9 +3138,9 @@ impl Channel { } let pending_value_to_self_msat = - self.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat; + self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat; let pending_remote_value_msat = - self.channel_value_satoshis * 1000 - pending_value_to_self_msat; + self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat; if pending_remote_value_msat < msg.amount_msat { return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned())); } @@ -3146,7 +3155,7 @@ impl Channel { return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned())); }; - if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.holder_selected_channel_reserve_satoshis * 1000 { + if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 { return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned())); } @@ -3161,7 +3170,7 @@ impl Channel { // sensitive to fee spikes. let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); - if pending_remote_value_msat - msg.amount_msat - self.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat { + if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat { // Note that if the pending_forward_status is not updated here, then it's because we're already failing // the HTLC, i.e. its status is already set to failing. 
log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.channel_id())); @@ -3171,26 +3180,26 @@ impl Channel { // Check that they won't violate our local required channel reserve by adding this HTLC. let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); let local_commit_tx_fee_msat = self.next_local_commit_tx_fee_msat(htlc_candidate, None); - if self.value_to_self_msat < self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat { + if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat { return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned())); } } - if self.next_counterparty_htlc_id != msg.htlc_id { - return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.next_counterparty_htlc_id))); + if self.context.next_counterparty_htlc_id != msg.htlc_id { + return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id))); } if msg.cltv_expiry >= 500000000 { return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned())); } - if self.channel_state & ChannelState::LocalShutdownSent as u32 != 0 { + if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 { if let PendingHTLCStatus::Forward(_) = pending_forward_status { panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing"); } } // Now update local state: - self.next_counterparty_htlc_id += 1; - self.pending_inbound_htlcs.push(InboundHTLCOutput { + self.context.next_counterparty_htlc_id += 1; + self.context.pending_inbound_htlcs.push(InboundHTLCOutput { htlc_id: msg.htlc_id, amount_msat: msg.amount_msat, payment_hash: msg.payment_hash, @@ -3204,7 +3213,7 @@ impl Channel { #[inline] fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option, fail_reason: Option) -> Result<&OutboundHTLCOutput, ChannelError> { assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage"); - for htlc in self.pending_outbound_htlcs.iter_mut() { + for htlc in self.context.pending_outbound_htlcs.iter_mut() { if htlc.htlc_id == htlc_id { let outcome = match check_preimage { None => fail_reason.into(), @@ -3232,10 +3241,10 @@ impl Channel { } pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> { - if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned())); } - if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned())); } @@ -3243,10 +3252,10 @@ impl Channel { } pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> { - if (self.channel_state & 
(ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned())); } - if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned())); } @@ -3255,10 +3264,10 @@ impl Channel { } pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> { - if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned())); } - if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned())); } @@ -3269,31 +3278,31 @@ impl Channel { pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result, ChannelError> where L::Target: Logger { - if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned())); } - if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned())); } - if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() { + if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() { return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned())); } let funding_script = self.get_funding_redeemscript(); - let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number); + let keys = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger); + let commitment_stats = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger); let commitment_txid = { let trusted_tx = commitment_stats.tx.trust(); let bitcoin_tx = trusted_tx.built_transaction(); - let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis); + let sighash = bitcoin_tx.get_sighash_all(&funding_script, 
self.context.channel_value_satoshis); log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}", log_bytes!(msg.signature.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id())); - if let Err(_) = self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) { + if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) { return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned())); } bitcoin_tx.txid @@ -3302,12 +3311,12 @@ impl Channel { // If our counterparty updated the channel fee in this commitment transaction, check that // they can actually afford the new fee now. - let update_fee = if let Some((_, update_state)) = self.pending_update_fee { + let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee { update_state == FeeUpdateState::RemoteAnnounced } else { false }; if update_fee { debug_assert!(!self.is_outbound()); - let counterparty_reserve_we_require_msat = self.holder_selected_channel_reserve_satoshis * 1000; + let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000; if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat { return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned())); } @@ -3315,15 +3324,15 @@ impl Channel { #[cfg(any(test, fuzzing))] { if self.is_outbound() { - let projected_commit_tx_info = self.next_local_commitment_tx_fee_info_cached.lock().unwrap().take(); - *self.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None; + let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take(); + *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None; if let Some(info) = projected_commit_tx_info { - let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len() - + self.holding_cell_htlc_updates.len(); + let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len() + + self.context.holding_cell_htlc_updates.len(); if info.total_pending_htlcs == total_pending_htlcs - && info.next_holder_htlc_id == self.next_holder_htlc_id - && info.next_counterparty_htlc_id == self.next_counterparty_htlc_id - && info.feerate == self.feerate_per_kw { + && info.next_holder_htlc_id == self.context.next_holder_htlc_id + && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id + && info.feerate == self.context.feerate_per_kw { assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000); } } @@ -3363,7 +3372,7 @@ impl Channel { log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()), encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id())); - if let Err(_) = self.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) { + if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, 
&msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) { return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())); } if !separate_nondust_htlc_sources { @@ -3388,34 +3397,34 @@ impl Channel { self.counterparty_funding_pubkey() ); - self.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages) + self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages) .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; // Update state now that we've passed all the can-fail calls... let mut need_commitment = false; - if let &mut Some((_, ref mut update_state)) = &mut self.pending_update_fee { + if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee { if *update_state == FeeUpdateState::RemoteAnnounced { *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce; need_commitment = true; } } - for htlc in self.pending_inbound_htlcs.iter_mut() { + for htlc in self.context.pending_inbound_htlcs.iter_mut() { let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state { Some(forward_info.clone()) } else { None }; if let Some(forward_info) = new_forward { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", - log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id)); + log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id)); htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info); need_commitment = true; } } let mut claimed_htlcs = Vec::new(); - for htlc in self.pending_outbound_htlcs.iter_mut() { + for htlc in self.context.pending_outbound_htlcs.iter_mut() { if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.", - log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id)); + log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id)); // Grab the preimage, if it exists, instead of cloning let mut reason = OutboundHTLCOutcome::Success(None); mem::swap(outcome, &mut reason); @@ -3433,9 +3442,9 @@ impl Channel { } } - self.latest_monitor_update_id += 1; + self.context.latest_monitor_update_id += 1; let mut monitor_update = ChannelMonitorUpdate { - update_id: self.latest_monitor_update_id, + update_id: self.context.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx: holder_commitment_tx, htlc_outputs: htlcs_and_sigs, @@ -3444,39 +3453,39 @@ impl Channel { }] }; - self.cur_holder_commitment_transaction_number -= 1; + self.context.cur_holder_commitment_transaction_number -= 1; // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call // build_commitment_no_status_check() next which will reset this to RAAFirst. - self.resend_order = RAACommitmentOrder::CommitmentFirst; + self.context.resend_order = RAACommitmentOrder::CommitmentFirst; - if (self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 { + if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 { // In case we initially failed monitor updating without requiring a response, we need // to make sure the RAA gets sent first. 
- self.monitor_pending_revoke_and_ack = true; - if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { + self.context.monitor_pending_revoke_and_ack = true; + if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { // If we were going to send a commitment_signed after the RAA, go ahead and do all // the corresponding HTLC status updates so that get_last_commitment_update // includes the right HTLCs. - self.monitor_pending_commitment_signed = true; + self.context.monitor_pending_commitment_signed = true; let mut additional_update = self.build_commitment_no_status_check(logger); // build_commitment_no_status_check may bump latest_monitor_id but we want them to be // strictly increasing by one, so decrement it here. - self.latest_monitor_update_id = monitor_update.update_id; + self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); } log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.", - log_bytes!(self.channel_id)); + log_bytes!(self.context.channel_id)); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } - let need_commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { + let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok - // we'll send one right away when we get the revoke_and_ack when we // free_holding_cell_htlcs(). let mut additional_update = self.build_commitment_no_status_check(logger); // build_commitment_no_status_check may bump latest_monitor_id but we want them to be // strictly increasing by one, so decrement it here. - self.latest_monitor_update_id = monitor_update.update_id; + self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); true } else { false }; @@ -3491,8 +3500,8 @@ impl Channel { /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and /// returns `(None, Vec::new())`. pub fn maybe_free_holding_cell_htlcs(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger { - if self.channel_state >= ChannelState::ChannelReady as u32 && - (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 { + if self.context.channel_state >= ChannelState::ChannelReady as u32 && + (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 { self.free_holding_cell_htlcs(logger) } else { (None, Vec::new()) } } @@ -3500,18 +3509,18 @@ impl Channel { /// Frees any pending commitment updates in the holding cell, generating the relevant messages /// for our counterparty. 
fn free_holding_cell_htlcs(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger { - assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0); - if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() { - log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.holding_cell_htlc_updates.len(), - if self.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.channel_id())); + assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0); + if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() { + log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(), + if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.channel_id())); let mut monitor_update = ChannelMonitorUpdate { - update_id: self.latest_monitor_update_id + 1, // We don't increment this yet! + update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet! updates: Vec::new(), }; let mut htlc_updates = Vec::new(); - mem::swap(&mut htlc_updates, &mut self.holding_cell_htlc_updates); + mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates); let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len()); let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len()); let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len()); @@ -3579,10 +3588,10 @@ impl Channel { }, } } - if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.holding_cell_update_fee.is_none() { + if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.context.holding_cell_update_fee.is_none() { return (None, htlcs_to_fail); } - let update_fee = if let Some(feerate) = self.holding_cell_update_fee.take() { + let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() { self.send_update_fee(feerate, false, logger) } else { None @@ -3591,7 +3600,7 @@ impl Channel { let mut additional_update = self.build_commitment_no_status_check(logger); // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id // but we want them to be strictly increasing by one, so reset it here. 
- self.latest_monitor_update_id = monitor_update.update_id; + self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.", @@ -3613,25 +3622,25 @@ impl Channel { pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError> where L::Target: Logger, { - if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned())); } - if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned())); } - if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() { + if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() { return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned())); } let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned()); - if let Some(counterparty_prev_commitment_point) = self.counterparty_prev_commitment_point { - if PublicKey::from_secret_key(&self.secp_ctx, &secret) != counterparty_prev_commitment_point { + if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point { + if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point { return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned())); } } - if self.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 { + if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 { // Our counterparty seems to have burned their coins to us (by revoking a state when we // haven't given them a new commitment transaction to broadcast). 
We should probably // take advantage of this by updating our channel monitor, sending them an error, and @@ -3644,22 +3653,22 @@ impl Channel { #[cfg(any(test, fuzzing))] { - *self.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None; - *self.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None; + *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None; + *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None; } - self.holder_signer.validate_counterparty_revocation( - self.cur_counterparty_commitment_transaction_number + 1, + self.context.holder_signer.validate_counterparty_revocation( + self.context.cur_counterparty_commitment_transaction_number + 1, &secret ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?; - self.commitment_secrets.provide_secret(self.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret) + self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret) .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?; - self.latest_monitor_update_id += 1; + self.context.latest_monitor_update_id += 1; let mut monitor_update = ChannelMonitorUpdate { - update_id: self.latest_monitor_update_id, + update_id: self.context.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::CommitmentSecret { - idx: self.cur_counterparty_commitment_transaction_number + 1, + idx: self.context.cur_counterparty_commitment_transaction_number + 1, secret: msg.per_commitment_secret, }], }; @@ -3668,14 +3677,14 @@ impl Channel { // (note that we may still fail to generate the new commitment_signed message, but that's // OK, we step the channel here and *then* if the new generation fails we can fail the // channel based on that, but stepping stuff here should be safe either way. - self.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32); - self.sent_message_awaiting_response = None; - self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point; - self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); - self.cur_counterparty_commitment_transaction_number -= 1; + self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32); + self.context.sent_message_awaiting_response = None; + self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point; + self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); + self.context.cur_counterparty_commitment_transaction_number -= 1; - if self.announcement_sigs_state == AnnouncementSigsState::Committed { - self.announcement_sigs_state = AnnouncementSigsState::PeerReceived; + if self.context.announcement_sigs_state == AnnouncementSigsState::Committed { + self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived; } log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.channel_id())); @@ -3688,9 +3697,9 @@ impl Channel { let mut value_to_self_msat_diff: i64 = 0; { - // Take references explicitly so that we can hold multiple references to self. - let pending_inbound_htlcs: &mut Vec<_> = &mut self.pending_inbound_htlcs; - let pending_outbound_htlcs: &mut Vec<_> = &mut self.pending_outbound_htlcs; + // Take references explicitly so that we can hold multiple references to self.context. 
+ let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs; + let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs; // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug) pending_inbound_htlcs.retain(|htlc| { @@ -3769,54 +3778,54 @@ impl Channel { } } } - self.value_to_self_msat = (self.value_to_self_msat as i64 + value_to_self_msat_diff) as u64; + self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64; - if let Some((feerate, update_state)) = self.pending_update_fee { + if let Some((feerate, update_state)) = self.context.pending_update_fee { match update_state { FeeUpdateState::Outbound => { debug_assert!(self.is_outbound()); log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate); - self.feerate_per_kw = feerate; - self.pending_update_fee = None; + self.context.feerate_per_kw = feerate; + self.context.pending_update_fee = None; }, FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); }, FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); require_commitment = true; - self.feerate_per_kw = feerate; - self.pending_update_fee = None; + self.context.feerate_per_kw = feerate; + self.context.pending_update_fee = None; }, } } - if (self.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 { + if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 { // We can't actually generate a new commitment transaction (incl by freeing holding // cells) while we can't update the monitor, so we just return what we have. if require_commitment { - self.monitor_pending_commitment_signed = true; + self.context.monitor_pending_commitment_signed = true; // When the monitor updating is restored we'll call get_last_commitment_update(), // which does not update state, but we're definitely now awaiting a remote revoke // before we can step forward any more, so set it here. let mut additional_update = self.build_commitment_no_status_check(logger); // build_commitment_no_status_check may bump latest_monitor_id but we want them to be // strictly increasing by one, so decrement it here. 
- self.latest_monitor_update_id = monitor_update.update_id; + self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); } - self.monitor_pending_forwards.append(&mut to_forward_infos); - self.monitor_pending_failures.append(&mut revoked_htlcs); - self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs); + self.context.monitor_pending_forwards.append(&mut to_forward_infos); + self.context.monitor_pending_failures.append(&mut revoked_htlcs); + self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs); log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id())); return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update))); } match self.free_holding_cell_htlcs(logger) { (Some(_), htlcs_to_fail) => { - let mut additional_update = self.pending_monitor_updates.pop().unwrap().update; + let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update; // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be // strictly increasing by one, so decrement it here. - self.latest_monitor_update_id = monitor_update.update_id; + self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); @@ -3828,7 +3837,7 @@ impl Channel { // build_commitment_no_status_check may bump latest_monitor_id but we want them to be // strictly increasing by one, so decrement it here. - self.latest_monitor_update_id = monitor_update.update_id; + self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.", @@ -3873,11 +3882,11 @@ impl Channel { // Before proposing a feerate update, check that we can actually afford the new fee. let inbound_stats = self.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); let outbound_stats = self.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); - let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number); - let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, true, logger); + let keys = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let commitment_stats = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); let buffer_fee_msat = Channel::::commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.opt_anchors()) * 1000; let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; - if holder_balance_msat < buffer_fee_msat + self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { + if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { //TODO: auto-close after a number of failures? 
log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw); return None; @@ -3895,20 +3904,20 @@ impl Channel { return None; } - if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { + if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { force_holding_cell = true; } if force_holding_cell { - self.holding_cell_update_fee = Some(feerate_per_kw); + self.context.holding_cell_update_fee = Some(feerate_per_kw); return None; } - debug_assert!(self.pending_update_fee.is_none()); - self.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound)); + debug_assert!(self.context.pending_update_fee.is_none()); + self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound)); Some(msgs::UpdateFee { - channel_id: self.channel_id, + channel_id: self.context.channel_id, feerate_per_kw, }) } @@ -3919,30 +3928,30 @@ impl Channel { /// No further message handling calls may be made until a channel_reestablish dance has /// completed. pub fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) where L::Target: Logger { - assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0); - if self.channel_state < ChannelState::FundingSent as u32 { - self.channel_state = ChannelState::ShutdownComplete as u32; + assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); + if self.context.channel_state < ChannelState::FundingSent as u32 { + self.context.channel_state = ChannelState::ShutdownComplete as u32; return; } - if self.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) { // While the below code should be idempotent, it's simpler to just return early, as // redundant disconnect events can fire, though they should be rare. return; } - if self.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.announcement_sigs_state == AnnouncementSigsState::Committed { - self.announcement_sigs_state = AnnouncementSigsState::NotSent; + if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed { + self.context.announcement_sigs_state = AnnouncementSigsState::NotSent; } // Upon reconnect we have to start the closing_signed dance over, but shutdown messages // will be retransmitted. - self.last_sent_closing_fee = None; - self.pending_counterparty_closing_signed = None; - self.closing_fee_limits = None; + self.context.last_sent_closing_fee = None; + self.context.pending_counterparty_closing_signed = None; + self.context.closing_fee_limits = None; let mut inbound_drop_count = 0; - self.pending_inbound_htlcs.retain(|htlc| { + self.context.pending_inbound_htlcs.retain(|htlc| { match htlc.state { InboundHTLCState::RemoteAnnounced(_) => { // They sent us an update_add_htlc but we never got the commitment_signed. 
@@ -3967,16 +3976,16 @@ impl Channel { }, } }); - self.next_counterparty_htlc_id -= inbound_drop_count; + self.context.next_counterparty_htlc_id -= inbound_drop_count; - if let Some((_, update_state)) = self.pending_update_fee { + if let Some((_, update_state)) = self.context.pending_update_fee { if update_state == FeeUpdateState::RemoteAnnounced { debug_assert!(!self.is_outbound()); - self.pending_update_fee = None; + self.context.pending_update_fee = None; } } - for htlc in self.pending_outbound_htlcs.iter_mut() { + for htlc in self.context.pending_outbound_htlcs.iter_mut() { if let OutboundHTLCState::RemoteRemoved(_) = htlc.state { // They sent us an update to remove this but haven't yet sent the corresponding // commitment_signed, we need to move it back to Committed and they can re-send @@ -3985,9 +3994,9 @@ impl Channel { } } - self.sent_message_awaiting_response = None; + self.context.sent_message_awaiting_response = None; - self.channel_state |= ChannelState::PeerDisconnected as u32; + self.context.channel_state |= ChannelState::PeerDisconnected as u32; log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id())); } @@ -4007,13 +4016,13 @@ impl Channel { mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, mut pending_finalized_claimed_htlcs: Vec ) { - self.monitor_pending_revoke_and_ack |= resend_raa; - self.monitor_pending_commitment_signed |= resend_commitment; - self.monitor_pending_channel_ready |= resend_channel_ready; - self.monitor_pending_forwards.append(&mut pending_forwards); - self.monitor_pending_failures.append(&mut pending_fails); - self.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs); - self.channel_state |= ChannelState::MonitorUpdateInProgress as u32; + self.context.monitor_pending_revoke_and_ack |= resend_raa; + self.context.monitor_pending_commitment_signed |= resend_commitment; + self.context.monitor_pending_channel_ready |= resend_channel_ready; + self.context.monitor_pending_forwards.append(&mut pending_forwards); + self.context.monitor_pending_failures.append(&mut pending_fails); + self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs); + self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32; } /// Indicates that the latest ChannelMonitor update has been committed by the client @@ -4027,10 +4036,10 @@ impl Channel { L::Target: Logger, NS::Target: NodeSigner { - assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32); - self.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32); + assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32); + self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32); let mut found_blocked = false; - self.pending_monitor_updates.retain(|upd| { + self.context.pending_monitor_updates.retain(|upd| { if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); } if upd.blocked { found_blocked = true; } upd.blocked @@ -4040,12 +4049,12 @@ impl Channel { // (re-)broadcast the funding transaction as we may have declined to broadcast it when we // first received the funding_signed. 
let mut funding_broadcastable = - if self.is_outbound() && self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 { - self.funding_transaction.take() + if self.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 { + self.context.funding_transaction.take() } else { None }; // That said, if the funding transaction is already confirmed (ie we're active with a // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx. - if self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.minimum_depth != Some(0) { + if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) { funding_broadcastable = None; } @@ -4055,47 +4064,47 @@ impl Channel { // * an inbound channel that failed to persist the monitor on funding_created and we got // the funding transaction confirmed before the monitor was persisted, or // * a 0-conf channel and intended to send the channel_ready before any broadcast at all. - let channel_ready = if self.monitor_pending_channel_ready { - assert!(!self.is_outbound() || self.minimum_depth == Some(0), + let channel_ready = if self.context.monitor_pending_channel_ready { + assert!(!self.is_outbound() || self.context.minimum_depth == Some(0), "Funding transaction broadcast by the local client before it should have - LDK didn't do it!"); - self.monitor_pending_channel_ready = false; - let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx); + self.context.monitor_pending_channel_ready = false; + let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); Some(msgs::ChannelReady { channel_id: self.channel_id(), next_per_commitment_point, - short_channel_id_alias: Some(self.outbound_scid_alias), + short_channel_id_alias: Some(self.context.outbound_scid_alias), }) } else { None }; let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger); let mut accepted_htlcs = Vec::new(); - mem::swap(&mut accepted_htlcs, &mut self.monitor_pending_forwards); + mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards); let mut failed_htlcs = Vec::new(); - mem::swap(&mut failed_htlcs, &mut self.monitor_pending_failures); + mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures); let mut finalized_claimed_htlcs = Vec::new(); - mem::swap(&mut finalized_claimed_htlcs, &mut self.monitor_pending_finalized_fulfills); + mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills); - if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 { - self.monitor_pending_revoke_and_ack = false; - self.monitor_pending_commitment_signed = false; + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 { + self.context.monitor_pending_revoke_and_ack = false; + self.context.monitor_pending_commitment_signed = false; return MonitorRestoreUpdates { raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs }; } - let raa = if self.monitor_pending_revoke_and_ack { + let raa = if self.context.monitor_pending_revoke_and_ack { Some(self.get_last_revoke_and_ack()) } else 
{ None }; - let commitment_update = if self.monitor_pending_commitment_signed { + let commitment_update = if self.context.monitor_pending_commitment_signed { self.mark_awaiting_response(); Some(self.get_last_commitment_update(logger)) } else { None }; - self.monitor_pending_revoke_and_ack = false; - self.monitor_pending_commitment_signed = false; - let order = self.resend_order.clone(); + self.context.monitor_pending_revoke_and_ack = false; + self.context.monitor_pending_commitment_signed = false; + let order = self.context.resend_order.clone(); log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first", log_bytes!(self.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" }, if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, @@ -4111,14 +4120,14 @@ impl Channel { if self.is_outbound() { return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned())); } - if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned())); } - Channel::::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.feerate_per_kw), logger)?; + Channel::::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?; let feerate_over_dust_buffer = msg.feerate_per_kw > self.get_dust_buffer_feerate(None); - self.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced)); - self.update_time_counter += 1; + self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced)); + self.context.update_time_counter += 1; // If the feerate has increased over the previous dust buffer (note that // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we // won't be pushed over our dust exposure limit by the feerate increase. 
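The update_fee handling above only stages the counterparty's new feerate; earlier hunks in this file show it being promoted as the commitment dance progresses. The following is a hedged, illustrative sketch of that staging: the enum variant names mirror the ones in the diff, while the wrapper struct and method names are stand-ins, not the LDK API.

// Illustrative only: how a counterparty-initiated fee update is staged and later applied.
#[derive(Clone, Copy)]
enum FeeUpdateState { RemoteAnnounced, AwaitingRemoteRevokeToAnnounce }

struct FeeTracker {
    feerate_per_kw: u32,
    pending_update_fee: Option<(u32, FeeUpdateState)>,
}

impl FeeTracker {
    // Their update_fee is received: remember the feerate, but do not apply it yet.
    fn on_update_fee(&mut self, feerate_per_kw: u32) {
        self.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::RemoteAnnounced));
    }
    // Their commitment_signed is processed: the fee update now awaits their revoke_and_ack.
    fn on_commitment_signed(&mut self) {
        if let Some((_, state)) = self.pending_update_fee.as_mut() {
            if let FeeUpdateState::RemoteAnnounced = *state {
                *state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
            }
        }
    }
    // Their revoke_and_ack is received: promote the pending fee update to the committed feerate.
    fn on_revoke_and_ack(&mut self) {
        if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.pending_update_fee {
            self.feerate_per_kw = feerate;
            self.pending_update_fee = None;
        }
    }
}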
@@ -4140,10 +4149,10 @@ impl Channel { } fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK { - let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx); - let per_commitment_secret = self.holder_signer.release_commitment_secret(self.cur_holder_commitment_transaction_number + 2); + let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let per_commitment_secret = self.context.holder_signer.release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2); msgs::RevokeAndACK { - channel_id: self.channel_id, + channel_id: self.context.channel_id, per_commitment_secret, next_per_commitment_point, #[cfg(taproot)] @@ -4157,7 +4166,7 @@ impl Channel { let mut update_fail_htlcs = Vec::new(); let mut update_fail_malformed_htlcs = Vec::new(); - for htlc in self.pending_outbound_htlcs.iter() { + for htlc in self.context.pending_outbound_htlcs.iter() { if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state { update_add_htlcs.push(msgs::UpdateAddHTLC { channel_id: self.channel_id(), @@ -4170,7 +4179,7 @@ impl Channel { } } - for htlc in self.pending_inbound_htlcs.iter() { + for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state { match reason { &InboundHTLCRemovalReason::FailRelay(ref err_packet) => { @@ -4199,10 +4208,10 @@ impl Channel { } } - let update_fee = if self.is_outbound() && self.pending_update_fee.is_some() { + let update_fee = if self.is_outbound() && self.context.pending_update_fee.is_some() { Some(msgs::UpdateFee { channel_id: self.channel_id(), - feerate_per_kw: self.pending_update_fee.unwrap().0, + feerate_per_kw: self.context.pending_update_fee.unwrap().0, }) } else { None }; @@ -4230,7 +4239,7 @@ impl Channel { L::Target: Logger, NS::Target: NodeSigner { - if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 { // While BOLT 2 doesn't indicate explicitly we should error this channel here, it // almost certainly indicates we are going to end up out-of-sync in some way, so we // just close here instead of trying to recover. @@ -4243,17 +4252,17 @@ impl Channel { } if msg.next_remote_commitment_number > 0 { - let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx); + let expected_point = self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx); let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret) .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?; - if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) { + if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) { return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned())); } - if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number { + if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number { macro_rules! 
log_and_panic { ($err_msg: expr) => { - log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id)); - panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id)); + log_error!(logger, $err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id)); + panic!($err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id)); } } log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\ @@ -4269,7 +4278,7 @@ impl Channel { // Before we change the state of the channel, we check if the peer is sending a very old // commitment transaction number, if yes we send a warning message. - let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number - 1; + let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1; if msg.next_remote_commitment_number + 1 < our_commitment_transaction { return Err( ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction)) @@ -4278,23 +4287,23 @@ impl Channel { // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all // remaining cases either succeed or ErrorMessage-fail). - self.channel_state &= !(ChannelState::PeerDisconnected as u32); - self.sent_message_awaiting_response = None; + self.context.channel_state &= !(ChannelState::PeerDisconnected as u32); + self.context.sent_message_awaiting_response = None; - let shutdown_msg = if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 { - assert!(self.shutdown_scriptpubkey.is_some()); + let shutdown_msg = if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 { + assert!(self.context.shutdown_scriptpubkey.is_some()); Some(msgs::Shutdown { - channel_id: self.channel_id, + channel_id: self.context.channel_id, scriptpubkey: self.get_closing_scriptpubkey(), }) } else { None }; let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger); - if self.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 { + if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 { // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's. - if self.channel_state & (ChannelState::OurChannelReady as u32) == 0 || - self.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { + if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 || + self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { if msg.next_remote_commitment_number != 0 { return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned())); } @@ -4308,12 +4317,12 @@ impl Channel { } // We have OurChannelReady set! 
- let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx); + let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); return Ok(ReestablishResponses { channel_ready: Some(msgs::ChannelReady { channel_id: self.channel_id(), next_per_commitment_point, - short_channel_id_alias: Some(self.outbound_scid_alias), + short_channel_id_alias: Some(self.context.outbound_scid_alias), }), raa: None, commitment_update: None, order: RAACommitmentOrder::CommitmentFirst, @@ -4321,13 +4330,13 @@ impl Channel { }); } - let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number { + let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number { // Remote isn't waiting on any RevokeAndACK from us! // Note that if we need to repeat our ChannelReady we'll do that in the next if block. None - } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.cur_holder_commitment_transaction_number { - if self.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { - self.monitor_pending_revoke_and_ack = true; + } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number { + if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { + self.context.monitor_pending_revoke_and_ack = true; None } else { Some(self.get_last_revoke_and_ack()) @@ -4340,19 +4349,19 @@ impl Channel { // revoke_and_ack, not on sending commitment_signed, so we add one if have // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten // the corresponding revoke_and_ack back yet. 
- let is_awaiting_remote_revoke = self.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0; + let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0; if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() { self.mark_awaiting_response(); } - let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 }; + let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 }; - let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number == 1 { + let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 { // We should never have to worry about MonitorUpdateInProgress resending ChannelReady - let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx); + let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); Some(msgs::ChannelReady { channel_id: self.channel_id(), next_per_commitment_point, - short_channel_id_alias: Some(self.outbound_scid_alias), + short_channel_id_alias: Some(self.context.outbound_scid_alias), }) } else { None }; @@ -4367,7 +4376,7 @@ impl Channel { channel_ready, shutdown_msg, announcement_sigs, raa: required_revoke, commitment_update: None, - order: self.resend_order.clone(), + order: self.context.resend_order.clone(), }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { if required_revoke.is_some() { @@ -4376,19 +4385,19 @@ impl Channel { log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.channel_id())); } - if self.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { - self.monitor_pending_commitment_signed = true; + if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { + self.context.monitor_pending_commitment_signed = true; Ok(ReestablishResponses { channel_ready, shutdown_msg, announcement_sigs, commitment_update: None, raa: None, - order: self.resend_order.clone(), + order: self.context.resend_order.clone(), }) } else { Ok(ReestablishResponses { channel_ready, shutdown_msg, announcement_sigs, raa: required_revoke, commitment_update: Some(self.get_last_commitment_update(logger)), - order: self.resend_order.clone(), + order: self.context.resend_order.clone(), }) } } else { @@ -4403,7 +4412,7 @@ impl Channel { -> (u64, u64) where F::Target: FeeEstimator { - if let Some((min, max)) = self.closing_fee_limits { return (min, max); } + if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); } // Propose a range from our current Background feerate to our Normal feerate plus our // force_close_avoidance_max_fee_satoshis. @@ -4418,8 +4427,8 @@ impl Channel { // very good reason to apply such a limit in any case. We don't bother doing so, risking // some force-closure by old nodes, but we wanted to close the channel anyway. 
- if let Some(target_feerate) = self.target_closing_feerate_sats_per_kw { - let min_feerate = if self.is_outbound() { target_feerate } else { cmp::min(self.feerate_per_kw, target_feerate) }; + if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw { + let min_feerate = if self.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) }; proposed_feerate = cmp::max(proposed_feerate, min_feerate); proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate); } @@ -4431,20 +4440,20 @@ impl Channel { // come to consensus with our counterparty on appropriate fees, however it should be a // relatively rare case. We can revisit this later, though note that in order to determine // if the funders' output is dust we have to know the absolute fee we're going to use. - let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap())); + let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap())); let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000; let proposed_max_total_fee_satoshis = if self.is_outbound() { // We always add force_close_avoidance_max_fee_satoshis to our normal // feerate-calculated fee, but allow the max to be overridden if we're using a // target feerate-calculated fee. - cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.config.options.force_close_avoidance_max_fee_satoshis, + cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis, proposed_max_feerate as u64 * tx_weight / 1000) } else { - self.channel_value_satoshis - (self.value_to_self_msat + 999) / 1000 + self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000 }; - self.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis)); - self.closing_fee_limits.clone().unwrap() + self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis)); + self.context.closing_fee_limits.clone().unwrap() } /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true @@ -4452,12 +4461,12 @@ impl Channel { /// this point if we're the funder we should send the initial closing_signed, and in any case /// shutdown should complete within a reasonable timeframe. fn closing_negotiation_ready(&self) -> bool { - self.pending_inbound_htlcs.is_empty() && self.pending_outbound_htlcs.is_empty() && - self.channel_state & + self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() && + self.context.channel_state & (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK && - self.pending_update_fee.is_none() + self.context.pending_update_fee.is_none() } /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning @@ -4465,10 +4474,10 @@ impl Channel { /// Should be called on a one-minute timer. 
pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> { if self.closing_negotiation_ready() { - if self.closing_signed_in_flight { + if self.context.closing_signed_in_flight { return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned())); } else { - self.closing_signed_in_flight = true; + self.context.closing_signed_in_flight = true; } } Ok(()) @@ -4479,12 +4488,12 @@ impl Channel { -> Result<(Option, Option), ChannelError> where F::Target: FeeEstimator, L::Target: Logger { - if self.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() { + if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() { return Ok((None, None)); } if !self.is_outbound() { - if let Some(msg) = &self.pending_counterparty_closing_signed.take() { + if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() { return self.closing_signed(fee_estimator, &msg); } return Ok((None, None)); @@ -4492,18 +4501,18 @@ impl Channel { let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator); - assert!(self.shutdown_scriptpubkey.is_some()); + assert!(self.context.shutdown_scriptpubkey.is_some()); let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false); log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)", our_min_fee, our_max_fee, total_fee_satoshis); - let sig = self.holder_signer - .sign_closing_transaction(&closing_tx, &self.secp_ctx) + let sig = self.context.holder_signer + .sign_closing_transaction(&closing_tx, &self.context.secp_ctx) .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?; - self.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone())); + self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone())); Ok((Some(msgs::ClosingSigned { - channel_id: self.channel_id, + channel_id: self.context.channel_id, fee_satoshis: total_fee_satoshis, signature: sig, fee_range: Some(msgs::ClosingSignedFeeRange { @@ -4517,7 +4526,7 @@ impl Channel { // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt // a reconnection. fn mark_awaiting_response(&mut self) { - self.sent_message_awaiting_response = Some(0); + self.context.sent_message_awaiting_response = Some(0); } /// Determines whether we should disconnect the counterparty due to not receiving a response @@ -4525,7 +4534,7 @@ impl Channel { /// /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`]. pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool { - let ticks_elapsed = if let Some(ticks_elapsed) = self.sent_message_awaiting_response.as_mut() { + let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() { ticks_elapsed } else { // Don't disconnect when we're not waiting on a response. 
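mark_awaiting_response and should_disconnect_peer_awaiting_response above arm and poll a per-channel counter; the tail of the latter is elided by the hunk. The sketch below is therefore a hedged reconstruction of the overall shape, under the assumption that the counter is bumped once per timer tick and compared against an allowed tick budget; the wrapper type and field names other than the two methods are hypothetical.

// Hedged sketch, hypothetical wrapper type: assumes the counter is incremented once per
// timer tick and the peer is disconnected once it reaches the configured tick budget.
struct ResponseTimer {
    // None when we are not waiting on the peer; Some(ticks_elapsed) otherwise.
    sent_message_awaiting_response: Option<usize>,
    // Hypothetical stand-in for DISCONNECT_PEER_AWAITING_RESPONSE_TICKS.
    allowed_ticks: usize,
}

impl ResponseTimer {
    fn mark_awaiting_response(&mut self) {
        self.sent_message_awaiting_response = Some(0);
    }
    // Called from the once-per-timer-tick path; true means "disconnect the peer".
    fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
        match self.sent_message_awaiting_response.as_mut() {
            Some(ticks_elapsed) => {
                *ticks_elapsed += 1;
                *ticks_elapsed >= self.allowed_ticks
            },
            // Don't disconnect when we're not waiting on a response.
            None => false,
        }
    }
}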
@@ -4540,40 +4549,40 @@ impl Channel { ) -> Result<(Option, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where SP::Target: SignerProvider { - if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned())); } - if self.channel_state < ChannelState::FundingSent as u32 { + if self.context.channel_state < ChannelState::FundingSent as u32 { // Spec says we should fail the connection, not the channel, but that's nonsense, there // are plenty of reasons you may want to fail a channel pre-funding, and spec says you // can do that via error message without getting a connection fail anyway... return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned())); } - for htlc in self.pending_inbound_htlcs.iter() { + for htlc in self.context.pending_inbound_htlcs.iter() { if let InboundHTLCState::RemoteAnnounced(_) = htlc.state { return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned())); } } - assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0); + assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) { return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex()))); } - if self.counterparty_shutdown_scriptpubkey.is_some() { - if Some(&msg.scriptpubkey) != self.counterparty_shutdown_scriptpubkey.as_ref() { + if self.context.counterparty_shutdown_scriptpubkey.is_some() { + if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() { return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex()))); } } else { - self.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone()); + self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone()); } // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc // immediately after the commitment dance, but we can send a Shutdown because we won't send // any further commitment updates after we set LocalShutdownSent. - let send_shutdown = (self.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32; + let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32; - let update_shutdown_script = match self.shutdown_scriptpubkey { + let update_shutdown_script = match self.context.shutdown_scriptpubkey { Some(_) => false, None => { assert!(send_shutdown); @@ -4584,32 +4593,32 @@ impl Channel { if !shutdown_scriptpubkey.is_compatible(their_features) { return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey))); } - self.shutdown_scriptpubkey = Some(shutdown_scriptpubkey); + self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey); true }, }; // From here on out, we may not fail! 
- self.channel_state |= ChannelState::RemoteShutdownSent as u32; - self.update_time_counter += 1; + self.context.channel_state |= ChannelState::RemoteShutdownSent as u32; + self.context.update_time_counter += 1; let monitor_update = if update_shutdown_script { - self.latest_monitor_update_id += 1; + self.context.latest_monitor_update_id += 1; let monitor_update = ChannelMonitorUpdate { - update_id: self.latest_monitor_update_id, + update_id: self.context.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::ShutdownScript { scriptpubkey: self.get_closing_scriptpubkey(), }], }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); if self.push_blockable_mon_update(monitor_update) { - self.pending_monitor_updates.last().map(|upd| &upd.update) + self.context.pending_monitor_updates.last().map(|upd| &upd.update) } else { None } } else { None }; let shutdown = if send_shutdown { Some(msgs::Shutdown { - channel_id: self.channel_id, + channel_id: self.context.channel_id, scriptpubkey: self.get_closing_scriptpubkey(), }) } else { None }; @@ -4617,9 +4626,9 @@ impl Channel { // We can't send our shutdown until we've committed all of our pending HTLCs, but the // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding // cell HTLCs and return them to fail the payment. - self.holding_cell_update_fee = None; - let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len()); - self.holding_cell_htlc_updates.retain(|htlc_update| { + self.context.holding_cell_update_fee = None; + let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len()); + self.context.holding_cell_htlc_updates.retain(|htlc_update| { match htlc_update { &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. 
} => { dropped_outbound_htlcs.push((source.clone(), payment_hash.clone())); @@ -4629,8 +4638,8 @@ impl Channel { } }); - self.channel_state |= ChannelState::LocalShutdownSent as u32; - self.update_time_counter += 1; + self.context.channel_state |= ChannelState::LocalShutdownSent as u32; + self.context.update_time_counter += 1; Ok((shutdown, monitor_update, dropped_outbound_htlcs)) } @@ -4663,25 +4672,25 @@ impl Channel { -> Result<(Option, Option), ChannelError> where F::Target: FeeEstimator { - if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK { + if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK { return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned())); } - if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned())); } - if !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() { + if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() { return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned())); } if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned())); } - if self.is_outbound() && self.last_sent_closing_fee.is_none() { + if self.is_outbound() && self.context.last_sent_closing_fee.is_none() { return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned())); } - if self.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 { - self.pending_counterparty_closing_signed = Some(msg.clone()); + if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 { + self.context.pending_counterparty_closing_signed = Some(msg.clone()); return Ok((None, None)); } @@ -4690,16 +4699,16 @@ impl Channel { if used_total_fee != msg.fee_satoshis { return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee))); } - let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.channel_value_satoshis); + let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis); - match self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) { + match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) { Ok(_) => {}, Err(_e) => { // The remote end may have decided to revoke their output due to inconsistent dust // limits, so check for that case by re-checking the signature here. 
closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0; - let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.channel_value_satoshis); - secp_check!(self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned()); + let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis); + secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned()); }, }; @@ -4709,12 +4718,12 @@ impl Channel { } } - assert!(self.shutdown_scriptpubkey.is_some()); - if let Some((last_fee, sig)) = self.last_sent_closing_fee { + assert!(self.context.shutdown_scriptpubkey.is_some()); + if let Some((last_fee, sig)) = self.context.last_sent_closing_fee { if last_fee == msg.fee_satoshis { let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig); - self.channel_state = ChannelState::ShutdownComplete as u32; - self.update_time_counter += 1; + self.context.channel_state = ChannelState::ShutdownComplete as u32; + self.context.update_time_counter += 1; return Ok((None, Some(tx))); } } @@ -4729,20 +4738,20 @@ impl Channel { self.build_closing_transaction($new_fee, false) }; - let sig = self.holder_signer - .sign_closing_transaction(&closing_tx, &self.secp_ctx) + let sig = self.context.holder_signer + .sign_closing_transaction(&closing_tx, &self.context.secp_ctx) .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?; let signed_tx = if $new_fee == msg.fee_satoshis { - self.channel_state = ChannelState::ShutdownComplete as u32; - self.update_time_counter += 1; + self.context.channel_state = ChannelState::ShutdownComplete as u32; + self.context.update_time_counter += 1; let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig); Some(tx) } else { None }; - self.last_sent_closing_fee = Some((used_fee, sig.clone())); + self.context.last_sent_closing_fee = Some((used_fee, sig.clone())); return Ok((Some(msgs::ClosingSigned { - channel_id: self.channel_id, + channel_id: self.context.channel_id, fee_satoshis: used_fee, signature: sig, fee_range: Some(msgs::ClosingSignedFeeRange { @@ -4767,7 +4776,7 @@ impl Channel { if !self.is_outbound() { // They have to pay, so pick the highest fee in the overlapping range. // We should never set an upper bound aside from their full balance - debug_assert_eq!(our_max_fee, self.channel_value_satoshis - (self.value_to_self_msat + 999) / 1000); + debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000); propose_fee!(cmp::min(max_fee_satoshis, our_max_fee)); } else { if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee { @@ -4780,7 +4789,7 @@ impl Channel { } else { // Old fee style negotiation. We don't bother to enforce whether they are complying // with the "making progress" requirements, we just comply and hope for the best. - if let Some((last_fee, _)) = self.last_sent_closing_fee { + if let Some((last_fee, _)) = self.context.last_sent_closing_fee { if msg.fee_satoshis > last_fee { if msg.fee_satoshis < our_max_fee { propose_fee!(msg.fee_satoshis); @@ -4813,105 +4822,105 @@ impl Channel { // Public utilities: pub fn channel_id(&self) -> [u8; 32] { - self.channel_id + self.context.channel_id } // Return the `temporary_channel_id` used during channel establishment. 
// // Will return `None` for channels created prior to LDK version 0.0.115. pub fn temporary_channel_id(&self) -> Option<[u8; 32]> { - self.temporary_channel_id + self.context.temporary_channel_id } pub fn minimum_depth(&self) -> Option { - self.minimum_depth + self.context.minimum_depth } /// Gets the "user_id" value passed into the construction of this channel. It has no special /// meaning and exists only to allow users to have a persistent identifier of a channel. pub fn get_user_id(&self) -> u128 { - self.user_id + self.context.user_id } /// Gets the channel's type pub fn get_channel_type(&self) -> &ChannelTypeFeatures { - &self.channel_type + &self.context.channel_type } /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus, /// is_usable() returns true). /// Allowed in any state (including after shutdown) pub fn get_short_channel_id(&self) -> Option { - self.short_channel_id + self.context.short_channel_id } /// Allowed in any state (including after shutdown) pub fn latest_inbound_scid_alias(&self) -> Option { - self.latest_inbound_scid_alias + self.context.latest_inbound_scid_alias } /// Allowed in any state (including after shutdown) pub fn outbound_scid_alias(&self) -> u64 { - self.outbound_scid_alias + self.context.outbound_scid_alias } /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0, /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases. pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) { - assert_eq!(self.outbound_scid_alias, 0); - self.outbound_scid_alias = outbound_scid_alias; + assert_eq!(self.context.outbound_scid_alias, 0); + self.context.outbound_scid_alias = outbound_scid_alias; } /// Returns the funding_txo we either got from our peer, or were given by /// get_outbound_funding_created. pub fn get_funding_txo(&self) -> Option { - self.channel_transaction_parameters.funding_outpoint + self.context.channel_transaction_parameters.funding_outpoint } /// Returns the block hash in which our funding transaction was confirmed. pub fn get_funding_tx_confirmed_in(&self) -> Option { - self.funding_tx_confirmed_in + self.context.funding_tx_confirmed_in } /// Returns the current number of confirmations on the funding transaction. pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 { - if self.funding_tx_confirmation_height == 0 { + if self.context.funding_tx_confirmation_height == 0 { // We either haven't seen any confirmation yet, or observed a reorg. 
 		return 0;
 	}

-	height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
+	height.checked_sub(self.context.funding_tx_confirmation_height).map_or(0, |c| c + 1)
 }

 fn get_holder_selected_contest_delay(&self) -> u16 {
-	self.channel_transaction_parameters.holder_selected_contest_delay
+	self.context.channel_transaction_parameters.holder_selected_contest_delay
 }

 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
-	&self.channel_transaction_parameters.holder_pubkeys
+	&self.context.channel_transaction_parameters.holder_pubkeys
 }

 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
-	self.channel_transaction_parameters.counterparty_parameters
+	self.context.channel_transaction_parameters.counterparty_parameters
 		.as_ref().map(|params| params.selected_contest_delay)
 }

 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
-	&self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
+	&self.context.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
 }

 /// Allowed in any state (including after shutdown)
 pub fn get_counterparty_node_id(&self) -> PublicKey {
-	self.counterparty_node_id
+	self.context.counterparty_node_id
 }

 /// Allowed in any state (including after shutdown)
 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
-	self.holder_htlc_minimum_msat
+	self.context.holder_htlc_minimum_msat
 }

 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
-	self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
+	self.get_htlc_maximum_msat(self.context.holder_max_htlc_value_in_flight_msat)
 }

 /// Allowed in any state (including after shutdown)
@@ -4920,111 +4929,111 @@ impl Channel {
 	// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
 	// to use full capacity. This is an effort to reduce routing failures, because in many cases
 	// channel might have been used to route very small values (either by honest users or as DoS).
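For the `get_funding_tx_confirmations` helper above: a transaction confirmed in the current chain tip counts as one confirmation, so the helper adds one to the height difference and clamps to zero when the stored confirmation height is ahead of the queried height. A minimal standalone check of that arithmetic, outside the channel type:

    // Standalone sketch of the confirmation-count arithmetic used above.
    fn confirmations(tip_height: u32, confirmation_height: u32) -> u32 {
        if confirmation_height == 0 { return 0; }
        tip_height.checked_sub(confirmation_height).map_or(0, |c| c + 1)
    }

    fn main() {
        assert_eq!(confirmations(100, 100), 1); // confirmed in the tip block
        assert_eq!(confirmations(105, 100), 6);
        assert_eq!(confirmations(99, 100), 0);  // reorged below the confirmation height
    }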
- self.channel_value_satoshis * 1000 * 9 / 10, + self.context.channel_value_satoshis * 1000 * 9 / 10, - self.counterparty_max_htlc_value_in_flight_msat + self.context.counterparty_max_htlc_value_in_flight_msat ); } /// Allowed in any state (including after shutdown) pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 { - self.counterparty_htlc_minimum_msat + self.context.counterparty_htlc_minimum_msat } /// Allowed in any state (including after shutdown), but will return none before TheirInitSent pub fn get_counterparty_htlc_maximum_msat(&self) -> Option { - self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat) + self.get_htlc_maximum_msat(self.context.counterparty_max_htlc_value_in_flight_msat) } fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option { - self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| { - let holder_reserve = self.holder_selected_channel_reserve_satoshis; + self.context.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| { + let holder_reserve = self.context.holder_selected_channel_reserve_satoshis; cmp::min( - (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000, + (self.context.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000, party_max_htlc_value_in_flight_msat ) }) } pub fn get_value_satoshis(&self) -> u64 { - self.channel_value_satoshis + self.context.channel_value_satoshis } pub fn get_fee_proportional_millionths(&self) -> u32 { - self.config.options.forwarding_fee_proportional_millionths + self.context.config.options.forwarding_fee_proportional_millionths } pub fn get_cltv_expiry_delta(&self) -> u16 { - cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA) + cmp::max(self.context.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA) } pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 { - self.config.options.max_dust_htlc_exposure_msat + self.context.config.options.max_dust_htlc_exposure_msat } /// Returns the previous [`ChannelConfig`] applied to this channel, if any. pub fn prev_config(&self) -> Option { - self.prev_config.map(|prev_config| prev_config.0) + self.context.prev_config.map(|prev_config| prev_config.0) } // Checks whether we should emit a `ChannelPending` event. pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool { - self.is_funding_initiated() && !self.channel_pending_event_emitted + self.is_funding_initiated() && !self.context.channel_pending_event_emitted } // Returns whether we already emitted a `ChannelPending` event. pub(crate) fn channel_pending_event_emitted(&self) -> bool { - self.channel_pending_event_emitted + self.context.channel_pending_event_emitted } // Remembers that we already emitted a `ChannelPending` event. pub(crate) fn set_channel_pending_event_emitted(&mut self) { - self.channel_pending_event_emitted = true; + self.context.channel_pending_event_emitted = true; } // Checks whether we should emit a `ChannelReady` event. pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool { - self.is_usable() && !self.channel_ready_event_emitted + self.is_usable() && !self.context.channel_ready_event_emitted } // Remembers that we already emitted a `ChannelReady` event. pub(crate) fn set_channel_ready_event_emitted(&mut self) { - self.channel_ready_event_emitted = true; + self.context.channel_ready_event_emitted = true; } /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. 
Once /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will /// no longer be considered when forwarding HTLCs. pub fn maybe_expire_prev_config(&mut self) { - if self.prev_config.is_none() { + if self.context.prev_config.is_none() { return; } - let prev_config = self.prev_config.as_mut().unwrap(); + let prev_config = self.context.prev_config.as_mut().unwrap(); prev_config.1 += 1; if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS { - self.prev_config = None; + self.context.prev_config = None; } } /// Returns the current [`ChannelConfig`] applied to the channel. pub fn config(&self) -> ChannelConfig { - self.config.options + self.context.config.options } /// Updates the channel's config. A bool is returned indicating whether the config update /// applied resulted in a new ChannelUpdate message. pub fn update_config(&mut self, config: &ChannelConfig) -> bool { let did_channel_update = - self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths || - self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat || - self.config.options.cltv_expiry_delta != config.cltv_expiry_delta; + self.context.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths || + self.context.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat || + self.context.config.options.cltv_expiry_delta != config.cltv_expiry_delta; if did_channel_update { - self.prev_config = Some((self.config.options, 0)); + self.context.prev_config = Some((self.context.config.options, 0)); // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay // policy change to propagate throughout the network. - self.update_time_counter += 1; + self.context.update_time_counter += 1; } - self.config.options = *config; + self.context.config.options = *config; did_channel_update } @@ -5066,7 +5075,7 @@ impl Channel { } pub fn get_feerate_sat_per_1000_weight(&self) -> u32 { - self.feerate_per_kw + self.context.feerate_per_kw } pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option) -> u32 { @@ -5075,10 +5084,10 @@ impl Channel { // whichever is higher. This ensures that we aren't suddenly exposed to significantly // more dust balance if the feerate increases when we have several HTLCs pending // which are near the dust limit. - let mut feerate_per_kw = self.feerate_per_kw; + let mut feerate_per_kw = self.context.feerate_per_kw; // If there's a pending update fee, use it to ensure we aren't under-estimating // potential feerate updates coming soon. 
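`update_config` above stashes the old options in `prev_config` together with a tick count, and `maybe_expire_prev_config` drops them once `EXPIRE_PREV_CONFIG_TICKS` ticks have elapsed, so HTLCs forwarded under the previous fee policy keep being honoured for a grace period. A minimal sketch of that expiry pattern, using a placeholder options type rather than the real `ChannelConfig`:

    // Standalone sketch of keeping a previous config alive for a fixed number of ticks.
    const EXPIRE_PREV_CONFIG_TICKS: usize = 5;

    #[derive(Clone, Copy, PartialEq)]
    struct Opts { fee_base_msat: u32 }

    struct Cfg {
        current: Opts,
        prev: Option<(Opts, usize)>,
    }

    impl Cfg {
        fn update(&mut self, new: Opts) {
            if new != self.current {
                self.prev = Some((self.current, 0));
                self.current = new;
            }
        }
        fn tick(&mut self) {
            if self.prev.is_none() { return; }
            let prev = self.prev.as_mut().unwrap();
            prev.1 += 1;
            if prev.1 == EXPIRE_PREV_CONFIG_TICKS {
                self.prev = None;
            }
        }
    }

    fn main() {
        let mut cfg = Cfg { current: Opts { fee_base_msat: 1_000 }, prev: None };
        cfg.update(Opts { fee_base_msat: 2_000 });
        assert_eq!(cfg.current.fee_base_msat, 2_000);
        assert!(cfg.prev.is_some());
        for _ in 0..EXPIRE_PREV_CONFIG_TICKS { cfg.tick(); }
        assert!(cfg.prev.is_none());
    }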
- if let Some((feerate, _)) = self.pending_update_fee { + if let Some((feerate, _)) = self.context.pending_update_fee { feerate_per_kw = cmp::max(feerate_per_kw, feerate); } if let Some(feerate) = outbound_feerate_update { @@ -5088,33 +5097,33 @@ impl Channel { } pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 { - self.cur_holder_commitment_transaction_number + 1 + self.context.cur_holder_commitment_transaction_number + 1 } pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 { - self.cur_counterparty_commitment_transaction_number + 1 - if self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 } + self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 } } pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 { - self.cur_counterparty_commitment_transaction_number + 2 + self.context.cur_counterparty_commitment_transaction_number + 2 } #[cfg(test)] pub fn get_signer(&self) -> &Signer { - &self.holder_signer + &self.context.holder_signer } #[cfg(test)] pub fn get_value_stat(&self) -> ChannelValueStat { ChannelValueStat { - value_to_self_msat: self.value_to_self_msat, - channel_value_msat: self.channel_value_satoshis * 1000, - channel_reserve_msat: self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000, - pending_outbound_htlcs_amount_msat: self.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::(), - pending_inbound_htlcs_amount_msat: self.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::(), + value_to_self_msat: self.context.value_to_self_msat, + channel_value_msat: self.context.channel_value_satoshis * 1000, + channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000, + pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::(), + pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::(), holding_cell_outbound_amount_msat: { let mut res = 0; - for h in self.holding_cell_htlc_updates.iter() { + for h in self.context.holding_cell_htlc_updates.iter() { match h { &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. 
} => { res += amount_msat; @@ -5124,72 +5133,72 @@ impl Channel { } res }, - counterparty_max_htlc_value_in_flight_msat: self.counterparty_max_htlc_value_in_flight_msat, - counterparty_dust_limit_msat: self.counterparty_dust_limit_satoshis * 1000, + counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat, + counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000, } } /// Allowed in any state (including after shutdown) pub fn get_update_time_counter(&self) -> u32 { - self.update_time_counter + self.context.update_time_counter } pub fn get_latest_monitor_update_id(&self) -> u64 { - self.latest_monitor_update_id + self.context.latest_monitor_update_id } pub fn should_announce(&self) -> bool { - self.config.announced_channel + self.context.config.announced_channel } pub fn is_outbound(&self) -> bool { - self.channel_transaction_parameters.is_outbound_from_holder + self.context.channel_transaction_parameters.is_outbound_from_holder } /// Gets the fee we'd want to charge for adding an HTLC output to this Channel /// Allowed in any state (including after shutdown) pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 { - self.config.options.forwarding_fee_base_msat + self.context.config.options.forwarding_fee_base_msat } /// Returns true if we've ever received a message from the remote end for this Channel pub fn have_received_message(&self) -> bool { - self.channel_state > (ChannelState::OurInitSent as u32) + self.context.channel_state > (ChannelState::OurInitSent as u32) } /// Returns true if this channel is fully established and not known to be closing. /// Allowed in any state (including after shutdown) pub fn is_usable(&self) -> bool { let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK; - (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready + (self.context.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.context.monitor_pending_channel_ready } /// Returns true if this channel is currently available for use. This is a superset of /// is_usable() and considers things like the channel being temporarily disabled. /// Allowed in any state (including after shutdown) pub fn is_live(&self) -> bool { - self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0) + self.is_usable() && (self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0) } /// Returns true if this channel has been marked as awaiting a monitor update to move forward. /// Allowed in any state (including after shutdown) pub fn is_awaiting_monitor_update(&self) -> bool { - (self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 + (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 } pub fn get_latest_complete_monitor_update_id(&self) -> u64 { - if self.pending_monitor_updates.is_empty() { return self.get_latest_monitor_update_id(); } - self.pending_monitor_updates[0].update.update_id - 1 + if self.context.pending_monitor_updates.is_empty() { return self.get_latest_monitor_update_id(); } + self.context.pending_monitor_updates[0].update.update_id - 1 } /// Returns the next blocked monitor update, if one exists, and a bool which indicates a /// further blocked monitor update exists after the next. 
pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> { - for i in 0..self.pending_monitor_updates.len() { - if self.pending_monitor_updates[i].blocked { - self.pending_monitor_updates[i].blocked = false; - return Some((&self.pending_monitor_updates[i].update, - self.pending_monitor_updates.len() > i + 1)); + for i in 0..self.context.pending_monitor_updates.len() { + if self.context.pending_monitor_updates[i].blocked { + self.context.pending_monitor_updates[i].blocked = false; + return Some((&self.context.pending_monitor_updates[i].update, + self.context.pending_monitor_updates.len() > i + 1)); } } None @@ -5198,8 +5207,8 @@ impl Channel { /// Pushes a new monitor update into our monitor update queue, returning whether it should be /// immediately given to the user for persisting or if it should be held as blocked. fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool { - let release_monitor = self.pending_monitor_updates.iter().all(|upd| !upd.blocked); - self.pending_monitor_updates.push(PendingChannelMonitorUpdate { + let release_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked); + self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate { update, blocked: !release_monitor }); release_monitor @@ -5211,15 +5220,15 @@ impl Channel { fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> Option<&ChannelMonitorUpdate> { let release_monitor = self.push_blockable_mon_update(update); - if release_monitor { self.pending_monitor_updates.last().map(|upd| &upd.update) } else { None } + if release_monitor { self.context.pending_monitor_updates.last().map(|upd| &upd.update) } else { None } } pub fn no_monitor_updates_pending(&self) -> bool { - self.pending_monitor_updates.is_empty() + self.context.pending_monitor_updates.is_empty() } pub fn complete_all_mon_updates_through(&mut self, update_id: u64) { - self.pending_monitor_updates.retain(|upd| { + self.context.pending_monitor_updates.retain(|upd| { if upd.update.update_id <= update_id { assert!(!upd.blocked, "Completed update must have flown"); false @@ -5228,18 +5237,18 @@ impl Channel { } pub fn complete_one_mon_update(&mut self, update_id: u64) { - self.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id); + self.context.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id); } /// Returns an iterator over all unblocked monitor updates which have not yet completed. pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator { - self.pending_monitor_updates.iter() + self.context.pending_monitor_updates.iter() .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) }) } /// Returns true if funding_created was sent/received. pub fn is_funding_initiated(&self) -> bool { - self.channel_state >= ChannelState::FundingSent as u32 + self.context.channel_state >= ChannelState::FundingSent as u32 } /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor. @@ -5248,16 +5257,16 @@ impl Channel { /// advanced state. 
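The `pending_monitor_updates` methods above form a small ordered queue: a newly pushed update is released immediately only when nothing ahead of it is blocked, `unblock_next_blocked_monitor_update` releases the first blocked entry, and completion removes entries by `update_id`. A toy model of those semantics with the monitor-update type reduced to an id:

    // Toy model of the blocked monitor-update queue semantics.
    struct PendingUpdate { update_id: u64, blocked: bool }

    #[derive(Default)]
    struct Queue { pending: Vec<PendingUpdate> }

    impl Queue {
        /// Push an update; returns true if it may be handed to the persister right away.
        fn push(&mut self, update_id: u64) -> bool {
            let release = self.pending.iter().all(|u| !u.blocked);
            self.pending.push(PendingUpdate { update_id, blocked: !release });
            release
        }
        /// Unblock the first blocked update, returning its id and whether more entries follow it.
        fn unblock_next(&mut self) -> Option<(u64, bool)> {
            for i in 0..self.pending.len() {
                if self.pending[i].blocked {
                    self.pending[i].blocked = false;
                    return Some((self.pending[i].update_id, self.pending.len() > i + 1));
                }
            }
            None
        }
        fn complete(&mut self, update_id: u64) {
            self.pending.retain(|u| u.update_id != update_id);
        }
    }

    fn main() {
        let mut q = Queue::default();
        assert!(q.push(1));                                              // nothing blocked, released immediately
        q.pending.push(PendingUpdate { update_id: 2, blocked: true });   // a deliberately blocked update
        assert!(!q.push(3));                                             // held behind the blocked one
        assert_eq!(q.unblock_next(), Some((2, true)));
        q.complete(1);
    }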
pub fn is_awaiting_initial_mon_persist(&self) -> bool { if !self.is_awaiting_monitor_update() { return false; } - if self.channel_state & + if self.context.channel_state & !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) == ChannelState::FundingSent as u32 { // If we're not a 0conf channel, we'll be waiting on a monitor update with only // FundingSent set, though our peer could have sent their channel_ready. - debug_assert!(self.minimum_depth.unwrap_or(1) > 0); + debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0); return true; } - if self.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 && - self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 { + if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 && + self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 { // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while // waiting for the initial monitor persistence. Thus, we check if our commitment // transaction numbers have both been iterated only exactly once (for the @@ -5270,9 +5279,9 @@ impl Channel { // Because deciding we're awaiting initial broadcast spuriously could result in // funds-loss (as we don't have a monitor, but have the funding transaction confirmed), // we hard-assert here, even in production builds. - if self.is_outbound() { assert!(self.funding_transaction.is_some()); } - assert!(self.monitor_pending_channel_ready); - assert_eq!(self.latest_monitor_update_id, 0); + if self.is_outbound() { assert!(self.context.funding_transaction.is_some()); } + assert!(self.context.monitor_pending_channel_ready); + assert_eq!(self.context.latest_monitor_update_id, 0); return true; } false @@ -5280,68 +5289,68 @@ impl Channel { /// Returns true if our channel_ready has been sent pub fn is_our_channel_ready(&self) -> bool { - (self.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.channel_state >= ChannelState::ChannelReady as u32 + (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state >= ChannelState::ChannelReady as u32 } /// Returns true if our peer has either initiated or agreed to shut down the channel. pub fn received_shutdown(&self) -> bool { - (self.channel_state & ChannelState::RemoteShutdownSent as u32) != 0 + (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0 } /// Returns true if we either initiated or agreed to shut down the channel. pub fn sent_shutdown(&self) -> bool { - (self.channel_state & ChannelState::LocalShutdownSent as u32) != 0 + (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0 } /// Returns true if this channel is fully shut down. True here implies that no further actions /// may/will be taken on this channel, and thus this object should be freed. Any future changes /// will be handled appropriately by the chain monitor. 
pub fn is_shutdown(&self) -> bool { - if (self.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 { - assert!(self.channel_state == ChannelState::ShutdownComplete as u32); + if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 { + assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32); true } else { false } } pub fn channel_update_status(&self) -> ChannelUpdateStatus { - self.channel_update_status + self.context.channel_update_status } pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) { - self.update_time_counter += 1; - self.channel_update_status = status; + self.context.update_time_counter += 1; + self.context.channel_update_status = status; } fn check_get_channel_ready(&mut self, height: u32) -> Option { // Called: // * always when a new block/transactions are confirmed with the new height // * when funding is signed with a height of 0 - if self.funding_tx_confirmation_height == 0 && self.minimum_depth != Some(0) { + if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) { return None; } - let funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1; + let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1; if funding_tx_confirmations <= 0 { - self.funding_tx_confirmation_height = 0; + self.context.funding_tx_confirmation_height = 0; } - if funding_tx_confirmations < self.minimum_depth.unwrap_or(0) as i64 { + if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 { return None; } - let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS); + let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 { - self.channel_state |= ChannelState::OurChannelReady as u32; + self.context.channel_state |= ChannelState::OurChannelReady as u32; true } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) { - self.channel_state = ChannelState::ChannelReady as u32 | (self.channel_state & MULTI_STATE_FLAGS); - self.update_time_counter += 1; + self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS); + self.context.update_time_counter += 1; true } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) { // We got a reorg but not enough to trigger a force close, just ignore. false } else { - if self.funding_tx_confirmation_height != 0 && self.channel_state < ChannelState::ChannelReady as u32 { + if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state < ChannelState::ChannelReady as u32 { // We should never see a funding transaction on-chain until we've received // funding_signed (if we're an outbound channel), or seen funding_generated (if we're // an inbound channel - before that we have no known funding TXID). The fuzzer, @@ -5349,25 +5358,25 @@ impl Channel { #[cfg(not(fuzzing))] panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\ Do NOT broadcast a funding transaction manually - let LDK do it for you!", - self.channel_state); + self.context.channel_state); } // We got a reorg but not enough to trigger a force close, just ignore. 
false }; if need_commitment_update { - if self.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 { - if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 { + if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 { let next_per_commitment_point = - self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.secp_ctx); + self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx); return Some(msgs::ChannelReady { - channel_id: self.channel_id, + channel_id: self.context.channel_id, next_per_commitment_point, - short_channel_id_alias: Some(self.outbound_scid_alias), + short_channel_id_alias: Some(self.context.outbound_scid_alias), }); } } else { - self.monitor_pending_channel_ready = true; + self.context.monitor_pending_channel_ready = true; } } None @@ -5388,11 +5397,11 @@ impl Channel { for &(index_in_block, tx) in txdata.iter() { // Check if the transaction is the expected funding transaction, and if it is, // check that it pays the right amount to the right script. - if self.funding_tx_confirmation_height == 0 { + if self.context.funding_tx_confirmation_height == 0 { if tx.txid() == funding_txo.txid { let txo_idx = funding_txo.index as usize; if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() || - tx.output[txo_idx].value != self.channel_value_satoshis { + tx.output[txo_idx].value != self.context.channel_value_satoshis { if self.is_outbound() { // If we generated the funding transaction and it doesn't match what it // should, the client is really broken and we should just panic and @@ -5402,7 +5411,7 @@ impl Channel { #[cfg(not(fuzzing))] panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!"); } - self.update_time_counter += 1; + self.context.update_time_counter += 1; let err_reason = "funding tx had wrong script/value or output index"; return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() }); } else { @@ -5416,9 +5425,9 @@ impl Channel { } } } - self.funding_tx_confirmation_height = height; - self.funding_tx_confirmed_in = Some(*block_hash); - self.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) { + self.context.funding_tx_confirmation_height = height; + self.context.funding_tx_confirmed_in = Some(*block_hash); + self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) { Ok(scid) => Some(scid), Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"), } @@ -5428,7 +5437,7 @@ impl Channel { // send it immediately instead of waiting for a best_block_updated call (which // may have already happened for this block). if let Some(channel_ready) = self.check_get_channel_ready(height) { - log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id)); + log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id)); let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger); return Ok((Some(channel_ready), announcement_sigs)); } @@ -5479,7 +5488,7 @@ impl Channel { // forward an HTLC when our counterparty should almost certainly just fail it for expiring // ~now. 
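`scid_from_parts` above turns the confirmation height, the transaction's index within its block, and the funding output index into a BOLT 7 short channel id: block height in the top three bytes, transaction index in the next three, output index in the low two. A standalone sketch of that packing; this is not the crate's actual `scid_from_parts` implementation:

    // Standalone sketch of BOLT 7 short_channel_id packing (block | tx index | output index).
    fn scid(block_height: u64, tx_index: u64, vout: u64) -> Option<u64> {
        if block_height > 0x00ff_ffff || tx_index > 0x00ff_ffff || vout > 0xffff {
            return None; // each component must fit its field
        }
        Some((block_height << 40) | (tx_index << 16) | vout)
    }

    fn main() {
        let id = scid(500_000, 1024, 1).unwrap();
        assert_eq!(id >> 40, 500_000);
        assert_eq!((id >> 16) & 0x00ff_ffff, 1024);
        assert_eq!(id & 0xffff, 1);
    }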
let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS; - self.holding_cell_htlc_updates.retain(|htlc_update| { + self.context.holding_cell_htlc_updates.retain(|htlc_update| { match htlc_update { &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => { if *cltv_expiry <= unforwarded_htlc_cltv_limit { @@ -5491,21 +5500,21 @@ impl Channel { } }); - self.update_time_counter = cmp::max(self.update_time_counter, highest_header_time); + self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time); if let Some(channel_ready) = self.check_get_channel_ready(height) { let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer { self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger) } else { None }; - log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id)); + log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id)); return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs)); } - let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS); + let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); if non_shutdown_state >= ChannelState::ChannelReady as u32 || (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 { - let mut funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1; - if self.funding_tx_confirmation_height == 0 { + let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1; + if self.context.funding_tx_confirmation_height == 0 { // Note that check_get_channel_ready may reset funding_tx_confirmation_height to // zero if it has been reorged out, however in either case, our state flags // indicate we've already sent a channel_ready @@ -5521,14 +5530,14 @@ impl Channel { // 0-conf channel, but not doing so may lead to the // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have // to. - if funding_tx_confirmations == 0 && self.funding_tx_confirmed_in.is_some() { + if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() { let err_reason = format!("Funding transaction was un-confirmed. 
Locked at {} confs, now have {} confs.", - self.minimum_depth.unwrap(), funding_tx_confirmations); + self.context.minimum_depth.unwrap(), funding_tx_confirmations); return Err(ClosureReason::ProcessingError { err: err_reason }); } - } else if !self.is_outbound() && self.funding_tx_confirmed_in.is_none() && - height >= self.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { - log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.channel_id)); + } else if !self.is_outbound() && self.context.funding_tx_confirmed_in.is_none() && + height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { + log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.context.channel_id)); // If funding_tx_confirmed_in is unset, the channel must not be active assert!(non_shutdown_state <= ChannelState::ChannelReady as u32); assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0); @@ -5545,14 +5554,14 @@ impl Channel { /// force-close the channel, but may also indicate a harmless reorganization of a block or two /// before the channel has reached channel_ready and we can just wait for more blocks. pub fn funding_transaction_unconfirmed(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger { - if self.funding_tx_confirmation_height != 0 { + if self.context.funding_tx_confirmation_height != 0 { // We handle the funding disconnection by calling best_block_updated with a height one // below where our funding was connected, implying a reorg back to conf_height - 1. - let reorg_height = self.funding_tx_confirmation_height - 1; + let reorg_height = self.context.funding_tx_confirmation_height - 1; // We use the time field to bump the current time we set on channel updates if its // larger. If we don't know that time has moved forward, we can just set it to the last // time we saw and it will be ignored. 
- let best_time = self.update_time_counter; + let best_time = self.context.update_time_counter; match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) { Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => { assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?"); @@ -5575,52 +5584,52 @@ impl Channel { if !self.is_outbound() { panic!("Tried to open a channel for an inbound channel?"); } - if self.channel_state != ChannelState::OurInitSent as u32 { + if self.context.channel_state != ChannelState::OurInitSent as u32 { panic!("Cannot generate an open_channel after we've moved forward"); } - if self.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { panic!("Tried to send an open_channel for a channel that has already advanced"); } - let first_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx); + let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); let keys = self.get_holder_pubkeys(); msgs::OpenChannel { chain_hash, - temporary_channel_id: self.channel_id, - funding_satoshis: self.channel_value_satoshis, - push_msat: self.channel_value_satoshis * 1000 - self.value_to_self_msat, - dust_limit_satoshis: self.holder_dust_limit_satoshis, - max_htlc_value_in_flight_msat: self.holder_max_htlc_value_in_flight_msat, - channel_reserve_satoshis: self.holder_selected_channel_reserve_satoshis, - htlc_minimum_msat: self.holder_htlc_minimum_msat, - feerate_per_kw: self.feerate_per_kw as u32, + temporary_channel_id: self.context.channel_id, + funding_satoshis: self.context.channel_value_satoshis, + push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat, + dust_limit_satoshis: self.context.holder_dust_limit_satoshis, + max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, + channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, + htlc_minimum_msat: self.context.holder_htlc_minimum_msat, + feerate_per_kw: self.context.feerate_per_kw as u32, to_self_delay: self.get_holder_selected_contest_delay(), - max_accepted_htlcs: self.holder_max_accepted_htlcs, + max_accepted_htlcs: self.context.holder_max_accepted_htlcs, funding_pubkey: keys.funding_pubkey, revocation_basepoint: keys.revocation_basepoint, payment_point: keys.payment_point, delayed_payment_basepoint: keys.delayed_payment_basepoint, htlc_basepoint: keys.htlc_basepoint, first_per_commitment_point, - channel_flags: if self.config.announced_channel {1} else {0}, - shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey { + channel_flags: if self.context.config.announced_channel {1} else {0}, + shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { Some(script) => script.clone().into_inner(), None => Builder::new().into_script(), }), - channel_type: Some(self.channel_type.clone()), + channel_type: Some(self.context.channel_type.clone()), } } pub fn inbound_is_awaiting_accept(&self) -> bool { - self.inbound_awaiting_accept + self.context.inbound_awaiting_accept } /// Sets this channel to accepting 0conf, must be done before `get_accept_channel` pub fn set_0conf(&mut self) { - assert!(self.inbound_awaiting_accept); - self.minimum_depth = Some(0); + 
assert!(self.context.inbound_awaiting_accept); + self.context.minimum_depth = Some(0); } /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which @@ -5631,18 +5640,18 @@ impl Channel { if self.is_outbound() { panic!("Tried to send accept_channel for an outbound channel?"); } - if self.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) { + if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) { panic!("Tried to send accept_channel after channel had moved forward"); } - if self.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { panic!("Tried to send an accept_channel for a channel that has already advanced"); } - if !self.inbound_awaiting_accept { + if !self.context.inbound_awaiting_accept { panic!("The inbound channel has already been accepted"); } - self.user_id = user_id; - self.inbound_awaiting_accept = false; + self.context.user_id = user_id; + self.context.inbound_awaiting_accept = false; self.generate_accept_channel_message() } @@ -5653,29 +5662,29 @@ impl Channel { /// /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel fn generate_accept_channel_message(&self) -> msgs::AcceptChannel { - let first_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx); + let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); let keys = self.get_holder_pubkeys(); msgs::AcceptChannel { - temporary_channel_id: self.channel_id, - dust_limit_satoshis: self.holder_dust_limit_satoshis, - max_htlc_value_in_flight_msat: self.holder_max_htlc_value_in_flight_msat, - channel_reserve_satoshis: self.holder_selected_channel_reserve_satoshis, - htlc_minimum_msat: self.holder_htlc_minimum_msat, - minimum_depth: self.minimum_depth.unwrap(), + temporary_channel_id: self.context.channel_id, + dust_limit_satoshis: self.context.holder_dust_limit_satoshis, + max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, + channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, + htlc_minimum_msat: self.context.holder_htlc_minimum_msat, + minimum_depth: self.context.minimum_depth.unwrap(), to_self_delay: self.get_holder_selected_contest_delay(), - max_accepted_htlcs: self.holder_max_accepted_htlcs, + max_accepted_htlcs: self.context.holder_max_accepted_htlcs, funding_pubkey: keys.funding_pubkey, revocation_basepoint: keys.revocation_basepoint, payment_point: keys.payment_point, delayed_payment_basepoint: keys.delayed_payment_basepoint, htlc_basepoint: keys.htlc_basepoint, first_per_commitment_point, - shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey { + shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { Some(script) => script.clone().into_inner(), None => Builder::new().into_script(), }), - channel_type: Some(self.channel_type.clone()), + channel_type: Some(self.context.channel_type.clone()), #[cfg(taproot)] next_local_nonce: None, } @@ -5693,8 +5702,8 @@ impl Channel { /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created) fn get_outbound_funding_created_signature(&mut self, logger: &L) -> Result where L::Target: Logger { let counterparty_keys = self.build_remote_transaction_keys(); - 
let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; - Ok(self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx) + let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0) } @@ -5709,34 +5718,34 @@ impl Channel { if !self.is_outbound() { panic!("Tried to create outbound funding_created message on an inbound channel!"); } - if self.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { + if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)"); } - if self.commitment_secrets.get_min_seen_secret() != (1 << 48) || - self.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || + self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || + self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); } - self.channel_transaction_parameters.funding_outpoint = Some(funding_txo); - self.holder_signer.provide_channel_parameters(&self.channel_transaction_parameters); + self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo); + self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); let signature = match self.get_outbound_funding_created_signature(logger) { Ok(res) => res, Err(e) => { log_error!(logger, "Got bad signatures: {:?}!", e); - self.channel_transaction_parameters.funding_outpoint = None; + self.context.channel_transaction_parameters.funding_outpoint = None; return Err(e); } }; - let temporary_channel_id = self.channel_id; + let temporary_channel_id = self.context.channel_id; // Now that we're past error-generating stuff, update our local state: - self.channel_state = ChannelState::FundingCreated as u32; - self.channel_id = funding_txo.to_channel_id(); - self.funding_transaction = Some(funding_transaction); + self.context.channel_state = ChannelState::FundingCreated as u32; + self.context.channel_id = funding_txo.to_channel_id(); + self.context.funding_transaction = Some(funding_transaction); Ok(msgs::FundingCreated { temporary_channel_id, @@ -5762,7 +5771,7 @@ impl Channel { fn get_channel_announcement( &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig, ) -> Result where NS::Target: NodeSigner { - if !self.config.announced_channel { + if !self.context.config.announced_channel { return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned())); } if !self.is_usable() { @@ -5796,7 +5805,7 @@ impl Channel { NS::Target: NodeSigner, L::Target: Logger { - if 
self.funding_tx_confirmation_height == 0 || self.funding_tx_confirmation_height + 5 > best_block_height { + if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height { return None; } @@ -5804,12 +5813,12 @@ impl Channel { return None; } - if self.channel_state & ChannelState::PeerDisconnected as u32 != 0 { + if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 { log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected"); return None; } - if self.announcement_sigs_state != AnnouncementSigsState::NotSent { + if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent { return None; } @@ -5828,14 +5837,14 @@ impl Channel { }, Ok(v) => v }; - let our_bitcoin_sig = match self.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.secp_ctx) { + let our_bitcoin_sig = match self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) { Err(_) => { log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!"); return None; }, Ok(v) => v }; - self.announcement_sigs_state = AnnouncementSigsState::MessageSent; + self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent; Some(msgs::AnnouncementSignatures { channel_id: self.channel_id(), @@ -5850,14 +5859,14 @@ impl Channel { fn sign_channel_announcement( &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement ) -> Result where NS::Target: NodeSigner { - if let Some((their_node_sig, their_bitcoin_sig)) = self.announcement_sigs { + if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs { let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node) .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?); let were_node_one = announcement.node_id_1 == our_node_key; let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?; - let our_bitcoin_sig = self.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.secp_ctx) + let our_bitcoin_sig = self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?; Ok(msgs::ChannelAnnouncement { node_signature_1: if were_node_one { our_node_sig } else { their_node_sig }, @@ -5882,19 +5891,19 @@ impl Channel { let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]); - if self.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.get_counterparty_node_id()).is_err() { + if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.get_counterparty_node_id()).is_err() { return Err(ChannelError::Close(format!( "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}", &announcement, self.get_counterparty_node_id()))); } - if self.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.counterparty_funding_pubkey()).is_err() { + if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.counterparty_funding_pubkey()).is_err() { return Err(ChannelError::Close(format!( "Bad announcement_signatures. 
Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})", &announcement, self.counterparty_funding_pubkey()))); } - self.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature)); - if self.funding_tx_confirmation_height == 0 || self.funding_tx_confirmation_height + 5 > best_block_height { + self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature)); + if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height { return Err(ChannelError::Ignore( "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned())); } @@ -5907,7 +5916,7 @@ impl Channel { pub fn get_signed_channel_announcement( &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig ) -> Option where NS::Target: NodeSigner { - if self.funding_tx_confirmation_height == 0 || self.funding_tx_confirmation_height + 5 > best_block_height { + if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height { return None; } let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) { @@ -5923,8 +5932,8 @@ impl Channel { /// May panic if called on a channel that wasn't immediately-previously /// self.remove_uncommitted_htlcs_and_mark_paused()'d pub fn get_channel_reestablish(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger { - assert_eq!(self.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32); - assert_ne!(self.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER); + assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32); + assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER); // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming // current to_remote balances. However, it no longer has any use, and thus is now simply // set to a dummy (but valid, as required by the spec) public key. @@ -5933,8 +5942,8 @@ impl Channel { // valid, and valid in fuzzing mode's arbitrary validity criteria: let mut pk = [2; 33]; pk[1] = 0xff; let dummy_pubkey = PublicKey::from_slice(&pk).unwrap(); - let remote_last_secret = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { - let remote_last_secret = self.commitment_secrets.get_secret(self.cur_counterparty_commitment_transaction_number + 2).unwrap(); + let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { + let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap(); log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id())); remote_last_secret } else { @@ -5953,7 +5962,7 @@ impl Channel { // next_local_commitment_number is the next commitment_signed number we expect to // receive (indicating if they need to resend one that we missed). 
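Commitment transaction numbers in this file count down from `INITIAL_COMMITMENT_NUMBER` ((1 << 48) - 1), while `channel_reestablish` carries how many commitments each side has used so far, so the `next_local_commitment_number` and `next_remote_commitment_number` fields here are computed by subtracting the current counter from that constant. A small standalone illustration of the conversion (not the real message construction):

    // Standalone illustration: commitment numbers count down from INITIAL_COMMITMENT_NUMBER,
    // while the wire format carries how many commitments have been used so far.
    const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

    fn next_local_commitment_number(cur_holder_commitment_transaction_number: u64) -> u64 {
        INITIAL_COMMITMENT_NUMBER - cur_holder_commitment_transaction_number
    }

    fn main() {
        // A channel that has only signed the initial commitment expects commitment number 1 next.
        assert_eq!(next_local_commitment_number(INITIAL_COMMITMENT_NUMBER - 1), 1);
        // After one further update it expects number 2, and so on.
        assert_eq!(next_local_commitment_number(INITIAL_COMMITMENT_NUMBER - 2), 2);
    }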
- next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number, + next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number, // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to // receive, however we track it by the next commitment number for a remote transaction // (which is one further, as they always revoke previous commitment transaction, not @@ -5961,7 +5970,7 @@ impl Channel { // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't // overflow here. - next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number - 1, + next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1, your_last_per_commitment_secret: remote_last_secret, my_current_per_commitment_point: dummy_pubkey, // TODO(dual_funding): If we've sent `commtiment_signed` for an interactive transaction @@ -6011,10 +6020,10 @@ impl Channel { fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L) -> Result, ChannelError> where L::Target: Logger { - if (self.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) { + if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) { return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned())); } - let channel_total_msat = self.channel_value_satoshis * 1000; + let channel_total_msat = self.context.channel_value_satoshis * 1000; if amount_msat > channel_total_msat { return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat))); } @@ -6034,7 +6043,7 @@ impl Channel { available_balances.next_outbound_htlc_limit_msat))); } - if (self.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 { + if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 { // Note that this should never really happen, if we're !is_live() on receipt of an // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow // the user to send directly into a !is_live() channel. 
However, if we @@ -6044,7 +6053,7 @@ impl Channel { return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned())); } - let need_holding_cell = (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0; + let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0; log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat, if force_holding_cell { "into holding cell" } else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" } @@ -6056,7 +6065,7 @@ impl Channel { // Now update local state: if force_holding_cell { - self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC { + self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC { amount_msat, payment_hash, cltv_expiry, @@ -6066,8 +6075,8 @@ impl Channel { return Ok(None); } - self.pending_outbound_htlcs.push(OutboundHTLCOutput { - htlc_id: self.next_holder_htlc_id, + self.context.pending_outbound_htlcs.push(OutboundHTLCOutput { + htlc_id: self.context.next_holder_htlc_id, amount_msat, payment_hash: payment_hash.clone(), cltv_expiry, @@ -6076,14 +6085,14 @@ impl Channel { }); let res = msgs::UpdateAddHTLC { - channel_id: self.channel_id, - htlc_id: self.next_holder_htlc_id, + channel_id: self.context.channel_id, + htlc_id: self.context.next_holder_htlc_id, amount_msat, payment_hash, cltv_expiry, onion_routing_packet, }; - self.next_holder_htlc_id += 1; + self.context.next_holder_htlc_id += 1; Ok(Some(res)) } @@ -6093,7 +6102,7 @@ impl Channel { // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we // fail to generate this, we still are at least at a position where upgrading their status // is acceptable. 
- for htlc in self.pending_inbound_htlcs.iter_mut() { + for htlc in self.context.pending_inbound_htlcs.iter_mut() { let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state { Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone())) } else { None }; @@ -6102,7 +6111,7 @@ impl Channel { htlc.state = state; } } - for htlc in self.pending_outbound_htlcs.iter_mut() { + for htlc in self.context.pending_outbound_htlcs.iter_mut() { if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state { log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); // Grab the preimage, if it exists, instead of cloning @@ -6111,55 +6120,55 @@ impl Channel { htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason); } } - if let Some((feerate, update_state)) = self.pending_update_fee { + if let Some((feerate, update_state)) = self.context.pending_update_fee { if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce { debug_assert!(!self.is_outbound()); log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); - self.feerate_per_kw = feerate; - self.pending_update_fee = None; + self.context.feerate_per_kw = feerate; + self.context.pending_update_fee = None; } } - self.resend_order = RAACommitmentOrder::RevokeAndACKFirst; + self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst; let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger); let htlcs: Vec<(HTLCOutputInCommitment, Option>)> = htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect(); - if self.announcement_sigs_state == AnnouncementSigsState::MessageSent { - self.announcement_sigs_state = AnnouncementSigsState::Committed; + if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent { + self.context.announcement_sigs_state = AnnouncementSigsState::Committed; } - self.latest_monitor_update_id += 1; + self.context.latest_monitor_update_id += 1; let monitor_update = ChannelMonitorUpdate { - update_id: self.latest_monitor_update_id, + update_id: self.context.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid: counterparty_commitment_txid, htlc_outputs: htlcs.clone(), - commitment_number: self.cur_counterparty_commitment_transaction_number, - their_per_commitment_point: self.counterparty_cur_commitment_point.unwrap() + commitment_number: self.context.cur_counterparty_commitment_transaction_number, + their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap() }] }; - self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32; + self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32; monitor_update } fn build_commitment_no_state_update(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger { let counterparty_keys = self.build_remote_transaction_keys(); - let commitment_stats = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); + let commitment_stats = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); let counterparty_commitment_txid = 
commitment_stats.tx.trust().txid(); #[cfg(any(test, fuzzing))] { if !self.is_outbound() { - let projected_commit_tx_info = self.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take(); - *self.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None; + let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take(); + *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None; if let Some(info) = projected_commit_tx_info { - let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len(); + let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len(); if info.total_pending_htlcs == total_pending_htlcs - && info.next_holder_htlc_id == self.next_holder_htlc_id - && info.next_counterparty_htlc_id == self.next_counterparty_htlc_id - && info.feerate == self.feerate_per_kw { - let actual_fee = Self::commit_tx_fee_msat(self.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.opt_anchors()); + && info.next_holder_htlc_id == self.context.next_holder_htlc_id + && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id + && info.feerate == self.context.feerate_per_kw { + let actual_fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.opt_anchors()); assert_eq!(actual_fee, info.fee); } } @@ -6177,7 +6186,7 @@ impl Channel { self.build_commitment_no_state_update(logger); let counterparty_keys = self.build_remote_transaction_keys(); - let commitment_stats = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); + let commitment_stats = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); let counterparty_commitment_txid = commitment_stats.tx.trust().txid(); let (signature, htlc_signatures); @@ -6187,7 +6196,7 @@ impl Channel { htlcs.push(htlc); } - let res = self.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.secp_ctx) + let res = self.context.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx) .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?; signature = res.0; htlc_signatures = res.1; @@ -6207,7 +6216,7 @@ impl Channel { } Ok((msgs::CommitmentSigned { - channel_id: self.channel_id, + channel_id: self.context.channel_id, signature, htlc_signatures, #[cfg(taproot)] @@ -6235,14 +6244,14 @@ impl Channel { /// Get forwarding information for the counterparty. 
pub fn counterparty_forwarding_info(&self) -> Option { - self.counterparty_forwarding_info.clone() + self.context.counterparty_forwarding_info.clone() } pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> { - if msg.contents.htlc_minimum_msat >= self.channel_value_satoshis * 1000 { + if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 { return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string())); } - self.counterparty_forwarding_info = Some(CounterpartyForwardingInfo { + self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo { fee_base_msat: msg.contents.fee_base_msat, fee_proportional_millionths: msg.contents.fee_proportional_millionths, cltv_expiry_delta: msg.contents.cltv_expiry_delta @@ -6260,35 +6269,35 @@ impl Channel { target_feerate_sats_per_kw: Option, override_shutdown_script: Option) -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError> where SP::Target: SignerProvider { - for htlc in self.pending_outbound_htlcs.iter() { + for htlc in self.context.pending_outbound_htlcs.iter() { if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()}); } } - if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 { - if (self.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 { + if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 { + if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 { return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()}); } - else if (self.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 { + else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 { return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()}); } } - if self.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() { + if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() { return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()}); } - assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0); - if self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 { + assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); + if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 { return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()}); } // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown // script is set, we just force-close and call it a day. 
let mut chan_closed = false; - if self.channel_state < ChannelState::FundingSent as u32 { + if self.context.channel_state < ChannelState::FundingSent as u32 { chan_closed = true; } - let update_shutdown_script = match self.shutdown_scriptpubkey { + let update_shutdown_script = match self.context.shutdown_scriptpubkey { Some(_) => false, None if !chan_closed => { // use override shutdown script if provided @@ -6305,44 +6314,44 @@ impl Channel { if !shutdown_scriptpubkey.is_compatible(their_features) { return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() }); } - self.shutdown_scriptpubkey = Some(shutdown_scriptpubkey); + self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey); true }, None => false, }; // From here on out, we may not fail! - self.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw; - if self.channel_state < ChannelState::FundingSent as u32 { - self.channel_state = ChannelState::ShutdownComplete as u32; + self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw; + if self.context.channel_state < ChannelState::FundingSent as u32 { + self.context.channel_state = ChannelState::ShutdownComplete as u32; } else { - self.channel_state |= ChannelState::LocalShutdownSent as u32; + self.context.channel_state |= ChannelState::LocalShutdownSent as u32; } - self.update_time_counter += 1; + self.context.update_time_counter += 1; let monitor_update = if update_shutdown_script { - self.latest_monitor_update_id += 1; + self.context.latest_monitor_update_id += 1; let monitor_update = ChannelMonitorUpdate { - update_id: self.latest_monitor_update_id, + update_id: self.context.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::ShutdownScript { scriptpubkey: self.get_closing_scriptpubkey(), }], }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); if self.push_blockable_mon_update(monitor_update) { - self.pending_monitor_updates.last().map(|upd| &upd.update) + self.context.pending_monitor_updates.last().map(|upd| &upd.update) } else { None } } else { None }; let shutdown = msgs::Shutdown { - channel_id: self.channel_id, + channel_id: self.context.channel_id, scriptpubkey: self.get_closing_scriptpubkey(), }; // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send // our shutdown until we've committed all of the pending changes. - self.holding_cell_update_fee = None; - let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len()); - self.holding_cell_htlc_updates.retain(|htlc_update| { + self.context.holding_cell_update_fee = None; + let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len()); + self.context.holding_cell_htlc_updates.retain(|htlc_update| { match htlc_update { &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => { dropped_outbound_htlcs.push((source.clone(), payment_hash.clone())); @@ -6368,16 +6377,16 @@ impl Channel { // called during initialization prior to the chain_monitor in the encompassing ChannelManager // being fully configured in some cases. Thus, its likely any monitor events we generate will // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more. 
- assert!(self.channel_state != ChannelState::ShutdownComplete as u32); + assert!(self.context.channel_state != ChannelState::ShutdownComplete as u32); // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and // return them to fail the payment. - let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len()); + let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len()); let counterparty_node_id = self.get_counterparty_node_id(); - for htlc_update in self.holding_cell_htlc_updates.drain(..) { + for htlc_update in self.context.holding_cell_htlc_updates.drain(..) { match htlc_update { HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => { - dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id)); + dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.context.channel_id)); }, _ => {} } @@ -6390,22 +6399,22 @@ impl Channel { // funding transaction, don't return a funding txo (which prevents providing the // monitor update to the user, even if we return one). // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more. - if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 { - self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID; + if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 { + self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID; Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate { - update_id: self.latest_monitor_update_id, + update_id: self.context.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }], })) } else { None } } else { None }; - self.channel_state = ChannelState::ShutdownComplete as u32; - self.update_time_counter += 1; + self.context.channel_state = ChannelState::ShutdownComplete as u32; + self.context.update_time_counter += 1; (monitor_update, dropped_outbound_htlcs) } pub fn inflight_htlc_sources(&self) -> impl Iterator { - self.holding_cell_htlc_updates.iter() + self.context.holding_cell_htlc_updates.iter() .flat_map(|htlc_update| { match htlc_update { HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } @@ -6413,7 +6422,7 @@ impl Channel { _ => None, } }) - .chain(self.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash))) + .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash))) } } @@ -6484,7 +6493,7 @@ impl Writeable for Channel { // `user_id` used to be a single u64 value. In order to remain backwards compatible with // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write // the low bytes now and the optional high bytes later. - let user_id_low = self.user_id as u64; + let user_id_low = self.context.user_id as u64; user_id_low.write(writer)?; // Version 1 deserializers expected to read parts of the config object here. Version 2 @@ -6492,14 +6501,14 @@ impl Writeable for Channel { // `minimum_depth` we simply write dummy values here. 
writer.write_all(&[0; 8])?; - self.channel_id.write(writer)?; - (self.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?; - self.channel_value_satoshis.write(writer)?; + self.context.channel_id.write(writer)?; + (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?; + self.context.channel_value_satoshis.write(writer)?; - self.latest_monitor_update_id.write(writer)?; + self.context.latest_monitor_update_id.write(writer)?; let mut key_data = VecWriter(Vec::new()); - self.holder_signer.write(&mut key_data)?; + self.context.holder_signer.write(&mut key_data)?; assert!(key_data.0.len() < core::usize::MAX); assert!(key_data.0.len() < core::u32::MAX as usize); (key_data.0.len() as u32).write(writer)?; @@ -6507,24 +6516,24 @@ impl Writeable for Channel { // Write out the old serialization for shutdown_pubkey for backwards compatibility, if // deserialized from that format. - match self.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) { + match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) { Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?, None => [0u8; PUBLIC_KEY_SIZE].write(writer)?, } - self.destination_script.write(writer)?; + self.context.destination_script.write(writer)?; - self.cur_holder_commitment_transaction_number.write(writer)?; - self.cur_counterparty_commitment_transaction_number.write(writer)?; - self.value_to_self_msat.write(writer)?; + self.context.cur_holder_commitment_transaction_number.write(writer)?; + self.context.cur_counterparty_commitment_transaction_number.write(writer)?; + self.context.value_to_self_msat.write(writer)?; let mut dropped_inbound_htlcs = 0; - for htlc in self.pending_inbound_htlcs.iter() { + for htlc in self.context.pending_inbound_htlcs.iter() { if let InboundHTLCState::RemoteAnnounced(_) = htlc.state { dropped_inbound_htlcs += 1; } } - (self.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; - for htlc in self.pending_inbound_htlcs.iter() { + (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; + for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state { continue; // Drop } @@ -6554,8 +6563,8 @@ impl Writeable for Channel { let mut preimages: Vec<&Option> = vec![]; - (self.pending_outbound_htlcs.len() as u64).write(writer)?; - for htlc in self.pending_outbound_htlcs.iter() { + (self.context.pending_outbound_htlcs.len() as u64).write(writer)?; + for htlc in self.context.pending_outbound_htlcs.iter() { htlc.htlc_id.write(writer)?; htlc.amount_msat.write(writer)?; htlc.cltv_expiry.write(writer)?; @@ -6593,8 +6602,8 @@ impl Writeable for Channel { } } - (self.holding_cell_htlc_updates.len() as u64).write(writer)?; - for update in self.holding_cell_htlc_updates.iter() { + (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?; + for update in self.context.holding_cell_htlc_updates.iter() { match update { &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet } => { 0u8.write(writer)?; @@ -6617,43 +6626,43 @@ impl Writeable for Channel { } } - match self.resend_order { + match self.context.resend_order { RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?, RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?, } - self.monitor_pending_channel_ready.write(writer)?; - self.monitor_pending_revoke_and_ack.write(writer)?; - 
self.monitor_pending_commitment_signed.write(writer)?; + self.context.monitor_pending_channel_ready.write(writer)?; + self.context.monitor_pending_revoke_and_ack.write(writer)?; + self.context.monitor_pending_commitment_signed.write(writer)?; - (self.monitor_pending_forwards.len() as u64).write(writer)?; - for &(ref pending_forward, ref htlc_id) in self.monitor_pending_forwards.iter() { + (self.context.monitor_pending_forwards.len() as u64).write(writer)?; + for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() { pending_forward.write(writer)?; htlc_id.write(writer)?; } - (self.monitor_pending_failures.len() as u64).write(writer)?; - for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.monitor_pending_failures.iter() { + (self.context.monitor_pending_failures.len() as u64).write(writer)?; + for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() { htlc_source.write(writer)?; payment_hash.write(writer)?; fail_reason.write(writer)?; } if self.is_outbound() { - self.pending_update_fee.map(|(a, _)| a).write(writer)?; - } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.pending_update_fee { + self.context.pending_update_fee.map(|(a, _)| a).write(writer)?; + } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee { Some(feerate).write(writer)?; } else { // As for inbound HTLCs, if the update was only announced and never committed in a // commitment_signed, drop it. None::.write(writer)?; } - self.holding_cell_update_fee.write(writer)?; + self.context.holding_cell_update_fee.write(writer)?; - self.next_holder_htlc_id.write(writer)?; - (self.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?; - self.update_time_counter.write(writer)?; - self.feerate_per_kw.write(writer)?; + self.context.next_holder_htlc_id.write(writer)?; + (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?; + self.context.update_time_counter.write(writer)?; + self.context.feerate_per_kw.write(writer)?; // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here, // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe @@ -6661,25 +6670,25 @@ impl Writeable for Channel { // consider the stale state on reload. 0u8.write(writer)?; - self.funding_tx_confirmed_in.write(writer)?; - self.funding_tx_confirmation_height.write(writer)?; - self.short_channel_id.write(writer)?; + self.context.funding_tx_confirmed_in.write(writer)?; + self.context.funding_tx_confirmation_height.write(writer)?; + self.context.short_channel_id.write(writer)?; - self.counterparty_dust_limit_satoshis.write(writer)?; - self.holder_dust_limit_satoshis.write(writer)?; - self.counterparty_max_htlc_value_in_flight_msat.write(writer)?; + self.context.counterparty_dust_limit_satoshis.write(writer)?; + self.context.holder_dust_limit_satoshis.write(writer)?; + self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?; // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead. 
- self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?; + self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?; - self.counterparty_htlc_minimum_msat.write(writer)?; - self.holder_htlc_minimum_msat.write(writer)?; - self.counterparty_max_accepted_htlcs.write(writer)?; + self.context.counterparty_htlc_minimum_msat.write(writer)?; + self.context.holder_htlc_minimum_msat.write(writer)?; + self.context.counterparty_max_accepted_htlcs.write(writer)?; // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead. - self.minimum_depth.unwrap_or(0).write(writer)?; + self.context.minimum_depth.unwrap_or(0).write(writer)?; - match &self.counterparty_forwarding_info { + match &self.context.counterparty_forwarding_info { Some(info) => { 1u8.write(writer)?; info.fee_base_msat.write(writer)?; @@ -6689,23 +6698,23 @@ impl Writeable for Channel { None => 0u8.write(writer)? } - self.channel_transaction_parameters.write(writer)?; - self.funding_transaction.write(writer)?; + self.context.channel_transaction_parameters.write(writer)?; + self.context.funding_transaction.write(writer)?; - self.counterparty_cur_commitment_point.write(writer)?; - self.counterparty_prev_commitment_point.write(writer)?; - self.counterparty_node_id.write(writer)?; + self.context.counterparty_cur_commitment_point.write(writer)?; + self.context.counterparty_prev_commitment_point.write(writer)?; + self.context.counterparty_node_id.write(writer)?; - self.counterparty_shutdown_scriptpubkey.write(writer)?; + self.context.counterparty_shutdown_scriptpubkey.write(writer)?; - self.commitment_secrets.write(writer)?; + self.context.commitment_secrets.write(writer)?; - self.channel_update_status.write(writer)?; + self.context.channel_update_status.write(writer)?; #[cfg(any(test, fuzzing))] - (self.historical_inbound_htlc_fulfills.len() as u64).write(writer)?; + (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?; #[cfg(any(test, fuzzing))] - for htlc in self.historical_inbound_htlc_fulfills.iter() { + for htlc in self.context.historical_inbound_htlc_fulfills.iter() { htlc.write(writer)?; } @@ -6713,62 +6722,62 @@ impl Writeable for Channel { // older clients fail to deserialize this channel at all. If the type is // only-static-remote-key, we simply consider it "default" and don't write the channel type // out at all. - let chan_type = if self.channel_type != ChannelTypeFeatures::only_static_remote_key() { - Some(&self.channel_type) } else { None }; + let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() { + Some(&self.context.channel_type) } else { None }; // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to // a different percentage of the channel value then 10%, which older versions of LDK used // to set it to before the percentage was made configurable. 
let serialized_holder_selected_reserve = - if self.holder_selected_channel_reserve_satoshis != Self::get_legacy_default_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) - { Some(self.holder_selected_channel_reserve_satoshis) } else { None }; + if self.context.holder_selected_channel_reserve_satoshis != Self::get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis) + { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None }; let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config; old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY; let serialized_holder_htlc_max_in_flight = - if self.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis, &old_max_in_flight_percent_config) - { Some(self.holder_max_htlc_value_in_flight_msat) } else { None }; + if self.context.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config) + { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None }; - let channel_pending_event_emitted = Some(self.channel_pending_event_emitted); - let channel_ready_event_emitted = Some(self.channel_ready_event_emitted); + let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted); + let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted); // `user_id` used to be a single u64 value. In order to remain backwards compatible with // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore, // we write the high bytes as an option here. - let user_id_high_opt = Some((self.user_id >> 64) as u64); + let user_id_high_opt = Some((self.context.user_id >> 64) as u64); - let holder_max_accepted_htlcs = if self.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.holder_max_accepted_htlcs) }; + let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) }; write_tlv_fields!(writer, { - (0, self.announcement_sigs, option), + (0, self.context.announcement_sigs, option), // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a // default value instead of being Option<>al. Thus, to maintain compatibility we write // them twice, once with their original default values above, and once as an option // here. On the read side, old versions will simply ignore the odd-type entries here, // and new versions map the default values to None and allow the TLV entries here to // override that. 
- (1, self.minimum_depth, option), + (1, self.context.minimum_depth, option), (2, chan_type, option), - (3, self.counterparty_selected_channel_reserve_satoshis, option), + (3, self.context.counterparty_selected_channel_reserve_satoshis, option), (4, serialized_holder_selected_reserve, option), - (5, self.config, required), + (5, self.context.config, required), (6, serialized_holder_htlc_max_in_flight, option), - (7, self.shutdown_scriptpubkey, option), - (9, self.target_closing_feerate_sats_per_kw, option), - (11, self.monitor_pending_finalized_fulfills, vec_type), - (13, self.channel_creation_height, required), + (7, self.context.shutdown_scriptpubkey, option), + (9, self.context.target_closing_feerate_sats_per_kw, option), + (11, self.context.monitor_pending_finalized_fulfills, vec_type), + (13, self.context.channel_creation_height, required), (15, preimages, vec_type), - (17, self.announcement_sigs_state, required), - (19, self.latest_inbound_scid_alias, option), - (21, self.outbound_scid_alias, required), + (17, self.context.announcement_sigs_state, required), + (19, self.context.latest_inbound_scid_alias, option), + (21, self.context.outbound_scid_alias, required), (23, channel_ready_event_emitted, option), (25, user_id_high_opt, option), - (27, self.channel_keys_id, required), + (27, self.context.channel_keys_id, required), (28, holder_max_accepted_htlcs, option), - (29, self.temporary_channel_id, option), + (29, self.context.temporary_channel_id, option), (31, channel_pending_event_emitted, option), - (33, self.pending_monitor_updates, vec_type), + (33, self.context.pending_monitor_updates, vec_type), }); Ok(()) @@ -7125,122 +7134,124 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS); Ok(Channel { - user_id, + context: ChannelContext { + user_id, - config: config.unwrap(), + config: config.unwrap(), - prev_config: None, + prev_config: None, - // Note that we don't care about serializing handshake limits as we only ever serialize - // channel data after the handshake has completed. - inbound_handshake_limits_override: None, + // Note that we don't care about serializing handshake limits as we only ever serialize + // channel data after the handshake has completed. 
+ inbound_handshake_limits_override: None, - channel_id, - temporary_channel_id, - channel_state, - announcement_sigs_state: announcement_sigs_state.unwrap(), - secp_ctx, - channel_value_satoshis, - - latest_monitor_update_id, - - holder_signer, - shutdown_scriptpubkey, - destination_script, - - cur_holder_commitment_transaction_number, - cur_counterparty_commitment_transaction_number, - value_to_self_msat, - - holder_max_accepted_htlcs, - pending_inbound_htlcs, - pending_outbound_htlcs, - holding_cell_htlc_updates, - - resend_order, - - monitor_pending_channel_ready, - monitor_pending_revoke_and_ack, - monitor_pending_commitment_signed, - monitor_pending_forwards, - monitor_pending_failures, - monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(), - - pending_update_fee, - holding_cell_update_fee, - next_holder_htlc_id, - next_counterparty_htlc_id, - update_time_counter, - feerate_per_kw, + channel_id, + temporary_channel_id, + channel_state, + announcement_sigs_state: announcement_sigs_state.unwrap(), + secp_ctx, + channel_value_satoshis, - #[cfg(debug_assertions)] - holder_max_commitment_tx_output: Mutex::new((0, 0)), - #[cfg(debug_assertions)] - counterparty_max_commitment_tx_output: Mutex::new((0, 0)), + latest_monitor_update_id, - last_sent_closing_fee: None, - pending_counterparty_closing_signed: None, - closing_fee_limits: None, - target_closing_feerate_sats_per_kw, + holder_signer, + shutdown_scriptpubkey, + destination_script, - inbound_awaiting_accept: false, + cur_holder_commitment_transaction_number, + cur_counterparty_commitment_transaction_number, + value_to_self_msat, - funding_tx_confirmed_in, - funding_tx_confirmation_height, - short_channel_id, - channel_creation_height: channel_creation_height.unwrap(), + holder_max_accepted_htlcs, + pending_inbound_htlcs, + pending_outbound_htlcs, + holding_cell_htlc_updates, - counterparty_dust_limit_satoshis, - holder_dust_limit_satoshis, - counterparty_max_htlc_value_in_flight_msat, - holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(), - counterparty_selected_channel_reserve_satoshis, - holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(), - counterparty_htlc_minimum_msat, - holder_htlc_minimum_msat, - counterparty_max_accepted_htlcs, - minimum_depth, + resend_order, - counterparty_forwarding_info, + monitor_pending_channel_ready, + monitor_pending_revoke_and_ack, + monitor_pending_commitment_signed, + monitor_pending_forwards, + monitor_pending_failures, + monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(), - channel_transaction_parameters: channel_parameters, - funding_transaction, + pending_update_fee, + holding_cell_update_fee, + next_holder_htlc_id, + next_counterparty_htlc_id, + update_time_counter, + feerate_per_kw, - counterparty_cur_commitment_point, - counterparty_prev_commitment_point, - counterparty_node_id, + #[cfg(debug_assertions)] + holder_max_commitment_tx_output: Mutex::new((0, 0)), + #[cfg(debug_assertions)] + counterparty_max_commitment_tx_output: Mutex::new((0, 0)), - counterparty_shutdown_scriptpubkey, + last_sent_closing_fee: None, + pending_counterparty_closing_signed: None, + closing_fee_limits: None, + target_closing_feerate_sats_per_kw, - commitment_secrets, + inbound_awaiting_accept: false, - channel_update_status, - closing_signed_in_flight: false, + funding_tx_confirmed_in, + funding_tx_confirmation_height, + short_channel_id, + channel_creation_height: 
channel_creation_height.unwrap(), - announcement_sigs, + counterparty_dust_limit_satoshis, + holder_dust_limit_satoshis, + counterparty_max_htlc_value_in_flight_msat, + holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(), + counterparty_selected_channel_reserve_satoshis, + holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(), + counterparty_htlc_minimum_msat, + holder_htlc_minimum_msat, + counterparty_max_accepted_htlcs, + minimum_depth, - #[cfg(any(test, fuzzing))] - next_local_commitment_tx_fee_info_cached: Mutex::new(None), - #[cfg(any(test, fuzzing))] - next_remote_commitment_tx_fee_info_cached: Mutex::new(None), + counterparty_forwarding_info, - workaround_lnd_bug_4006: None, - sent_message_awaiting_response: None, + channel_transaction_parameters: channel_parameters, + funding_transaction, - latest_inbound_scid_alias, - // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing - outbound_scid_alias: outbound_scid_alias.unwrap_or(0), + counterparty_cur_commitment_point, + counterparty_prev_commitment_point, + counterparty_node_id, - channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true), - channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true), + counterparty_shutdown_scriptpubkey, - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills, + commitment_secrets, + + channel_update_status, + closing_signed_in_flight: false, + + announcement_sigs, + + #[cfg(any(test, fuzzing))] + next_local_commitment_tx_fee_info_cached: Mutex::new(None), + #[cfg(any(test, fuzzing))] + next_remote_commitment_tx_fee_info_cached: Mutex::new(None), - channel_type: channel_type.unwrap(), - channel_keys_id, + workaround_lnd_bug_4006: None, + sent_message_awaiting_response: None, - pending_monitor_updates: pending_monitor_updates.unwrap(), + latest_inbound_scid_alias, + // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing + outbound_scid_alias: outbound_scid_alias.unwrap_or(0), + + channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true), + channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true), + + #[cfg(any(test, fuzzing))] + historical_inbound_htlc_fulfills, + + channel_type: channel_type.unwrap(), + channel_keys_id, + + pending_monitor_updates: pending_monitor_updates.unwrap(), + } }) } } @@ -7428,11 +7439,11 @@ mod tests { let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0); accept_channel_msg.dust_limit_satoshis = 546; node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap(); - node_a_chan.holder_dust_limit_satoshis = 1560; + node_a_chan.context.holder_dust_limit_satoshis = 1560; // Put some inbound and outbound HTLCs in A's channel. let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's. 
- node_a_chan.pending_inbound_htlcs.push(InboundHTLCOutput { + node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput { htlc_id: 0, amount_msat: htlc_amount_msat, payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()), @@ -7440,7 +7451,7 @@ mod tests { state: InboundHTLCState::Committed, }); - node_a_chan.pending_outbound_htlcs.push(OutboundHTLCOutput { + node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput { htlc_id: 1, amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's. payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()), @@ -7458,13 +7469,13 @@ mod tests { // the dust limit check. let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered); let local_commit_tx_fee = node_a_chan.next_local_commit_tx_fee_msat(htlc_candidate, None); - let local_commit_fee_0_htlcs = Channel::::commit_tx_fee_msat(node_a_chan.feerate_per_kw, 0, node_a_chan.opt_anchors()); + let local_commit_fee_0_htlcs = Channel::::commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.opt_anchors()); assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs); // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all // of the HTLCs are seen to be above the dust limit. - node_a_chan.channel_transaction_parameters.is_outbound_from_holder = false; - let remote_commit_fee_3_htlcs = Channel::::commit_tx_fee_msat(node_a_chan.feerate_per_kw, 3, node_a_chan.opt_anchors()); + node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false; + let remote_commit_fee_3_htlcs = Channel::::commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.opt_anchors()); let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered); let remote_commit_tx_fee = node_a_chan.next_remote_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs); @@ -7486,32 +7497,32 @@ mod tests { let config = UserConfig::default(); let mut chan = Channel::::new_outbound(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); - let commitment_tx_fee_0_htlcs = Channel::::commit_tx_fee_msat(chan.feerate_per_kw, 0, chan.opt_anchors()); - let commitment_tx_fee_1_htlc = Channel::::commit_tx_fee_msat(chan.feerate_per_kw, 1, chan.opt_anchors()); + let commitment_tx_fee_0_htlcs = Channel::::commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.opt_anchors()); + let commitment_tx_fee_1_htlc = Channel::::commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.opt_anchors()); // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be // counted as dust when it shouldn't be. - let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.opt_anchors()) / 1000) + chan.holder_dust_limit_satoshis + 1) * 1000; + let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000; let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered); let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc); // If swapped: this HTLC would be counted as non-dust when it shouldn't be. 
- let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.opt_anchors()) / 1000) + chan.holder_dust_limit_satoshis - 1) * 1000; + let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000; let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered); let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs); - chan.channel_transaction_parameters.is_outbound_from_holder = false; + chan.context.channel_transaction_parameters.is_outbound_from_holder = false; // If swapped: this HTLC would be counted as non-dust when it shouldn't be. - let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.opt_anchors()) / 1000) + chan.counterparty_dust_limit_satoshis + 1) * 1000; + let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000; let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered); let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs); // If swapped: this HTLC would be counted as dust when it shouldn't be. - let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.opt_anchors()) / 1000) + chan.counterparty_dust_limit_satoshis - 1) * 1000; + let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000; let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered); let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc); @@ -7597,13 +7608,13 @@ mod tests { // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, // which is set to the lower bound + 1 (2%) of the `channel_value`. let chan_1 = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap(); - let chan_1_value_msat = chan_1.channel_value_satoshis * 1000; - assert_eq!(chan_1.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64); + let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000; + assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64); // Test with the upper bound - 1 of valid values (99%). 
let chan_2 = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap(); - let chan_2_value_msat = chan_2.channel_value_satoshis * 1000; - assert_eq!(chan_2.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64); + let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000; + assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64); let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash()); @@ -7611,39 +7622,39 @@ mod tests { // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, // which is set to the lower bound - 1 (2%) of the `channel_value`. let chan_3 = Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap(); - let chan_3_value_msat = chan_3.channel_value_satoshis * 1000; - assert_eq!(chan_3.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64); + let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000; + assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64); // Test with the upper bound - 1 of valid values (99%). let chan_4 = Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap(); - let chan_4_value_msat = chan_4.channel_value_satoshis * 1000; - assert_eq!(chan_4.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64); + let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000; + assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64); // Test that `new_outbound` uses the lower bound of the configurable percentage values (1%) // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1. let chan_5 = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap(); - let chan_5_value_msat = chan_5.channel_value_satoshis * 1000; - assert_eq!(chan_5.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64); + let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000; + assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64); // Test that `new_outbound` uses the upper bound of the configurable percentage values // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value // than 100. 
let chan_6 = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap(); - let chan_6_value_msat = chan_6.channel_value_satoshis * 1000; - assert_eq!(chan_6.holder_max_htlc_value_in_flight_msat, chan_6_value_msat); + let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000; + assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat); // Test that `new_from_req` uses the lower bound of the configurable percentage values (1%) // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1. let chan_7 = Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap(); - let chan_7_value_msat = chan_7.channel_value_satoshis * 1000; - assert_eq!(chan_7.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64); + let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000; + assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64); // Test that `new_from_req` uses the upper bound of the configurable percentage values // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value // than 100. let chan_8 = Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap(); - let chan_8_value_msat = chan_8.channel_value_satoshis * 1000; - assert_eq!(chan_8.holder_max_htlc_value_in_flight_msat, chan_8_value_msat); + let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000; + assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat); } #[test] @@ -7683,8 +7694,8 @@ mod tests { outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32; let chan = Channel::::new_outbound(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap(); - let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64); - assert_eq!(chan.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve); + let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64); + assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve); let chan_open_channel_msg = chan.get_open_channel(genesis_block(network).header.block_hash()); let mut inbound_node_config = UserConfig::default(); @@ -7693,10 +7704,10 @@ mod tests { if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 { let chan_inbound_node = Channel::::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, 
&channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap(); - let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64); + let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64); - assert_eq!(chan_inbound_node.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve); - assert_eq!(chan_inbound_node.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve); + assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve); + assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve); } else { // Channel Negotiations failed let result = Channel::::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42); @@ -7717,8 +7728,8 @@ mod tests { let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); let mut node_a_chan = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); - assert!(node_a_chan.counterparty_forwarding_info.is_none()); - assert_eq!(node_a_chan.holder_htlc_minimum_msat, 1); // the default + assert!(node_a_chan.context.counterparty_forwarding_info.is_none()); + assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1); // the default assert!(node_a_chan.counterparty_forwarding_info().is_none()); // Make sure that receiving a channel update will update the Channel as expected. @@ -7741,7 +7752,7 @@ mod tests { // The counterparty can send an update with a higher minimum HTLC, but that shouldn't // change our official htlc_minimum_msat. 
- assert_eq!(node_a_chan.holder_htlc_minimum_msat, 1); + assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1); match node_a_chan.counterparty_forwarding_info() { Some(info) => { assert_eq!(info.cltv_expiry_delta, 100); @@ -7796,8 +7807,8 @@ mod tests { let mut config = UserConfig::default(); config.channel_handshake_config.announced_channel = false; let mut chan = Channel::::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test - chan.holder_dust_limit_satoshis = 546; - chan.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel + chan.context.holder_dust_limit_satoshis = 546; + chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 }; @@ -7808,13 +7819,13 @@ mod tests { delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"), htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444") }; - chan.channel_transaction_parameters.counterparty_parameters = Some( + chan.context.channel_transaction_parameters.counterparty_parameters = Some( CounterpartyChannelTransactionParameters { pubkeys: counterparty_pubkeys.clone(), selected_contest_delay: 144 }); - chan.channel_transaction_parameters.funding_outpoint = Some(funding_info); - signer.provide_channel_parameters(&chan.channel_transaction_parameters); + chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info); + signer.provide_channel_parameters(&chan.context.channel_transaction_parameters); assert_eq!(counterparty_pubkeys.payment_point.serialize()[..], hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]); @@ -7828,22 +7839,22 @@ mod tests { // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not // derived from a commitment_seed, so instead we copy it here and call // build_commitment_transaction. - let delayed_payment_base = &chan.holder_signer.pubkeys().delayed_payment_basepoint; + let delayed_payment_base = &chan.context.holder_signer.pubkeys().delayed_payment_basepoint; let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap(); let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret); - let htlc_basepoint = &chan.holder_signer.pubkeys().htlc_basepoint; + let htlc_basepoint = &chan.context.holder_signer.pubkeys().htlc_basepoint; let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint); macro_rules! test_commitment { ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => { - chan.channel_transaction_parameters.opt_anchors = None; + chan.context.channel_transaction_parameters.opt_anchors = None; test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, false, $($remain)*); }; } macro_rules! 
test_commitment_with_anchors { ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => { - chan.channel_transaction_parameters.opt_anchors = Some(()); + chan.context.channel_transaction_parameters.opt_anchors = Some(()); test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, true, $($remain)*); }; } @@ -7864,7 +7875,7 @@ mod tests { let unsigned_tx = trusted_tx.built_transaction(); let redeemscript = chan.get_funding_redeemscript(); let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap(); - let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.channel_value_satoshis); + let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis); log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction))); assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig"); @@ -7883,7 +7894,7 @@ mod tests { commitment_tx.clone(), counterparty_signature, counterparty_htlc_sigs, - &chan.holder_signer.pubkeys().funding_pubkey, + &chan.context.holder_signer.pubkeys().funding_pubkey, chan.counterparty_funding_pubkey() ); let (holder_sig, htlc_sigs) = signer.sign_holder_commitment_and_htlcs(&holder_commitment_tx, &secp_ctx).unwrap(); @@ -7901,7 +7912,7 @@ mod tests { let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap(); let ref htlc = htlcs[$htlc_idx]; - let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.feerate_per_kw, + let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw, chan.get_counterparty_selected_contest_delay().unwrap(), &htlc, $opt_anchors, false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key); let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys); @@ -7928,7 +7939,7 @@ mod tests { let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap(); assert_eq!(signature, *(htlc_sig.1).1, "htlc sig"); let index = (htlc_sig.1).0; - let channel_parameters = chan.channel_transaction_parameters.as_holder_broadcastable(); + let channel_parameters = chan.context.channel_transaction_parameters.as_holder_broadcastable(); let trusted_tx = holder_commitment_tx.trust(); log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage)))); assert_eq!(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))[..], @@ -7944,7 +7955,7 @@ mod tests { "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); // simple commitment tx with no HTLCs - chan.value_to_self_msat = 7000000000; + 
chan.context.value_to_self_msat = 7000000000; test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0", "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142", @@ -7955,7 +7966,7 @@ mod tests { "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7", "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); - chan.pending_inbound_htlcs.push({ + chan.context.pending_inbound_htlcs.push({ let mut out = InboundHTLCOutput{ htlc_id: 0, amount_msat: 1000000, @@ -7966,7 +7977,7 @@ mod tests { out.payment_hash.0 = Sha256::hash(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).into_inner(); out }); - chan.pending_inbound_htlcs.push({ + chan.context.pending_inbound_htlcs.push({ let mut out = InboundHTLCOutput{ htlc_id: 1, amount_msat: 2000000, @@ -7977,7 +7988,7 @@ mod tests { out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner(); out }); - chan.pending_outbound_htlcs.push({ + chan.context.pending_outbound_htlcs.push({ let mut out = OutboundHTLCOutput{ htlc_id: 2, amount_msat: 2000000, @@ -7989,7 +8000,7 @@ mod tests { out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner(); out }); - chan.pending_outbound_htlcs.push({ + chan.context.pending_outbound_htlcs.push({ let mut out = OutboundHTLCOutput{ htlc_id: 3, amount_msat: 3000000, @@ -8001,7 +8012,7 @@ mod tests { out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner(); out }); - chan.pending_inbound_htlcs.push({ + chan.context.pending_inbound_htlcs.push({ let mut out = InboundHTLCOutput{ htlc_id: 4, amount_msat: 4000000, @@ -8014,8 +8025,8 @@ mod tests { }); // commitment tx with all five HTLCs untrimmed (minimum feerate) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 0; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 0; test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5", "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea", @@ -8048,8 +8059,8 @@ mod tests { } ); // 
commitment tx with seven outputs untrimmed (maximum feerate) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 647; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 647; test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee", "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7", @@ -8082,8 +8093,8 @@ mod tests { } ); // commitment tx with six outputs untrimmed (minimum feerate) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 648; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 648; test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5", "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9", @@ -8111,9 +8122,9 @@ mod tests { } ); // anchors: commitment tx with six outputs untrimmed (minimum dust limit) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 645; - chan.holder_dust_limit_satoshis = 1001; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 645; + chan.context.holder_dust_limit_satoshis = 1001; test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312", "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051", @@ -8141,9 +8152,9 @@ mod tests { } ); // commitment tx with six outputs untrimmed (maximum feerate) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 2069; - chan.holder_dust_limit_satoshis = 546; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 2069; + chan.context.holder_dust_limit_satoshis = 546; test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc", "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33", @@ -8171,8 +8182,8 @@ mod tests { } ); // commitment tx with five outputs untrimmed (minimum feerate) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 2070; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 2070; test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c", "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1", @@ -8195,8 +8206,8 @@ mod tests { } ); // commitment tx with five outputs untrimmed (maximum feerate) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 2194; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 2194; 
test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3", "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398", @@ -8219,8 +8230,8 @@ mod tests { } ); // commitment tx with four outputs untrimmed (minimum feerate) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 2195; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 2195; test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403", "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767", @@ -8238,9 +8249,9 @@ mod tests { } ); // anchors: commitment tx with four outputs untrimmed (minimum dust limit) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 2185; - chan.holder_dust_limit_satoshis = 2001; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 2185; + chan.context.holder_dust_limit_satoshis = 2001; test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0", "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5", @@ -8258,9 +8269,9 @@ mod tests { } ); // commitment tx with four outputs untrimmed (maximum feerate) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 3702; - chan.holder_dust_limit_satoshis = 546; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 3702; + chan.context.holder_dust_limit_satoshis = 546; test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169", "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75", @@ -8278,8 +8289,8 @@ mod tests { } ); // commitment tx with three outputs untrimmed (minimum feerate) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 3703; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 3703; test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e", "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1", @@ -8292,9 +8303,9 @@ mod tests { } ); // anchors: commitment tx with three outputs untrimmed (minimum dust limit) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 3687; - chan.holder_dust_limit_satoshis = 3001; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 3687; + chan.context.holder_dust_limit_satoshis = 3001; test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377", 
"3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d", @@ -8307,9 +8318,9 @@ mod tests { } ); // commitment tx with three outputs untrimmed (maximum feerate) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 4914; - chan.holder_dust_limit_satoshis = 546; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 4914; + chan.context.holder_dust_limit_satoshis = 546; test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244", "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19", @@ -8322,63 +8333,63 @@ mod tests { } ); // commitment tx with two outputs untrimmed (minimum feerate) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 4915; - chan.holder_dust_limit_satoshis = 546; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 4915; + chan.context.holder_dust_limit_satoshis = 546; test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720", "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5", "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); // anchors: commitment tx with two outputs untrimmed (minimum dust limit) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 4894; - chan.holder_dust_limit_satoshis = 4001; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 4894; + chan.context.holder_dust_limit_satoshis = 4001; test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95", "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd", 
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); // commitment tx with two outputs untrimmed (maximum feerate) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 9651180; - chan.holder_dust_limit_satoshis = 546; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 9651180; + chan.context.holder_dust_limit_satoshis = 546; test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3", "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de", "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); // commitment tx with one output untrimmed (minimum feerate) - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 9651181; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 9651181; test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2", "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379", "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); // anchors: commitment tx with one output untrimmed (minimum dust limit) - 
chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 6216010; - chan.holder_dust_limit_satoshis = 4001; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 6216010; + chan.context.holder_dust_limit_satoshis = 4001; test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf", "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1", "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); // commitment tx with fee greater than funder amount - chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000 - chan.feerate_per_kw = 9651936; - chan.holder_dust_limit_satoshis = 546; + chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000 + chan.context.feerate_per_kw = 9651936; + chan.context.holder_dust_limit_satoshis = 546; test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2", "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379", "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage - chan.value_to_self_msat = 7_000_000_000 - 2_000_000; - chan.feerate_per_kw = 253; - chan.pending_inbound_htlcs.clear(); - chan.pending_inbound_htlcs.push({ + chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000; + chan.context.feerate_per_kw = 253; + chan.context.pending_inbound_htlcs.clear(); + chan.context.pending_inbound_htlcs.push({ let mut out = InboundHTLCOutput{ htlc_id: 1, amount_msat: 2000000, @@ -8389,8 +8400,8 @@ mod tests { out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner(); out }); - chan.pending_outbound_htlcs.clear(); - chan.pending_outbound_htlcs.push({ + chan.context.pending_outbound_htlcs.clear(); + chan.context.pending_outbound_htlcs.push({ let mut out = OutboundHTLCOutput{ htlc_id: 6, amount_msat: 5000001, @@ -8402,7 +8413,7 
@@ mod tests { out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner(); out }); - chan.pending_outbound_htlcs.push({ + chan.context.pending_outbound_htlcs.push({ let mut out = OutboundHTLCOutput{ htlc_id: 5, amount_msat: 5000000, @@ -8553,7 +8564,7 @@ mod tests { &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); - assert!(!channel_a.channel_type.supports_anchors_zero_fee_htlc_tx()); + assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx()); let mut expected_channel_type = ChannelTypeFeatures::empty(); expected_channel_type.set_static_remote_key_required(); @@ -8571,8 +8582,8 @@ mod tests { &open_channel_msg, 7, &config, 0, &&logger, 42 ).unwrap(); - assert_eq!(channel_a.channel_type, expected_channel_type); - assert_eq!(channel_b.channel_type, expected_channel_type); + assert_eq!(channel_a.context.channel_type, expected_channel_type); + assert_eq!(channel_b.context.channel_type, expected_channel_type); } #[cfg(anchors)] diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 7c065f1ba44..8872ae6d42d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5623,7 +5623,7 @@ where }); } } - let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take(); + let need_lnd_workaround = chan.get_mut().context.workaround_lnd_bug_4006.take(); htlc_forwards = self.handle_channel_resumption( &mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order, Vec::new(), None, responses.channel_ready, responses.announcement_sigs); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 0775f58bbca..1caf1340a96 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -180,8 +180,8 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { let mut sender_node_per_peer_lock; let mut sender_node_peer_state_lock; let mut chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); - chan.holder_selected_channel_reserve_satoshis = 0; - chan.holder_max_htlc_value_in_flight_msat = 100_000_000; + chan.context.holder_selected_channel_reserve_satoshis = 0; + chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000; } let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id); @@ -728,7 +728,7 @@ fn test_update_fee_that_funder_cannot_afford() { commit_tx_keys.clone(), non_buffer_feerate + 4, &mut htlcs, - &local_chan.channel_transaction_parameters.as_counterparty_broadcastable() + &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable() ); local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() }; @@ -1457,7 +1457,7 @@ fn test_fee_spike_violation_fails_htlc() { commit_tx_keys.clone(), feerate_per_kw, &mut vec![(accepted_htlc_info, ())], - &local_chan.channel_transaction_parameters.as_counterparty_broadcastable() + &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable() ); local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() }; @@ -3122,7 +3122,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // The dust limit applied to HTLC outputs considers the fee of the HTLC 
transaction as // well, so HTLCs at exactly the dust limit will not be included in commitment txn. nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000 + .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000 } else { 3000000 }; let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); @@ -4990,7 +4990,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2); let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis; + .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis; // 0th HTLC: let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee // 1st HTLC: @@ -6099,7 +6099,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64; + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64; // Fetch a route in advance as we will be unable to once we're unable to send. 
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); @@ -6774,7 +6774,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { let chan =create_announced_chan_between_nodes(&nodes, 0, 1); let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis; // We route 2 dust-HTLCs between A and B let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); @@ -6867,7 +6867,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis; let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000); @@ -9604,7 +9604,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; let mut chan = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id); - chan.holder_dust_limit_satoshis = 546; + chan.context.holder_dust_limit_satoshis = 546; } nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index d3823fc8f9e..a2ec37a8abb 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -837,7 +837,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; - get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).closing_fee_limits.as_mut().unwrap().1 *= 10; + get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).context.closing_fee_limits.as_mut().unwrap().1 *= 10; } nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); From 1503ebbc0e0916d17f10b2730c39fb266404a457 Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 7 Jun 2023 11:29:44 +0200 Subject: [PATCH 02/25] Move `Channel::opt_anchors` to `ChannelContext` impl & move some util fns This is one of a series of commits to make sure methods are moved by chunks so they are easily reviewable in diffs. Unfortunately they are not purely move-only as fields need to be updated for things to compile, but these should be quite clear. 
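As an editor's illustration of the shape this refactor is heading toward, the sketch below uses simplified, hypothetical stand-in types (not the real LDK definitions: the actual `Channel` is generic over a signer, `ChannelContext` carries many more fields, and the real reserve helper takes a `UserConfig` rather than a raw millionths value). It only shows the two mechanical changes that recur throughout this commit's diff: accessors such as `opt_anchors` move onto `ChannelContext` and are reached through the `context` field, while helpers like `get_holder_selected_channel_reserve_satoshis` become free module-level functions instead of associated functions on `Channel`.

    // Simplified, hypothetical stand-ins for illustration only.
    struct ChannelTransactionParameters { opt_anchors: Option<()> }

    struct ChannelContext {
        channel_value_satoshis: u64,
        channel_transaction_parameters: ChannelTransactionParameters,
    }

    impl ChannelContext {
        // Previously an accessor on `Channel`; now lives on the shared context.
        fn opt_anchors(&self) -> bool {
            self.channel_transaction_parameters.opt_anchors.is_some()
        }
    }

    struct Channel { context: ChannelContext }

    // Previously an associated function on `Channel`; now a module-level helper.
    // The 1000 sat floor stands in for MIN_THEIR_CHAN_RESERVE_SATOSHIS in this sketch.
    fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, proportional_millionths: u64) -> u64 {
        let calculated = channel_value_satoshis.saturating_mul(proportional_millionths) / 1_000_000;
        core::cmp::min(channel_value_satoshis, core::cmp::max(calculated, 1000))
    }

    fn main() {
        let chan = Channel {
            context: ChannelContext {
                channel_value_satoshis: 100_000,
                channel_transaction_parameters: ChannelTransactionParameters { opt_anchors: Some(()) },
            },
        };
        // Call sites change from `chan.opt_anchors()` to:
        assert!(chan.context.opt_anchors());
        // 10_000 millionths is 1%: 1% of 100_000 sat is 1_000 sat, exactly the floor here.
        assert_eq!(get_holder_selected_channel_reserve_satoshis(chan.context.channel_value_satoshis, 10_000), 1_000);
    }

The later commits in the series follow the same pattern: each moves a chunk of methods onto `ChannelContext` and updates call sites to go through `context`.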
--- lightning/src/ln/channel.rs | 166 +++++++++++----------- lightning/src/ln/functional_test_utils.rs | 2 +- lightning/src/ln/functional_tests.rs | 20 +-- 3 files changed, 96 insertions(+), 92 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 195e261fe6f..0f034a8bbd8 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -784,6 +784,54 @@ pub(super) struct ChannelContext { pending_monitor_updates: Vec, } +impl ChannelContext { + pub(crate) fn opt_anchors(&self) -> bool { + self.channel_transaction_parameters.opt_anchors.is_some() + } +} + +// Internal utility functions for channels + +/// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the +/// `channel_value_satoshis` in msat, set through +/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`] +/// +/// The effective percentage is lower bounded by 1% and upper bounded by 100%. +/// +/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel +fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 { + let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 { + 1 + } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 { + 100 + } else { + config.max_inbound_htlc_value_in_flight_percent_of_channel as u64 + }; + channel_value_satoshis * 10 * configured_percent +} + +/// Returns a minimum channel reserve value the remote needs to maintain, +/// required by us according to the configured or default +/// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`] +/// +/// Guaranteed to return a value no larger than channel_value_satoshis +/// +/// This is used both for outbound and inbound channels and has lower bound +/// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`. +pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 { + let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000; + cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS)) +} + +/// This is for legacy reasons, present for forward-compatibility. +/// LDK versions older than 0.0.104 don't know how read/handle values other than default +/// from storage. Hence, we use this function to not persist default values of +/// `holder_selected_channel_reserve_satoshis` for channels into storage. +pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 { + let (q, _) = channel_value_satoshis.overflowing_div(100); + cmp::min(channel_value_satoshis, cmp::max(q, 1000)) +} + // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking // has been completed, and then turn into a Channel to get compiler-time enforcement of things like // calling channel_id() before we're set up or things like get_outbound_funding_signed on an @@ -884,50 +932,6 @@ macro_rules! 
secp_check { } impl Channel { - /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the - /// `channel_value_satoshis` in msat, set through - /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`] - /// - /// The effective percentage is lower bounded by 1% and upper bounded by 100%. - /// - /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel - fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 { - let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 { - 1 - } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 { - 100 - } else { - config.max_inbound_htlc_value_in_flight_percent_of_channel as u64 - }; - channel_value_satoshis * 10 * configured_percent - } - - /// Returns a minimum channel reserve value the remote needs to maintain, - /// required by us according to the configured or default - /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`] - /// - /// Guaranteed to return a value no larger than channel_value_satoshis - /// - /// This is used both for outbound and inbound channels and has lower bound - /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`. - pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 { - let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000; - cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS)) - } - - /// This is for legacy reasons, present for forward-compatibility. - /// LDK versions older than 0.0.104 don't know how read/handle values other than default - /// from storage. Hence, we use this function to not persist default values of - /// `holder_selected_channel_reserve_satoshis` for channels into storage. 
- pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 { - let (q, _) = channel_value_satoshis.overflowing_div(100); - cmp::min(channel_value_satoshis, cmp::max(q, 1000)) - } - - pub(crate) fn opt_anchors(&self) -> bool { - self.context.channel_transaction_parameters.opt_anchors.is_some() - } - fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures { // The default channel type (ie the first one we try) depends on whether the channel is // public - if it is, we just go with `only_static_remotekey` as it's the only option @@ -1012,7 +1016,7 @@ impl Channel { if holder_selected_contest_delay < BREAKDOWN_TIMEOUT { return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)}); } - let holder_selected_channel_reserve_satoshis = Channel::::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); + let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { // Protocol level safety check in place, although it should never happen because // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` @@ -1123,7 +1127,7 @@ impl Channel { counterparty_dust_limit_satoshis: 0, holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, counterparty_max_htlc_value_in_flight_msat: 0, - holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), + holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel holder_selected_channel_reserve_satoshis, counterparty_htlc_minimum_msat: 0, @@ -1339,7 +1343,7 @@ impl Channel { } } - let holder_selected_channel_reserve_satoshis = Channel::::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config); + let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config); if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { // Protocol level safety check in place, although it should never happen because // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` @@ -1482,7 +1486,7 @@ impl Channel { counterparty_dust_limit_satoshis: msg.dust_limit_satoshis, holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000), - holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config), + holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config), counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis), holder_selected_channel_reserve_satoshis, counterparty_htlc_minimum_msat: msg.htlc_minimum_msat, @@ -1607,7 +1611,7 @@ impl Channel { ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => { if $outbound == local { // "offered HTLC output" let htlc_in_tx = get_htlc_in_commitment!($htlc, true); - let htlc_tx_fee = if self.opt_anchors() { + let htlc_tx_fee = if self.context.opt_anchors() { 0 } else { feerate_per_kw 
as u64 * htlc_timeout_tx_weight(false) / 1000 @@ -1621,7 +1625,7 @@ impl Channel { } } else { let htlc_in_tx = get_htlc_in_commitment!($htlc, false); - let htlc_tx_fee = if self.opt_anchors() { + let htlc_tx_fee = if self.context.opt_anchors() { 0 } else { feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000 @@ -2640,7 +2644,7 @@ impl Channel { on_holder_tx_holding_cell_htlcs_count: 0, }; - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.opt_anchors() { + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() { (0, 0) } else { let dust_buffer_feerate = self.get_dust_buffer_feerate(outbound_feerate_update) as u64; @@ -2672,7 +2676,7 @@ impl Channel { on_holder_tx_holding_cell_htlcs_count: 0, }; - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.opt_anchors() { + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() { (0, 0) } else { let dust_buffer_feerate = self.get_dust_buffer_feerate(outbound_feerate_update) as u64; @@ -2742,7 +2746,7 @@ impl Channel { // dependency. // This complicates the computation around dust-values, up to the one-htlc-value. let mut real_dust_limit_timeout_sat = self.context.holder_dust_limit_satoshis; - if !self.opt_anchors() { + if !self.context.opt_anchors() { real_dust_limit_timeout_sat += self.context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000; } @@ -2768,7 +2772,7 @@ impl Channel { // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure // sending a new HTLC won't reduce their balance below our reserve threshold. let mut real_dust_limit_success_sat = self.context.counterparty_dust_limit_satoshis; - if !self.opt_anchors() { + if !self.context.opt_anchors() { real_dust_limit_success_sat += self.context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000; } @@ -2795,7 +2799,7 @@ impl Channel { let mut remaining_msat_below_dust_exposure_limit = None; let mut dust_exposure_dust_limit_msat = 0; - let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() { + let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.context.opt_anchors() { (self.context.counterparty_dust_limit_satoshis, self.context.holder_dust_limit_satoshis) } else { let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64; @@ -2877,7 +2881,7 @@ impl Channel { fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { assert!(self.is_outbound()); - let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() { + let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.context.opt_anchors() { (0, 0) } else { (self.context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, @@ -2940,12 +2944,12 @@ impl Channel { } let num_htlcs = included_htlcs + addl_htlcs; - let res = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs, self.opt_anchors()); + let res = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs, self.context.opt_anchors()); #[cfg(any(test, fuzzing))] { let mut fee = res; if fee_spike_buffer_htlc.is_some() { - fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs - 1, self.opt_anchors()); + fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs - 1, self.context.opt_anchors()); } let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len() + self.context.holding_cell_htlc_updates.len(); @@ -2980,7 +2984,7 @@ 
impl Channel { fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { assert!(!self.is_outbound()); - let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() { + let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.context.opt_anchors() { (0, 0) } else { (self.context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, @@ -3030,12 +3034,12 @@ impl Channel { } let num_htlcs = included_htlcs + addl_htlcs; - let res = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs, self.opt_anchors()); + let res = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs, self.context.opt_anchors()); #[cfg(any(test, fuzzing))] { let mut fee = res; if fee_spike_buffer_htlc.is_some() { - fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs - 1, self.opt_anchors()); + fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs - 1, self.context.opt_anchors()); } let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len(); let commitment_tx_info = CommitmentTxInfoCached { @@ -3110,7 +3114,7 @@ impl Channel { } } - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.opt_anchors() { + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() { (0, 0) } else { let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64; @@ -3363,11 +3367,11 @@ impl Channel { for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() { if let Some(_) = htlc.transaction_output_index { let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw, - self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.opt_anchors(), + self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(), false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key); - let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &keys); - let htlc_sighashtype = if self.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All }; + let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys); + let htlc_sighashtype = if self.context.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All }; let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]); log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()), @@ -3884,7 +3888,7 @@ impl Channel { let outbound_stats = self.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); let keys = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); let commitment_stats = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); - let buffer_fee_msat = Channel::::commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.opt_anchors()) * 1000; + let buffer_fee_msat = Channel::::commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + 
outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000; let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { //TODO: auto-close after a number of failures? @@ -6168,7 +6172,7 @@ impl Channel { && info.next_holder_htlc_id == self.context.next_holder_htlc_id && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id && info.feerate == self.context.feerate_per_kw { - let actual_fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.opt_anchors()); + let actual_fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.opt_anchors()); assert_eq!(actual_fee, info.fee); } } @@ -6208,8 +6212,8 @@ impl Channel { for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) { log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", - encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, self.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), - encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &counterparty_keys)), + encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), + encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)), log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()), log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.channel_id())); } @@ -6730,13 +6734,13 @@ impl Writeable for Channel { // a different percentage of the channel value then 10%, which older versions of LDK used // to set it to before the percentage was made configurable. 
let serialized_holder_selected_reserve = - if self.context.holder_selected_channel_reserve_satoshis != Self::get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis) + if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis) { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None }; let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config; old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY; let serialized_holder_htlc_max_in_flight = - if self.context.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config) + if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config) { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None }; let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted); @@ -7033,8 +7037,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let mut announcement_sigs = None; let mut target_closing_feerate_sats_per_kw = None; let mut monitor_pending_finalized_fulfills = Some(Vec::new()); - let mut holder_selected_channel_reserve_satoshis = Some(Self::get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis)); - let mut holder_max_htlc_value_in_flight_msat = Some(Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config)); + let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis)); + let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config)); // Prior to supporting channel type negotiation, all of our channels were static_remotekey // only, so we default to that if none was written. let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key()); @@ -7469,13 +7473,13 @@ mod tests { // the dust limit check. let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered); let local_commit_tx_fee = node_a_chan.next_local_commit_tx_fee_msat(htlc_candidate, None); - let local_commit_fee_0_htlcs = Channel::::commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.opt_anchors()); + let local_commit_fee_0_htlcs = Channel::::commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.opt_anchors()); assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs); // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all // of the HTLCs are seen to be above the dust limit. 
node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false; - let remote_commit_fee_3_htlcs = Channel::::commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.opt_anchors()); + let remote_commit_fee_3_htlcs = Channel::::commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.opt_anchors()); let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered); let remote_commit_tx_fee = node_a_chan.next_remote_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs); @@ -7497,18 +7501,18 @@ mod tests { let config = UserConfig::default(); let mut chan = Channel::::new_outbound(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); - let commitment_tx_fee_0_htlcs = Channel::::commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.opt_anchors()); - let commitment_tx_fee_1_htlc = Channel::::commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.opt_anchors()); + let commitment_tx_fee_0_htlcs = Channel::::commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.opt_anchors()); + let commitment_tx_fee_1_htlc = Channel::::commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.opt_anchors()); // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be // counted as dust when it shouldn't be. - let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000; + let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000; let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered); let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc); // If swapped: this HTLC would be counted as non-dust when it shouldn't be. - let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000; + let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000; let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered); let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs); @@ -7516,13 +7520,13 @@ mod tests { chan.context.channel_transaction_parameters.is_outbound_from_holder = false; // If swapped: this HTLC would be counted as non-dust when it shouldn't be. - let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000; + let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000; let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered); let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs); // If swapped: this HTLC would be counted as dust when it shouldn't be. 
- let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000; + let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000; let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered); let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index d4ede8eca99..6d980632d1a 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -802,7 +802,7 @@ macro_rules! get_opt_anchors { let mut per_peer_state_lock; let mut peer_state_lock; let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id); - chan.opt_anchors() + chan.context.opt_anchors() } } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 1caf1340a96..10225e099cb 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -20,7 +20,7 @@ use crate::chain::transaction::OutPoint; use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash}; -use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT}; +use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis}; use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, Channel, ChannelError}; use crate::ln::{chan_utils, onion_utils}; @@ -75,7 +75,7 @@ fn test_insane_channel_opens() { // Instantiate channel parameters where we push the maximum msats given our // funding satoshis let channel_value_sat = 31337; // same as funding satoshis - let channel_reserve_satoshis = Channel::::get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg); + let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg); let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000; // Have node0 initiate a channel to node1 with aforementioned parameters @@ -157,7 +157,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { let feerate_per_kw = 253; let opt_anchors = false; push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; - push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap(); let mut 
open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); @@ -643,7 +643,7 @@ fn test_update_fee_that_funder_cannot_afford() { let channel_id = chan.2; let secp_ctx = Secp256k1::new(); let default_config = UserConfig::default(); - let bs_channel_reserve_sats = Channel::::get_holder_selected_channel_reserve_satoshis(channel_value, &default_config); + let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config); let opt_anchors = false; @@ -1453,7 +1453,7 @@ fn test_fee_spike_violation_fails_htlc() { commitment_number, 95000, local_chan_balance, - local_chan.opt_anchors(), local_funding, remote_funding, + local_chan.context.opt_anchors(), local_funding, remote_funding, commit_tx_keys.clone(), feerate_per_kw, &mut vec![(accepted_htlc_info, ())], @@ -1517,7 +1517,7 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { let mut push_amt = 100_000_000; push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); - push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); @@ -1550,7 +1550,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { // transaction fee with 0 HTLCs (183 sats)). let mut push_amt = 100_000_000; push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); - push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); // Send four HTLCs to cover the initial push_msat buffer we're required to include @@ -1606,7 +1606,7 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { // transaction fee with 0 HTLCs (183 sats)). let mut push_amt = 100_000_000; push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); - push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt); let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 @@ -1651,7 +1651,7 @@ fn test_chan_init_feerate_unaffordability() { // During open, we don't have a "counterparty channel reserve" to check against, so that // requirement only comes into play on the open_channel handling side. 
- push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap(); let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); open_channel_msg.push_msat += 1; @@ -1777,7 +1777,7 @@ fn test_inbound_outbound_capacity_is_not_zero() { assert_eq!(channels0.len(), 1); assert_eq!(channels1.len(), 1); - let reserve = Channel::::get_holder_selected_channel_reserve_satoshis(100_000, &default_config); + let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config); assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000); assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000); From 1ee0a66d21cfd3696ed93938ba08281a0c342741 Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 7 Jun 2023 11:57:35 +0200 Subject: [PATCH 03/25] Move `Channel::get_update_time_counter` and some other methods This is one of a series of commits to make sure methods are moved by chunks so they are easily reviewable in diffs. Unfortunately they are not purely move-only as fields need to be updated for things to compile, but these should be quite clear. This commit also uses these methods through the `context` field where needed for compilation and tests to pass due to the above change. --- lightning/src/ln/channel.rs | 190 ++++++++++++++--------------- lightning/src/ln/channelmanager.rs | 78 ++++++------ 2 files changed, 134 insertions(+), 134 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 0f034a8bbd8..b1cd603f021 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -788,6 +788,48 @@ impl ChannelContext { pub(crate) fn opt_anchors(&self) -> bool { self.channel_transaction_parameters.opt_anchors.is_some() } + + /// Allowed in any state (including after shutdown) + pub fn get_update_time_counter(&self) -> u32 { + self.update_time_counter + } + + pub fn get_latest_monitor_update_id(&self) -> u64 { + self.latest_monitor_update_id + } + + pub fn should_announce(&self) -> bool { + self.config.announced_channel + } + + pub fn is_outbound(&self) -> bool { + self.channel_transaction_parameters.is_outbound_from_holder + } + + /// Gets the fee we'd want to charge for adding an HTLC output to this Channel + /// Allowed in any state (including after shutdown) + pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 { + self.config.options.forwarding_fee_base_msat + } + + /// Returns true if we've ever received a message from the remote end for this Channel + pub fn have_received_message(&self) -> bool { + self.channel_state > (ChannelState::OurInitSent as u32) + } + + /// Returns true if this channel is fully established and not known to be closing. + /// Allowed in any state (including after shutdown) + pub fn is_usable(&self) -> bool { + let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK; + (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready + } + + /// Returns true if this channel is currently available for use. This is a superset of + /// is_usable() and considers things like the channel being temporarily disabled. 
+ /// Allowed in any state (including after shutdown) + pub fn is_live(&self) -> bool { + self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0) + } } // Internal utility functions for channels @@ -962,7 +1004,7 @@ impl Channel { /// not of our ability to open any channel at all. Thus, on error, we should first call this /// and see if we get a new `OpenChannel` message, otherwise the channel is failed. pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result { - if !self.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); } + if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); } if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() { // We've exhausted our options return Err(()); @@ -1582,9 +1624,9 @@ impl Channel { if match update_state { // Note that these match the inclusion criteria when scanning // pending_inbound_htlcs below. - FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local }, - FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local }, - FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local }, + FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); !generated_by_local }, + FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.context.is_outbound()); !generated_by_local }, + FeeUpdateState::Outbound => { assert!(self.context.is_outbound()); generated_by_local }, } { feerate_per_kw = feerate; } @@ -1592,7 +1634,7 @@ impl Channel { log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), - get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), + get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound()), log_bytes!(self.context.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); macro_rules! 
get_htlc_in_commitment { @@ -1735,7 +1777,7 @@ impl Channel { let total_fee_sat = Channel::::commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.context.channel_transaction_parameters.opt_anchors.is_some()); let anchors_val = if self.context.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64; - let (value_to_self, value_to_remote) = if self.is_outbound() { + let (value_to_self, value_to_remote) = if self.context.is_outbound() { (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000) } else { (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64) @@ -1840,14 +1882,14 @@ impl Channel { assert!(self.context.pending_update_fee.is_none()); let mut total_fee_satoshis = proposed_total_fee_satoshis; - let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.is_outbound() { total_fee_satoshis as i64 } else { 0 }; - let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.is_outbound() { 0 } else { total_fee_satoshis as i64 }; + let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 }; + let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 }; if value_to_holder < 0 { - assert!(self.is_outbound()); + assert!(self.context.is_outbound()); total_fee_satoshis += (-value_to_holder) as u64; } else if value_to_counterparty < 0 { - assert!(!self.is_outbound()); + assert!(!self.context.is_outbound()); total_fee_satoshis += (-value_to_counterparty) as u64; } @@ -2216,7 +2258,7 @@ impl Channel { let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits }; // Check sanity of message fields: - if !self.is_outbound() { + if !self.context.is_outbound() { return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned())); } if self.context.channel_state != ChannelState::OurInitSent as u32 { @@ -2384,7 +2426,7 @@ impl Channel { SP::Target: SignerProvider, L::Target: Logger { - if self.is_outbound() { + if self.context.is_outbound() { return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned())); } if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { @@ -2436,7 +2478,7 @@ impl Channel { let funding_redeemscript = self.get_funding_redeemscript(); let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); - let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()); + let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); @@ -2477,7 +2519,7 @@ impl Channel { SP::Target: 
SignerProvider, L::Target: Logger { - if !self.is_outbound() { + if !self.context.is_outbound() { return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())); } if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 { @@ -2526,7 +2568,7 @@ impl Channel { let funding_redeemscript = self.get_funding_redeemscript(); let funding_txo = self.get_funding_txo().unwrap(); let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); - let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()); + let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); @@ -2737,7 +2779,7 @@ impl Channel { let mut available_capacity_msat = outbound_capacity_msat; - if self.is_outbound() { + if self.context.is_outbound() { // We should mind channel commit tx fee when computing how much of the available capacity // can be used in the next htlc. Mirrors the logic in send_htlc. // @@ -2879,7 +2921,7 @@ impl Channel { /// /// Dust HTLCs are excluded. fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { - assert!(self.is_outbound()); + assert!(self.context.is_outbound()); let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.context.opt_anchors() { (0, 0) @@ -2982,7 +3024,7 @@ impl Channel { /// /// Dust HTLCs are excluded. fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { - assert!(!self.is_outbound()); + assert!(!self.context.is_outbound()); let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.context.opt_anchors() { (0, 0) @@ -3151,7 +3193,7 @@ impl Channel { // Check that the remote can afford to pay for this HTLC on-chain at the current // feerate_per_kw, while maintaining their channel reserve (as required by the spec). - let remote_commit_tx_fee_msat = if self.is_outbound() { 0 } else { + let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else { let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); self.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations }; @@ -3163,7 +3205,7 @@ impl Channel { return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned())); } - if !self.is_outbound() { + if !self.context.is_outbound() { // `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from // the spec because in the spec, the fee spike buffer requirement doesn't exist on the // receiver's side, only on the sender's. 
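Aside for readers following the refactor (not part of the patch itself): the hunks in this commit all apply the one mechanical change described in its message above. Read-only helpers such as is_outbound(), is_usable() and is_live() now live on ChannelContext, and Channel (and later, external callers) reach them through the context field. A minimal, self-contained sketch of that shape, using simplified stand-in fields rather than LDK's real ChannelState flags and channel parameters:

    struct ChannelContext {
        channel_state: u32,
        is_outbound_from_holder: bool,
    }

    impl ChannelContext {
        // Read-only state helpers now live on the context...
        fn is_outbound(&self) -> bool { self.is_outbound_from_holder }
        fn is_usable(&self) -> bool { self.channel_state & 0b01 != 0 }
        fn is_live(&self) -> bool { self.is_usable() && self.channel_state & 0b10 == 0 }
    }

    struct Channel {
        context: ChannelContext,
    }

    impl Channel {
        // ...so the wrapper reaches them through `self.context` instead of
        // implementing them itself (the real checks in send_update_fee are analogous).
        fn assert_can_send_update_fee(&self) {
            assert!(self.context.is_outbound(), "only the funder may send update_fee");
            assert!(self.context.is_live(), "peer must be connected and the channel usable");
        }
    }

    fn main() {
        let chan = Channel {
            context: ChannelContext { channel_state: 0b01, is_outbound_from_holder: true },
        };
        chan.assert_can_send_update_fee();
    }
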
@@ -3319,7 +3361,7 @@ impl Channel { update_state == FeeUpdateState::RemoteAnnounced } else { false }; if update_fee { - debug_assert!(!self.is_outbound()); + debug_assert!(!self.context.is_outbound()); let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000; if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat { return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned())); @@ -3327,7 +3369,7 @@ impl Channel { } #[cfg(any(test, fuzzing))] { - if self.is_outbound() { + if self.context.is_outbound() { let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take(); *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None; if let Some(info) = projected_commit_tx_info { @@ -3787,14 +3829,14 @@ impl Channel { if let Some((feerate, update_state)) = self.context.pending_update_fee { match update_state { FeeUpdateState::Outbound => { - debug_assert!(self.is_outbound()); + debug_assert!(self.context.is_outbound()); log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate); self.context.feerate_per_kw = feerate; self.context.pending_update_fee = None; }, - FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); }, + FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); }, FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { - debug_assert!(!self.is_outbound()); + debug_assert!(!self.context.is_outbound()); log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); require_commitment = true; self.context.feerate_per_kw = feerate; @@ -3873,13 +3915,13 @@ impl Channel { /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this /// [`Channel`] if `force_holding_cell` is false. fn send_update_fee(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option where L::Target: Logger { - if !self.is_outbound() { + if !self.context.is_outbound() { panic!("Cannot send fee from inbound channel"); } - if !self.is_usable() { + if !self.context.is_usable() { panic!("Cannot update fee until channel is fully established and we haven't started shutting down"); } - if !self.is_live() { + if !self.context.is_live() { panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)"); } @@ -3984,7 +4026,7 @@ impl Channel { if let Some((_, update_state)) = self.context.pending_update_fee { if update_state == FeeUpdateState::RemoteAnnounced { - debug_assert!(!self.is_outbound()); + debug_assert!(!self.context.is_outbound()); self.context.pending_update_fee = None; } } @@ -4053,7 +4095,7 @@ impl Channel { // (re-)broadcast the funding transaction as we may have declined to broadcast it when we // first received the funding_signed. 
let mut funding_broadcastable = - if self.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 { + if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 { self.context.funding_transaction.take() } else { None }; // That said, if the funding transaction is already confirmed (ie we're active with a @@ -4069,7 +4111,7 @@ impl Channel { // the funding transaction confirmed before the monitor was persisted, or // * a 0-conf channel and intended to send the channel_ready before any broadcast at all. let channel_ready = if self.context.monitor_pending_channel_ready { - assert!(!self.is_outbound() || self.context.minimum_depth == Some(0), + assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0), "Funding transaction broadcast by the local client before it should have - LDK didn't do it!"); self.context.monitor_pending_channel_ready = false; let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); @@ -4121,7 +4163,7 @@ impl Channel { pub fn update_fee(&mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger { - if self.is_outbound() { + if self.context.is_outbound() { return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned())); } if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { @@ -4212,7 +4254,7 @@ impl Channel { } } - let update_fee = if self.is_outbound() && self.context.pending_update_fee.is_some() { + let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() { Some(msgs::UpdateFee { channel_id: self.channel_id(), feerate_per_kw: self.context.pending_update_fee.unwrap().0, @@ -4423,7 +4465,7 @@ impl Channel { // If we fail to come to consensus, we'll have to force-close. let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background); let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); - let mut proposed_max_feerate = if self.is_outbound() { normal_feerate } else { u32::max_value() }; + let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() }; // The spec requires that (when the channel does not have anchors) we only send absolute // channel fees no greater than the absolute channel fee on the current commitment @@ -4432,7 +4474,7 @@ impl Channel { // some force-closure by old nodes, but we wanted to close the channel anyway. if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw { - let min_feerate = if self.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) }; + let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) }; proposed_feerate = cmp::max(proposed_feerate, min_feerate); proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate); } @@ -4446,7 +4488,7 @@ impl Channel { // if the funders' output is dust we have to know the absolute fee we're going to use. 
let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap())); let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000; - let proposed_max_total_fee_satoshis = if self.is_outbound() { + let proposed_max_total_fee_satoshis = if self.context.is_outbound() { // We always add force_close_avoidance_max_fee_satoshis to our normal // feerate-calculated fee, but allow the max to be overridden if we're using a // target feerate-calculated fee. @@ -4496,7 +4538,7 @@ impl Channel { return Ok((None, None)); } - if !self.is_outbound() { + if !self.context.is_outbound() { if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() { return self.closing_signed(fee_estimator, &msg); } @@ -4689,7 +4731,7 @@ impl Channel { return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned())); } - if self.is_outbound() && self.context.last_sent_closing_fee.is_none() { + if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() { return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned())); } @@ -4777,7 +4819,7 @@ impl Channel { return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee))); } - if !self.is_outbound() { + if !self.context.is_outbound() { // They have to pay, so pick the highest fee in the overlapping range. // We should never set an upper bound aside from their full balance debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000); @@ -4997,7 +5039,7 @@ impl Channel { // Checks whether we should emit a `ChannelReady` event. pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool { - self.is_usable() && !self.context.channel_ready_event_emitted + self.context.is_usable() && !self.context.channel_ready_event_emitted } // Remembers that we already emitted a `ChannelReady` event. @@ -5142,48 +5184,6 @@ impl Channel { } } - /// Allowed in any state (including after shutdown) - pub fn get_update_time_counter(&self) -> u32 { - self.context.update_time_counter - } - - pub fn get_latest_monitor_update_id(&self) -> u64 { - self.context.latest_monitor_update_id - } - - pub fn should_announce(&self) -> bool { - self.context.config.announced_channel - } - - pub fn is_outbound(&self) -> bool { - self.context.channel_transaction_parameters.is_outbound_from_holder - } - - /// Gets the fee we'd want to charge for adding an HTLC output to this Channel - /// Allowed in any state (including after shutdown) - pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 { - self.context.config.options.forwarding_fee_base_msat - } - - /// Returns true if we've ever received a message from the remote end for this Channel - pub fn have_received_message(&self) -> bool { - self.context.channel_state > (ChannelState::OurInitSent as u32) - } - - /// Returns true if this channel is fully established and not known to be closing. 
- /// Allowed in any state (including after shutdown) - pub fn is_usable(&self) -> bool { - let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK; - (self.context.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.context.monitor_pending_channel_ready - } - - /// Returns true if this channel is currently available for use. This is a superset of - /// is_usable() and considers things like the channel being temporarily disabled. - /// Allowed in any state (including after shutdown) - pub fn is_live(&self) -> bool { - self.is_usable() && (self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0) - } - /// Returns true if this channel has been marked as awaiting a monitor update to move forward. /// Allowed in any state (including after shutdown) pub fn is_awaiting_monitor_update(&self) -> bool { @@ -5191,7 +5191,7 @@ impl Channel { } pub fn get_latest_complete_monitor_update_id(&self) -> u64 { - if self.context.pending_monitor_updates.is_empty() { return self.get_latest_monitor_update_id(); } + if self.context.pending_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); } self.context.pending_monitor_updates[0].update.update_id - 1 } @@ -5283,7 +5283,7 @@ impl Channel { // Because deciding we're awaiting initial broadcast spuriously could result in // funds-loss (as we don't have a monitor, but have the funding transaction confirmed), // we hard-assert here, even in production builds. - if self.is_outbound() { assert!(self.context.funding_transaction.is_some()); } + if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); } assert!(self.context.monitor_pending_channel_ready); assert_eq!(self.context.latest_monitor_update_id, 0); return true; @@ -5406,7 +5406,7 @@ impl Channel { let txo_idx = funding_txo.index as usize; if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() || tx.output[txo_idx].value != self.context.channel_value_satoshis { - if self.is_outbound() { + if self.context.is_outbound() { // If we generated the funding transaction and it doesn't match what it // should, the client is really broken and we should just panic and // tell them off. 
That said, because hash collisions happen with high @@ -5419,7 +5419,7 @@ impl Channel { let err_reason = "funding tx had wrong script/value or output index"; return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() }); } else { - if self.is_outbound() { + if self.context.is_outbound() { for input in tx.input.iter() { if input.witness.is_empty() { // We generated a malleable funding transaction, implying we've @@ -5539,7 +5539,7 @@ impl Channel { self.context.minimum_depth.unwrap(), funding_tx_confirmations); return Err(ClosureReason::ProcessingError { err: err_reason }); } - } else if !self.is_outbound() && self.context.funding_tx_confirmed_in.is_none() && + } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() && height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.context.channel_id)); // If funding_tx_confirmed_in is unset, the channel must not be active @@ -5585,7 +5585,7 @@ impl Channel { // something in the handler for the message that prompted this message): pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel { - if !self.is_outbound() { + if !self.context.is_outbound() { panic!("Tried to open a channel for an inbound channel?"); } if self.context.channel_state != ChannelState::OurInitSent as u32 { @@ -5641,7 +5641,7 @@ impl Channel { /// /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel { - if self.is_outbound() { + if self.context.is_outbound() { panic!("Tried to send accept_channel for an outbound channel?"); } if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) { @@ -5719,7 +5719,7 @@ impl Channel { /// Do NOT broadcast the funding transaction until after a successful funding_signed call! /// If an Err is returned, it is a ChannelError::Close. 
pub fn get_outbound_funding_created(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result where L::Target: Logger { - if !self.is_outbound() { + if !self.context.is_outbound() { panic!("Tried to create outbound funding_created message on an inbound channel!"); } if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { @@ -5778,7 +5778,7 @@ impl Channel { if !self.context.config.announced_channel { return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned())); } - if !self.is_usable() { + if !self.context.is_usable() { return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned())); } @@ -5813,7 +5813,7 @@ impl Channel { return None; } - if !self.is_usable() { + if !self.context.is_usable() { return None; } @@ -6126,7 +6126,7 @@ impl Channel { } if let Some((feerate, update_state)) = self.context.pending_update_fee { if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce { - debug_assert!(!self.is_outbound()); + debug_assert!(!self.context.is_outbound()); log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); self.context.feerate_per_kw = feerate; self.context.pending_update_fee = None; @@ -6163,7 +6163,7 @@ impl Channel { #[cfg(any(test, fuzzing))] { - if !self.is_outbound() { + if !self.context.is_outbound() { let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take(); *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None; if let Some(info) = projected_commit_tx_info { @@ -6652,7 +6652,7 @@ impl Writeable for Channel { fail_reason.write(writer)?; } - if self.is_outbound() { + if self.context.is_outbound() { self.context.pending_update_fee.map(|(a, _)| a).write(writer)?; } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee { Some(feerate).write(writer)?; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8872ae6d42d..75b17caead2 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1484,16 +1484,16 @@ impl ChannelDetails { // message (as they are always the first message from the counterparty). // Else `Channel::get_counterparty_htlc_minimum_msat` could return the // default `0` value set by `Channel::new_outbound`. - outbound_htlc_minimum_msat: if channel.have_received_message() { + outbound_htlc_minimum_msat: if channel.context.have_received_message() { Some(channel.get_counterparty_htlc_minimum_msat()) } else { None }, outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(), }, funding_txo: channel.get_funding_txo(), // Note that accept_channel (or open_channel) is always the first message, so // `have_received_message` indicates that type negotiation has completed. 
- channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None }, + channel_type: if channel.context.have_received_message() { Some(channel.get_channel_type().clone()) } else { None }, short_channel_id: channel.get_short_channel_id(), - outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None }, + outbound_scid_alias: if channel.context.is_usable() { Some(channel.outbound_scid_alias()) } else { None }, inbound_scid_alias: channel.latest_inbound_scid_alias(), channel_value_satoshis: channel.get_value_satoshis(), feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()), @@ -1507,10 +1507,10 @@ impl ChannelDetails { confirmations_required: channel.minimum_depth(), confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)), force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), - is_outbound: channel.is_outbound(), - is_channel_ready: channel.is_usable(), - is_usable: channel.is_live(), - is_public: channel.should_announce(), + is_outbound: channel.context.is_outbound(), + is_channel_ready: channel.context.is_usable(), + is_usable: channel.context.is_live(), + is_public: channel.context.should_announce(), inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()), inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(), config: Some(channel.config()), @@ -1750,7 +1750,7 @@ macro_rules! handle_monitor_update_completion { &$self.node_signer, $self.genesis_hash, &$self.default_configuration, $self.best_block.read().unwrap().height()); let counterparty_node_id = $chan.get_counterparty_node_id(); - let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() { + let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() { // We only send a channel_update in the case where we are just now sending a // channel_ready and the channel is in a usable state. We may re-send a // channel_update later through the announcement_signatures process for public @@ -2116,7 +2116,7 @@ where // Note we use is_live here instead of usable which leads to somewhat confused // internal/external nomenclature, but that's ok cause that's probably what the user // really wanted anyway. - self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) + self.list_channels_with_filter(|&(_, ref channel)| channel.context.is_live()) } /// Gets the list of channels we have with a given counterparty, in random order. @@ -2650,7 +2650,7 @@ where }, Some(chan) => chan }; - if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { + if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { // Note that the behavior here should be identical to the above block - we // should NOT reveal the existence or non-existence of a private channel if // we don't allow forwards outbound over them. @@ -2669,7 +2669,7 @@ where // around to doing the actual forward, but better to fail early if we can and // hopefully an attacker trying to path-trace payments cannot make this occur // on a small/per-node/per-channel scale. - if !chan.is_live() { // channel_disabled + if !chan.context.is_live() { // channel_disabled // If the channel_update we're going to return is disabled (i.e. the // peer has been disabled for some time), return `channel_disabled`, // otherwise return `temporary_channel_failure`. 
@@ -2765,7 +2765,7 @@ where /// [`channel_update`]: msgs::ChannelUpdate /// [`internal_closing_signed`]: Self::internal_closing_signed fn get_channel_update_for_broadcast(&self, chan: &Channel<::Signer>) -> Result { - if !chan.should_announce() { + if !chan.context.should_announce() { return Err(LightningError { err: "Cannot broadcast a channel_update for a private channel".to_owned(), action: msgs::ErrorAction::IgnoreError @@ -2802,7 +2802,7 @@ where log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id())); let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..]; - let enabled = chan.is_usable() && match chan.channel_update_status() { + let enabled = chan.context.is_usable() && match chan.channel_update_status() { ChannelUpdateStatus::Enabled => true, ChannelUpdateStatus::DisabledStaged(_) => true, ChannelUpdateStatus::Disabled => false, @@ -2812,12 +2812,12 @@ where let unsigned = msgs::UnsignedChannelUpdate { chain_hash: self.genesis_hash, short_channel_id, - timestamp: chan.get_update_time_counter(), + timestamp: chan.context.get_update_time_counter(), flags: (!were_node_one) as u8 | ((!enabled as u8) << 1), cltv_expiry_delta: chan.get_cltv_expiry_delta(), htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(), htlc_maximum_msat: chan.get_announced_htlc_max_msat(), - fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(), + fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(), fee_proportional_millionths: chan.get_fee_proportional_millionths(), excess_data: Vec::new(), }; @@ -2866,7 +2866,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) { - if !chan.get().is_live() { + if !chan.get().context.is_live() { return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()}); } let funding_txo = chan.get().get_funding_txo().unwrap(); @@ -3360,7 +3360,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.get(next_hop_channel_id) { Some(chan) => { - if !chan.is_usable() { + if !chan.context.is_usable() { return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id)) }) @@ -3943,14 +3943,14 @@ where } fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { - if !chan.is_outbound() { return NotifyOption::SkipPersist; } + if !chan.context.is_outbound() { return NotifyOption::SkipPersist; } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } - if !chan.is_live() { + if !chan.context.is_live() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; @@ -4030,13 +4030,13 @@ where } match chan.channel_update_status() { - ChannelUpdateStatus::Enabled if !chan.is_live() => 
chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)), - ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)), - ChannelUpdateStatus::DisabledStaged(_) if chan.is_live() + ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)), + ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)), + ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), - ChannelUpdateStatus::EnabledStaged(_) if !chan.is_live() + ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), - ChannelUpdateStatus::DisabledStaged(mut n) if !chan.is_live() => { + ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => { n += 1; if n >= DISABLE_GOSSIP_TICKS { chan.set_channel_update_status(ChannelUpdateStatus::Disabled); @@ -4050,7 +4050,7 @@ where chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n)); } }, - ChannelUpdateStatus::EnabledStaged(mut n) if chan.is_live() => { + ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => { n += 1; if n >= ENABLE_GOSSIP_TICKS { chan.set_channel_update_status(ChannelUpdateStatus::Enabled); @@ -4227,7 +4227,7 @@ where // guess somewhat. If its a public channel, we figure best to just use the real SCID (as // we're not leaking that we have a channel with the counterparty), otherwise we try to use // an inbound SCID alias before the real SCID. - let scid_pref = if chan.should_announce() { + let scid_pref = if chan.context.should_announce() { chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) } else { chan.latest_inbound_scid_alias().or(chan.get_short_channel_id()) @@ -4712,8 +4712,8 @@ where } }; log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}", - highest_applied_update_id, channel.get().get_latest_monitor_update_id()); - if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id { + highest_applied_update_id, channel.get().context.get_latest_monitor_update_id()); + if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id { return; } handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut()); @@ -4845,7 +4845,7 @@ where ) -> usize { let mut num_unfunded_channels = 0; for (_, chan) in peer.channel_by_id.iter() { - if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 && + if !chan.context.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 && chan.get_funding_tx_confirmations(best_block_height) == 0 { num_unfunded_channels += 1; @@ -5088,7 +5088,7 @@ where node_id: counterparty_node_id.clone(), msg: announcement_sigs, }); - } else if chan.get().is_usable() { + } else if chan.get().context.is_usable() { // If we're sending an announcement_signatures, we'll send the (public) // channel_update after sending a channel_announcement when we receive our // counterparty's announcement_signatures. 
Thus, we only bother to send a @@ -5525,7 +5525,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if !chan.get().is_usable() { + if !chan.get().context.is_usable() { return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError})); } @@ -5563,7 +5563,7 @@ where match peer_state.channel_by_id.entry(chan_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_counterparty_node_id() != *counterparty_node_id { - if chan.get().should_announce() { + if chan.get().context.should_announce() { // If the announcement is about a channel of ours which is public, some // other peer may simply be forwarding all its gossip to us. Don't provide // a scary-looking error message and return Ok instead. @@ -5612,7 +5612,7 @@ where node_id: counterparty_node_id.clone(), msg, }); - } else if chan.get().is_usable() { + } else if chan.get().context.is_usable() { // If the channel is in a usable state (ie the channel is not being shut // down), send a unicast channel_update to our counterparty to make sure // they have the latest channel parameters. @@ -6479,7 +6479,7 @@ where } if let Some(channel_ready) = channel_ready_opt { send_channel_ready!(self, pending_msg_events, channel, channel_ready); - if channel.is_usable() { + if channel.context.is_usable() { log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); if let Ok(msg) = self.get_channel_update_for_unicast(channel) { pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { @@ -6911,7 +6911,7 @@ where let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|_, chan| { let retain = if chan.get_counterparty_node_id() == *counterparty_node_id { - if !chan.have_received_message() { + if !chan.context.have_received_message() { // If we created this (outbound) channel while we were disconnected from the // peer we probably failed to send the open_channel message, which is now // lost. 
We can't have had anything pending related to this channel, so we just @@ -7957,12 +7957,12 @@ where } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() || channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() || channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() || - channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() { + channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() { // But if the channel is behind of the monitor, close the channel: log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!"); log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", - log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id()); + log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true); if let Some((counterparty_node_id, funding_txo, update)) = monitor_update { pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { @@ -7996,7 +7996,7 @@ where } } else { log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}", - log_bytes!(channel.channel_id()), channel.get_latest_monitor_update_id(), + log_bytes!(channel.channel_id()), channel.context.get_latest_monitor_update_id(), monitor.get_latest_update_id()); channel.complete_all_mon_updates_through(monitor.get_latest_update_id()); if let Some(short_channel_id) = channel.get_short_channel_id() { @@ -8423,7 +8423,7 @@ where log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); return Err(DecodeError::InvalidValue); } - if chan.is_usable() { + if chan.context.is_usable() { if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() { // Note that in rare cases its possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. From ede8324397646ef7b06a42cfca8935059adfa1c4 Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 7 Jun 2023 12:15:24 +0200 Subject: [PATCH 04/25] Move `Channel::channel_id` and some other methods to `ChannelContext` impl This is one of a series of commits to make sure methods are moved by chunks so they are easily reviewable in diffs. Unfortunately they are not purely move-only as fields need to be updated for things to compile, but these should be quite clear. This commit also uses the `context` field where needed for compilation and tests to pass due to the above change. 
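As with the previous commit, the change is mechanical: getters such as channel_id(), get_funding_txo() and get_funding_tx_confirmations() move onto ChannelContext, and call sites switch from chan.foo() to chan.context.foo(). A trimmed-down sketch of the resulting delegation (not part of the patch), with hypothetical field values and only two of the moved getters; the confirmation-count logic mirrors the hunk below:

    struct ChannelContext {
        channel_id: [u8; 32],
        funding_tx_confirmation_height: u32,
    }

    impl ChannelContext {
        fn channel_id(&self) -> [u8; 32] { self.channel_id }

        // Mirrors the logic moved in this commit: 0 means "unconfirmed (or reorged
        // out)", otherwise the count includes the block the funding tx landed in.
        fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
            if self.funding_tx_confirmation_height == 0 { return 0; }
            height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
        }
    }

    struct Channel {
        pub context: ChannelContext,
    }

    fn main() {
        let chan = Channel {
            context: ChannelContext { channel_id: [0x42; 32], funding_tx_confirmation_height: 100 },
        };
        // Call sites that previously used `chan.channel_id()` now go through the context:
        assert_eq!(chan.context.channel_id()[0], 0x42);
        // At chain height 105, a funding tx confirmed at height 100 has 6 confirmations.
        assert_eq!(chan.context.get_funding_tx_confirmations(105), 6);
    }
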
--- lightning/src/ln/channel.rs | 649 +++++++++++----------- lightning/src/ln/channelmanager.rs | 318 +++++------ lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/ln/functional_tests.rs | 2 +- lightning/src/ln/onion_route_tests.rs | 2 +- lightning/src/ln/payment_tests.rs | 18 +- 6 files changed, 497 insertions(+), 496 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index b1cd603f021..74ce190a45d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -830,6 +830,230 @@ impl ChannelContext { pub fn is_live(&self) -> bool { self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0) } + + // Public utilities: + + pub fn channel_id(&self) -> [u8; 32] { + self.channel_id + } + + // Return the `temporary_channel_id` used during channel establishment. + // + // Will return `None` for channels created prior to LDK version 0.0.115. + pub fn temporary_channel_id(&self) -> Option<[u8; 32]> { + self.temporary_channel_id + } + + pub fn minimum_depth(&self) -> Option { + self.minimum_depth + } + + /// Gets the "user_id" value passed into the construction of this channel. It has no special + /// meaning and exists only to allow users to have a persistent identifier of a channel. + pub fn get_user_id(&self) -> u128 { + self.user_id + } + + /// Gets the channel's type + pub fn get_channel_type(&self) -> &ChannelTypeFeatures { + &self.channel_type + } + + /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus, + /// is_usable() returns true). + /// Allowed in any state (including after shutdown) + pub fn get_short_channel_id(&self) -> Option { + self.short_channel_id + } + + /// Allowed in any state (including after shutdown) + pub fn latest_inbound_scid_alias(&self) -> Option { + self.latest_inbound_scid_alias + } + + /// Allowed in any state (including after shutdown) + pub fn outbound_scid_alias(&self) -> u64 { + self.outbound_scid_alias + } + + /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0, + /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases. + pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) { + assert_eq!(self.outbound_scid_alias, 0); + self.outbound_scid_alias = outbound_scid_alias; + } + + /// Returns the funding_txo we either got from our peer, or were given by + /// get_outbound_funding_created. + pub fn get_funding_txo(&self) -> Option { + self.channel_transaction_parameters.funding_outpoint + } + + /// Returns the block hash in which our funding transaction was confirmed. + pub fn get_funding_tx_confirmed_in(&self) -> Option { + self.funding_tx_confirmed_in + } + + /// Returns the current number of confirmations on the funding transaction. + pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 { + if self.funding_tx_confirmation_height == 0 { + // We either haven't seen any confirmation yet, or observed a reorg. 
+ return 0; + } + + height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1) + } + + fn get_holder_selected_contest_delay(&self) -> u16 { + self.channel_transaction_parameters.holder_selected_contest_delay + } + + fn get_holder_pubkeys(&self) -> &ChannelPublicKeys { + &self.channel_transaction_parameters.holder_pubkeys + } + + pub fn get_counterparty_selected_contest_delay(&self) -> Option { + self.channel_transaction_parameters.counterparty_parameters + .as_ref().map(|params| params.selected_contest_delay) + } + + fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys { + &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys + } + + /// Allowed in any state (including after shutdown) + pub fn get_counterparty_node_id(&self) -> PublicKey { + self.counterparty_node_id + } + + /// Allowed in any state (including after shutdown) + pub fn get_holder_htlc_minimum_msat(&self) -> u64 { + self.holder_htlc_minimum_msat + } + + /// Allowed in any state (including after shutdown), but will return none before TheirInitSent + pub fn get_holder_htlc_maximum_msat(&self) -> Option { + self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat) + } + + /// Allowed in any state (including after shutdown) + pub fn get_announced_htlc_max_msat(&self) -> u64 { + return cmp::min( + // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts + // to use full capacity. This is an effort to reduce routing failures, because in many cases + // channel might have been used to route very small values (either by honest users or as DoS). + self.channel_value_satoshis * 1000 * 9 / 10, + + self.counterparty_max_htlc_value_in_flight_msat + ); + } + + /// Allowed in any state (including after shutdown) + pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 { + self.counterparty_htlc_minimum_msat + } + + /// Allowed in any state (including after shutdown), but will return none before TheirInitSent + pub fn get_counterparty_htlc_maximum_msat(&self) -> Option { + self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat) + } + + fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option { + self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| { + let holder_reserve = self.holder_selected_channel_reserve_satoshis; + cmp::min( + (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000, + party_max_htlc_value_in_flight_msat + ) + }) + } + + pub fn get_value_satoshis(&self) -> u64 { + self.channel_value_satoshis + } + + pub fn get_fee_proportional_millionths(&self) -> u32 { + self.config.options.forwarding_fee_proportional_millionths + } + + pub fn get_cltv_expiry_delta(&self) -> u16 { + cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA) + } + + pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 { + self.config.options.max_dust_htlc_exposure_msat + } + + /// Returns the previous [`ChannelConfig`] applied to this channel, if any. + pub fn prev_config(&self) -> Option { + self.prev_config.map(|prev_config| prev_config.0) + } + + // Checks whether we should emit a `ChannelPending` event. + pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool { + self.is_funding_initiated() && !self.channel_pending_event_emitted + } + + // Returns whether we already emitted a `ChannelPending` event. 
+ pub(crate) fn channel_pending_event_emitted(&self) -> bool { + self.channel_pending_event_emitted + } + + // Remembers that we already emitted a `ChannelPending` event. + pub(crate) fn set_channel_pending_event_emitted(&mut self) { + self.channel_pending_event_emitted = true; + } + + // Checks whether we should emit a `ChannelReady` event. + pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool { + self.is_usable() && !self.channel_ready_event_emitted + } + + // Remembers that we already emitted a `ChannelReady` event. + pub(crate) fn set_channel_ready_event_emitted(&mut self) { + self.channel_ready_event_emitted = true; + } + + /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once + /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will + /// no longer be considered when forwarding HTLCs. + pub fn maybe_expire_prev_config(&mut self) { + if self.prev_config.is_none() { + return; + } + let prev_config = self.prev_config.as_mut().unwrap(); + prev_config.1 += 1; + if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS { + self.prev_config = None; + } + } + + /// Returns the current [`ChannelConfig`] applied to the channel. + pub fn config(&self) -> ChannelConfig { + self.config.options + } + + /// Updates the channel's config. A bool is returned indicating whether the config update + /// applied resulted in a new ChannelUpdate message. + pub fn update_config(&mut self, config: &ChannelConfig) -> bool { + let did_channel_update = + self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths || + self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat || + self.config.options.cltv_expiry_delta != config.cltv_expiry_delta; + if did_channel_update { + self.prev_config = Some((self.config.options, 0)); + // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay + // policy change to propagate throughout the network. + self.update_time_counter += 1; + } + self.config.options = *config; + did_channel_update + } + + /// Returns true if funding_created was sent/received. + pub fn is_funding_initiated(&self) -> bool { + self.channel_state >= ChannelState::FundingSent as u32 + } } // Internal utility functions for channels @@ -1634,7 +1858,7 @@ impl Channel { log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), - get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound()), + get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()), log_bytes!(self.context.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); macro_rules! 
get_htlc_in_commitment { @@ -1786,9 +2010,9 @@ impl Channel { let mut value_to_a = if local { value_to_self } else { value_to_remote }; let mut value_to_b = if local { value_to_remote } else { value_to_self }; let (funding_pubkey_a, funding_pubkey_b) = if local { - (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey) + (self.context.get_holder_pubkeys().funding_pubkey, self.context.get_counterparty_pubkeys().funding_pubkey) } else { - (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey) + (self.context.get_counterparty_pubkeys().funding_pubkey, self.context.get_holder_pubkeys().funding_pubkey) }; if value_to_a >= (broadcaster_dust_limit_satoshis as i64) { @@ -1922,9 +2146,9 @@ impl Channel { /// TODO Some magic rust shit to compile-time check this? fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys { let per_commitment_point = self.context.holder_signer.get_per_commitment_point(commitment_number, &self.context.secp_ctx); - let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint; - let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; - let counterparty_pubkeys = self.get_counterparty_pubkeys(); + let delayed_payment_base = &self.context.get_holder_pubkeys().delayed_payment_basepoint; + let htlc_basepoint = &self.context.get_holder_pubkeys().htlc_basepoint; + let counterparty_pubkeys = self.context.get_counterparty_pubkeys(); TxCreationKeys::derive_new(&self.context.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint) } @@ -1936,9 +2160,9 @@ impl Channel { fn build_remote_transaction_keys(&self) -> TxCreationKeys { //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we //may see payments to it! - let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint; - let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; - let counterparty_pubkeys = self.get_counterparty_pubkeys(); + let revocation_basepoint = &self.context.get_holder_pubkeys().revocation_basepoint; + let htlc_basepoint = &self.context.get_holder_pubkeys().htlc_basepoint; + let counterparty_pubkeys = self.context.get_counterparty_pubkeys(); TxCreationKeys::derive_new(&self.context.secp_ctx, &self.context.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint) } @@ -1947,7 +2171,7 @@ impl Channel { /// pays to get_funding_redeemscript().to_v0_p2wsh()). 
/// Panics if called before accept_channel/new_from_req pub fn get_funding_redeemscript(&self) -> Script { - make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey()) + make_funding_redeemscript(&self.context.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey()) } /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`] @@ -1998,7 +2222,7 @@ impl Channel { InboundHTLCState::LocalRemoved(ref reason) => { if let &InboundHTLCRemovalReason::Fulfill(_) = reason { } else { - log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id())); + log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id())); debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); } return UpdateFulfillFetch::DuplicateClaim {}; @@ -2051,7 +2275,7 @@ impl Channel { }, &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => { if htlc_id_arg == htlc_id { - log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.channel_id())); + log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id())); // TODO: We may actually be able to switch to a fulfill here, though its // rare enough it may not be worth the complexity burden. debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); @@ -2061,7 +2285,7 @@ impl Channel { _ => {} } } - log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.channel_id()), self.context.channel_state); + log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! 
Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state); self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, }); @@ -2087,7 +2311,7 @@ impl Channel { monitor_update, htlc_value_msat, msg: Some(msgs::UpdateFulfillHTLC { - channel_id: self.channel_id(), + channel_id: self.context.channel_id(), htlc_id: htlc_id_arg, payment_preimage: payment_preimage_arg, }), @@ -2231,7 +2455,7 @@ impl Channel { _ => {} } } - log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); + log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id())); self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC { htlc_id: htlc_id_arg, err_packet, @@ -2239,14 +2463,14 @@ impl Channel { return Ok(None); } - log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); + log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id())); { let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone())); } Ok(Some(msgs::UpdateFailHTLC { - channel_id: self.channel_id(), + channel_id: self.context.channel_id(), htlc_id: htlc_id_arg, reason: err_packet })) @@ -2396,7 +2620,7 @@ impl Channel { log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), - encode::serialize_hex(&funding_script), log_bytes!(self.channel_id())); + encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); } @@ -2406,7 +2630,7 @@ impl Channel { let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", - log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0; @@ -2416,7 +2640,7 @@ impl Channel { } fn counterparty_funding_pubkey(&self) -> &PublicKey { - &self.get_counterparty_pubkeys().funding_pubkey + &self.context.get_counterparty_pubkeys().funding_pubkey } pub fn funding_created( @@ -2467,7 +2691,7 @@ impl Channel { initial_commitment_tx, msg.signature, Vec::new(), - &self.get_holder_pubkeys().funding_pubkey, + &self.context.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey() ); @@ -2478,12 
+2702,12 @@ impl Channel { let funding_redeemscript = self.get_funding_redeemscript(); let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); - let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); + let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, - shutdown_script, self.get_holder_selected_contest_delay(), + shutdown_script, self.context.get_holder_selected_contest_delay(), &self.context.destination_script, (funding_txo, funding_txo_script.clone()), &self.context.channel_transaction_parameters, funding_redeemscript.clone(), self.context.channel_value_satoshis, @@ -2497,7 +2721,7 @@ impl Channel { self.context.cur_counterparty_commitment_transaction_number -= 1; self.context.cur_holder_commitment_transaction_number -= 1; - log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id())); + log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id())); let need_channel_ready = self.check_get_channel_ready(0).is_some(); self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); @@ -2539,7 +2763,7 @@ impl Channel { let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", - log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); let holder_signer = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); let initial_commitment_tx = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; @@ -2548,7 +2772,7 @@ impl Channel { let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); // They sign our commitment transaction, allowing us to broadcast the tx if we wish. 
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) { + if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) { return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned())); } } @@ -2557,7 +2781,7 @@ impl Channel { initial_commitment_tx, msg.signature, Vec::new(), - &self.get_holder_pubkeys().funding_pubkey, + &self.context.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey() ); @@ -2566,14 +2790,14 @@ impl Channel { let funding_redeemscript = self.get_funding_redeemscript(); - let funding_txo = self.get_funding_txo().unwrap(); + let funding_txo = self.context.get_funding_txo().unwrap(); let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); - let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); + let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, - shutdown_script, self.get_holder_selected_contest_delay(), + shutdown_script, self.context.get_holder_selected_contest_delay(), &self.context.destination_script, (funding_txo, funding_txo_script), &self.context.channel_transaction_parameters, funding_redeemscript.clone(), self.context.channel_value_satoshis, @@ -2587,7 +2811,7 @@ impl Channel { self.context.cur_holder_commitment_transaction_number -= 1; self.context.cur_counterparty_commitment_transaction_number -= 1; - log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id())); + log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id())); let need_channel_ready = self.check_get_channel_ready(0).is_some(); self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); @@ -2661,7 +2885,7 @@ impl Channel { self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point; self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); - log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.channel_id())); + log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id())); Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger)) } @@ -2849,17 +3073,17 @@ impl Channel { self.context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000) }; let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 { + if 
on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > self.context.get_max_dust_htlc_exposure_msat() as i64 { remaining_msat_below_dust_exposure_limit = - Some(self.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat)); + Some(self.context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat)); dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000); } let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; - if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 { + if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > self.context.get_max_dust_htlc_exposure_msat() as i64 { remaining_msat_below_dust_exposure_limit = Some(cmp::min( remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()), - self.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat))); + self.context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat))); dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000); } @@ -3166,9 +3390,9 @@ impl Channel { let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; - if on_counterparty_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { + if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() { log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", - on_counterparty_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat()); + on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat()); pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); } } @@ -3176,9 +3400,9 @@ impl Channel { let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; - if on_holder_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { + if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() { log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", - on_holder_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat()); + on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat()); pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); } } @@ -3219,7 +3443,7 @@ impl Channel { if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat { // Note that if the pending_forward_status is not updated 
here, then it's because we're already failing // the HTLC, i.e. its status is already set to failing. - log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.channel_id())); + log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id())); pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); } } else { @@ -3347,7 +3571,7 @@ impl Channel { log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}", log_bytes!(msg.signature.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), - log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id())); + log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) { return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned())); } @@ -3409,7 +3633,7 @@ impl Channel { for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() { if let Some(_) = htlc.transaction_output_index { let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw, - self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(), + self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(), false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key); let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys); @@ -3417,7 +3641,7 @@ impl Channel { let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]); log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()), - encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id())); + encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id())); if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) { return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())); } @@ -3439,7 +3663,7 @@ impl Channel { commitment_stats.tx, msg.signature, msg.htlc_signatures.clone(), - &self.get_holder_pubkeys().funding_pubkey, + &self.context.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey() ); @@ -3537,7 +3761,7 @@ impl Channel { } else { false }; log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.", - log_bytes!(self.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" }); + log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" }); self.monitor_updating_paused(true, 
need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new()); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -3558,7 +3782,7 @@ impl Channel { assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0); if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() { log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(), - if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.channel_id())); + if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id())); let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet! @@ -3585,7 +3809,7 @@ impl Channel { match e { ChannelError::Ignore(ref msg) => { log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", - log_bytes!(payment_hash.0), msg, log_bytes!(self.channel_id())); + log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id())); // If we fail to send here, then this HTLC should // be failed backwards. Failing to send here // indicates that this HTLC may keep being put back @@ -3650,7 +3874,7 @@ impl Channel { monitor_update.updates.append(&mut additional_update.updates); log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.", - log_bytes!(self.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" }, + log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" }, update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len()); self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); @@ -3733,7 +3957,7 @@ impl Channel { self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived; } - log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.channel_id())); + log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id())); let mut to_forward_infos = Vec::new(); let mut revoked_htlcs = Vec::new(); let mut finalized_claimed_htlcs = Vec::new(); @@ -3862,7 +4086,7 @@ impl Channel { self.context.monitor_pending_forwards.append(&mut to_forward_infos); self.context.monitor_pending_failures.append(&mut revoked_htlcs); self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs); - log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id())); + log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id())); return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update))); } @@ -3887,11 +4111,11 @@ impl Channel { monitor_update.updates.append(&mut additional_update.updates); log_debug!(logger, "Received a valid revoke_and_ack for channel {}. 
Responding with a commitment update with {} HTLCs failed.", - log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len()); + log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len()); self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) } else { - log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id())); + log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id())); self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) } @@ -3941,11 +4165,11 @@ impl Channel { // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`. let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() { + if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() { log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); return None; } - if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() { + if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() { log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); return None; } @@ -4043,7 +4267,7 @@ impl Channel { self.context.sent_message_awaiting_response = None; self.context.channel_state |= ChannelState::PeerDisconnected as u32; - log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id())); + log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id())); } /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted. 
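The hunks above and below apply the same mechanical rewrite: state that used to be reached directly, such as `self.channel_id()`, `self.get_holder_pubkeys()` or `self.get_counterparty_node_id()`, is now reached through the wrapped context, i.e. `self.context.channel_id()` and friends. A minimal sketch of that delegation pattern, using hypothetical stripped-down types rather than the real definitions in channel.rs, illustrates what every call site in this file is being migrated to:

    // Hypothetical, simplified illustration of the accessor-on-context pattern
    // this patch applies; the field names and method bodies here are placeholders,
    // not the real channel.rs definitions.
    struct ChannelContext {
        channel_id: [u8; 32],
        channel_state: u32,
    }

    impl ChannelContext {
        // Accessors move onto the context...
        fn channel_id(&self) -> [u8; 32] { self.channel_id }
    }

    struct Channel {
        context: ChannelContext,
    }

    impl Channel {
        fn describe(&self) -> String {
            // ...so methods remaining on `Channel` read through `self.context`.
            format!("channel {:?} in state {}", self.context.channel_id(), self.context.channel_state)
        }
    }

    fn main() {
        let chan = Channel { context: ChannelContext { channel_id: [0u8; 32], channel_state: 1 } };
        println!("{}", chan.describe());
    }

The same pattern is why pure accessor blocks such as `channel_id()`, `get_funding_txo()` and `get_short_channel_id()` are deleted from `impl Channel` further down in this file: callers now reach them via `self.context`, so the accessors themselves live on the context going forward.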
@@ -4116,7 +4340,7 @@ impl Channel { self.context.monitor_pending_channel_ready = false; let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); Some(msgs::ChannelReady { - channel_id: self.channel_id(), + channel_id: self.context.channel_id(), next_per_commitment_point, short_channel_id_alias: Some(self.context.outbound_scid_alias), }) @@ -4152,7 +4376,7 @@ impl Channel { self.context.monitor_pending_commitment_signed = false; let order = self.context.resend_order.clone(); log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first", - log_bytes!(self.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" }, + log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" }, if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); MonitorRestoreUpdates { @@ -4182,11 +4406,11 @@ impl Channel { let outbound_stats = self.get_outbound_pending_htlc_stats(None); let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() { + if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() { return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)", msg.feerate_per_kw, holder_tx_dust_exposure))); } - if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() { + if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() { return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)", msg.feerate_per_kw, counterparty_tx_dust_exposure))); } @@ -4215,7 +4439,7 @@ impl Channel { for htlc in self.context.pending_outbound_htlcs.iter() { if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state { update_add_htlcs.push(msgs::UpdateAddHTLC { - channel_id: self.channel_id(), + channel_id: self.context.channel_id(), htlc_id: htlc.htlc_id, amount_msat: htlc.amount_msat, payment_hash: htlc.payment_hash, @@ -4230,14 +4454,14 @@ impl Channel { match reason { &InboundHTLCRemovalReason::FailRelay(ref err_packet) => { update_fail_htlcs.push(msgs::UpdateFailHTLC { - channel_id: self.channel_id(), + channel_id: self.context.channel_id(), htlc_id: htlc.htlc_id, reason: err_packet.clone() }); }, &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => { update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC { - channel_id: self.channel_id(), + channel_id: self.context.channel_id(), htlc_id: htlc.htlc_id, sha256_of_onion: sha256_of_onion.clone(), failure_code: failure_code.clone(), @@ -4245,7 +4469,7 @@ impl Channel { }, &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => { update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC { - channel_id: self.channel_id(), + channel_id: self.context.channel_id(), htlc_id: 
htlc.htlc_id, payment_preimage: payment_preimage.clone(), }); @@ -4256,13 +4480,13 @@ impl Channel { let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() { Some(msgs::UpdateFee { - channel_id: self.channel_id(), + channel_id: self.context.channel_id(), feerate_per_kw: self.context.pending_update_fee.unwrap().0, }) } else { None }; log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", - log_bytes!(self.channel_id()), if update_fee.is_some() { " update_fee," } else { "" }, + log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" }, update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, @@ -4366,7 +4590,7 @@ impl Channel { let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); return Ok(ReestablishResponses { channel_ready: Some(msgs::ChannelReady { - channel_id: self.channel_id(), + channel_id: self.context.channel_id(), next_per_commitment_point, short_channel_id_alias: Some(self.context.outbound_scid_alias), }), @@ -4405,7 +4629,7 @@ impl Channel { // We should never have to worry about MonitorUpdateInProgress resending ChannelReady let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); Some(msgs::ChannelReady { - channel_id: self.channel_id(), + channel_id: self.context.channel_id(), next_per_commitment_point, short_channel_id_alias: Some(self.context.outbound_scid_alias), }) @@ -4413,9 +4637,9 @@ impl Channel { if msg.next_local_commitment_number == next_counterparty_commitment_number { if required_revoke.is_some() { - log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.channel_id())); + log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id())); } else { - log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id())); + log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id())); } Ok(ReestablishResponses { @@ -4426,9 +4650,9 @@ impl Channel { }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { if required_revoke.is_some() { - log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id())); + log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id())); } else { - log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.channel_id())); + log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id())); } if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { @@ -4695,7 +4919,7 @@ impl Channel { tx.input[0].witness.push(Vec::new()); // First is the multisig dummy - let funding_key = self.get_holder_pubkeys().funding_pubkey.serialize(); + let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize(); let counterparty_funding_key = 
self.counterparty_funding_pubkey().serialize(); let mut holder_sig = sig.serialize_der().to_vec(); holder_sig.push(EcdsaSighashType::All as u8); @@ -4747,7 +4971,7 @@ impl Channel { } let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis); - match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) { + match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) { Ok(_) => {}, Err(_e) => { // The remote end may have decided to revoke their output due to inconsistent dust @@ -4865,224 +5089,6 @@ impl Channel { } } - // Public utilities: - - pub fn channel_id(&self) -> [u8; 32] { - self.context.channel_id - } - - // Return the `temporary_channel_id` used during channel establishment. - // - // Will return `None` for channels created prior to LDK version 0.0.115. - pub fn temporary_channel_id(&self) -> Option<[u8; 32]> { - self.context.temporary_channel_id - } - - pub fn minimum_depth(&self) -> Option { - self.context.minimum_depth - } - - /// Gets the "user_id" value passed into the construction of this channel. It has no special - /// meaning and exists only to allow users to have a persistent identifier of a channel. - pub fn get_user_id(&self) -> u128 { - self.context.user_id - } - - /// Gets the channel's type - pub fn get_channel_type(&self) -> &ChannelTypeFeatures { - &self.context.channel_type - } - - /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus, - /// is_usable() returns true). - /// Allowed in any state (including after shutdown) - pub fn get_short_channel_id(&self) -> Option { - self.context.short_channel_id - } - - /// Allowed in any state (including after shutdown) - pub fn latest_inbound_scid_alias(&self) -> Option { - self.context.latest_inbound_scid_alias - } - - /// Allowed in any state (including after shutdown) - pub fn outbound_scid_alias(&self) -> u64 { - self.context.outbound_scid_alias - } - /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0, - /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases. - pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) { - assert_eq!(self.context.outbound_scid_alias, 0); - self.context.outbound_scid_alias = outbound_scid_alias; - } - - /// Returns the funding_txo we either got from our peer, or were given by - /// get_outbound_funding_created. - pub fn get_funding_txo(&self) -> Option { - self.context.channel_transaction_parameters.funding_outpoint - } - - /// Returns the block hash in which our funding transaction was confirmed. - pub fn get_funding_tx_confirmed_in(&self) -> Option { - self.context.funding_tx_confirmed_in - } - - /// Returns the current number of confirmations on the funding transaction. - pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 { - if self.context.funding_tx_confirmation_height == 0 { - // We either haven't seen any confirmation yet, or observed a reorg. 
- return 0; - } - - height.checked_sub(self.context.funding_tx_confirmation_height).map_or(0, |c| c + 1) - } - - fn get_holder_selected_contest_delay(&self) -> u16 { - self.context.channel_transaction_parameters.holder_selected_contest_delay - } - - fn get_holder_pubkeys(&self) -> &ChannelPublicKeys { - &self.context.channel_transaction_parameters.holder_pubkeys - } - - pub fn get_counterparty_selected_contest_delay(&self) -> Option { - self.context.channel_transaction_parameters.counterparty_parameters - .as_ref().map(|params| params.selected_contest_delay) - } - - fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys { - &self.context.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys - } - - /// Allowed in any state (including after shutdown) - pub fn get_counterparty_node_id(&self) -> PublicKey { - self.context.counterparty_node_id - } - - /// Allowed in any state (including after shutdown) - pub fn get_holder_htlc_minimum_msat(&self) -> u64 { - self.context.holder_htlc_minimum_msat - } - - /// Allowed in any state (including after shutdown), but will return none before TheirInitSent - pub fn get_holder_htlc_maximum_msat(&self) -> Option { - self.get_htlc_maximum_msat(self.context.holder_max_htlc_value_in_flight_msat) - } - - /// Allowed in any state (including after shutdown) - pub fn get_announced_htlc_max_msat(&self) -> u64 { - return cmp::min( - // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts - // to use full capacity. This is an effort to reduce routing failures, because in many cases - // channel might have been used to route very small values (either by honest users or as DoS). - self.context.channel_value_satoshis * 1000 * 9 / 10, - - self.context.counterparty_max_htlc_value_in_flight_msat - ); - } - - /// Allowed in any state (including after shutdown) - pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 { - self.context.counterparty_htlc_minimum_msat - } - - /// Allowed in any state (including after shutdown), but will return none before TheirInitSent - pub fn get_counterparty_htlc_maximum_msat(&self) -> Option { - self.get_htlc_maximum_msat(self.context.counterparty_max_htlc_value_in_flight_msat) - } - - fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option { - self.context.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| { - let holder_reserve = self.context.holder_selected_channel_reserve_satoshis; - cmp::min( - (self.context.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000, - party_max_htlc_value_in_flight_msat - ) - }) - } - - pub fn get_value_satoshis(&self) -> u64 { - self.context.channel_value_satoshis - } - - pub fn get_fee_proportional_millionths(&self) -> u32 { - self.context.config.options.forwarding_fee_proportional_millionths - } - - pub fn get_cltv_expiry_delta(&self) -> u16 { - cmp::max(self.context.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA) - } - - pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 { - self.context.config.options.max_dust_htlc_exposure_msat - } - - /// Returns the previous [`ChannelConfig`] applied to this channel, if any. - pub fn prev_config(&self) -> Option { - self.context.prev_config.map(|prev_config| prev_config.0) - } - - // Checks whether we should emit a `ChannelPending` event. 
- pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool { - self.is_funding_initiated() && !self.context.channel_pending_event_emitted - } - - // Returns whether we already emitted a `ChannelPending` event. - pub(crate) fn channel_pending_event_emitted(&self) -> bool { - self.context.channel_pending_event_emitted - } - - // Remembers that we already emitted a `ChannelPending` event. - pub(crate) fn set_channel_pending_event_emitted(&mut self) { - self.context.channel_pending_event_emitted = true; - } - - // Checks whether we should emit a `ChannelReady` event. - pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool { - self.context.is_usable() && !self.context.channel_ready_event_emitted - } - - // Remembers that we already emitted a `ChannelReady` event. - pub(crate) fn set_channel_ready_event_emitted(&mut self) { - self.context.channel_ready_event_emitted = true; - } - - /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once - /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will - /// no longer be considered when forwarding HTLCs. - pub fn maybe_expire_prev_config(&mut self) { - if self.context.prev_config.is_none() { - return; - } - let prev_config = self.context.prev_config.as_mut().unwrap(); - prev_config.1 += 1; - if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS { - self.context.prev_config = None; - } - } - - /// Returns the current [`ChannelConfig`] applied to the channel. - pub fn config(&self) -> ChannelConfig { - self.context.config.options - } - - /// Updates the channel's config. A bool is returned indicating whether the config update - /// applied resulted in a new ChannelUpdate message. - pub fn update_config(&mut self, config: &ChannelConfig) -> bool { - let did_channel_update = - self.context.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths || - self.context.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat || - self.context.config.options.cltv_expiry_delta != config.cltv_expiry_delta; - if did_channel_update { - self.context.prev_config = Some((self.context.config.options, 0)); - // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay - // policy change to propagate throughout the network. - self.context.update_time_counter += 1; - } - self.context.config.options = *config; - did_channel_update - } - fn internal_htlc_satisfies_config( &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig, ) -> Result<(), (&'static str, u16)> { @@ -5110,9 +5116,9 @@ impl Channel { pub fn htlc_satisfies_config( &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, ) -> Result<(), (&'static str, u16)> { - self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.config()) + self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config()) .or_else(|err| { - if let Some(prev_config) = self.prev_config() { + if let Some(prev_config) = self.context.prev_config() { self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config) } else { Err(err) @@ -5250,11 +5256,6 @@ impl Channel { .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) }) } - /// Returns true if funding_created was sent/received. 
- pub fn is_funding_initiated(&self) -> bool { - self.context.channel_state >= ChannelState::FundingSent as u32 - } - /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor. /// If the channel is outbound, this implies we have not yet broadcasted the funding /// transaction. If the channel is inbound, this implies simply that the channel has not @@ -5397,7 +5398,7 @@ impl Channel { NS::Target: NodeSigner, L::Target: Logger { - if let Some(funding_txo) = self.get_funding_txo() { + if let Some(funding_txo) = self.context.get_funding_txo() { for &(index_in_block, tx) in txdata.iter() { // Check if the transaction is the expected funding transaction, and if it is, // check that it pays the right amount to the right script. @@ -5448,7 +5449,7 @@ impl Channel { } for inp in tx.input.iter() { if inp.previous_output == funding_txo.into_bitcoin_outpoint() { - log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id())); + log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.context.channel_id())); return Err(ClosureReason::CommitmentTxConfirmed); } } @@ -5597,7 +5598,7 @@ impl Channel { } let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - let keys = self.get_holder_pubkeys(); + let keys = self.context.get_holder_pubkeys(); msgs::OpenChannel { chain_hash, @@ -5609,7 +5610,7 @@ impl Channel { channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, htlc_minimum_msat: self.context.holder_htlc_minimum_msat, feerate_per_kw: self.context.feerate_per_kw as u32, - to_self_delay: self.get_holder_selected_contest_delay(), + to_self_delay: self.context.get_holder_selected_contest_delay(), max_accepted_htlcs: self.context.holder_max_accepted_htlcs, funding_pubkey: keys.funding_pubkey, revocation_basepoint: keys.revocation_basepoint, @@ -5667,7 +5668,7 @@ impl Channel { /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel fn generate_accept_channel_message(&self) -> msgs::AcceptChannel { let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - let keys = self.get_holder_pubkeys(); + let keys = self.context.get_holder_pubkeys(); msgs::AcceptChannel { temporary_channel_id: self.context.channel_id, @@ -5676,7 +5677,7 @@ impl Channel { channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, htlc_minimum_msat: self.context.holder_htlc_minimum_msat, minimum_depth: self.context.minimum_depth.unwrap(), - to_self_delay: self.get_holder_selected_contest_delay(), + to_self_delay: self.context.get_holder_selected_contest_delay(), max_accepted_htlcs: self.context.holder_max_accepted_htlcs, funding_pubkey: keys.funding_pubkey, revocation_basepoint: keys.revocation_basepoint, @@ -5784,17 +5785,17 @@ impl Channel { let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node) .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?); - let counterparty_node_id = NodeId::from_pubkey(&self.get_counterparty_node_id()); + let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id()); let were_node_one = node_id.as_slice() < 
counterparty_node_id.as_slice(); let msg = msgs::UnsignedChannelAnnouncement { features: channelmanager::provided_channel_features(&user_config), chain_hash, - short_channel_id: self.get_short_channel_id().unwrap(), + short_channel_id: self.context.get_short_channel_id().unwrap(), node_id_1: if were_node_one { node_id } else { counterparty_node_id }, node_id_2: if were_node_one { counterparty_node_id } else { node_id }, - bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.get_holder_pubkeys().funding_pubkey } else { self.counterparty_funding_pubkey() }), - bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.counterparty_funding_pubkey() } else { &self.get_holder_pubkeys().funding_pubkey }), + bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.counterparty_funding_pubkey() }), + bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }), excess_data: Vec::new(), }; @@ -5826,7 +5827,7 @@ impl Channel { return None; } - log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.channel_id())); + log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id())); let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) { Ok(a) => a, Err(e) => { @@ -5851,8 +5852,8 @@ impl Channel { self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent; Some(msgs::AnnouncementSignatures { - channel_id: self.channel_id(), - short_channel_id: self.get_short_channel_id().unwrap(), + channel_id: self.context.channel_id(), + short_channel_id: self.context.get_short_channel_id().unwrap(), node_signature: our_node_sig, bitcoin_signature: our_bitcoin_sig, }) @@ -5895,10 +5896,10 @@ impl Channel { let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]); - if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.get_counterparty_node_id()).is_err() { + if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() { return Err(ChannelError::Close(format!( "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. 
their_node_key is {:?}", - &announcement, self.get_counterparty_node_id()))); + &announcement, self.context.get_counterparty_node_id()))); } if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.counterparty_funding_pubkey()).is_err() { return Err(ChannelError::Close(format!( @@ -5948,15 +5949,15 @@ impl Channel { let dummy_pubkey = PublicKey::from_slice(&pk).unwrap(); let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap(); - log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id())); + log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id())); remote_last_secret } else { - log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id())); + log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id())); [0;32] }; self.mark_awaiting_response(); msgs::ChannelReestablish { - channel_id: self.channel_id(), + channel_id: self.context.channel_id(), // The protocol has two different commitment number concepts - the "commitment // transaction number", which starts from 0 and counts up, and the "revocation key // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track @@ -6208,14 +6209,14 @@ impl Channel { log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction), &counterparty_commitment_txid, encode::serialize_hex(&self.get_funding_redeemscript()), - log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.channel_id())); + log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id())); for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) { log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", - encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), + encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)), log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()), - log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.channel_id())); + log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id())); } } @@ -6386,7 +6387,7 @@ impl Channel { // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and // return them to fail the payment. 
let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len()); - let counterparty_node_id = self.get_counterparty_node_id(); + let counterparty_node_id = self.context.get_counterparty_node_id(); for htlc_update in self.context.holding_cell_htlc_updates.drain(..) { match htlc_update { HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => { @@ -6395,7 +6396,7 @@ impl Channel { _ => {} } } - let monitor_update = if let Some(funding_txo) = self.get_funding_txo() { + let monitor_update = if let Some(funding_txo) = self.context.get_funding_txo() { // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent), // returning a channel monitor update here would imply a channel monitor update before // we even registered the channel monitor to begin with, which is invalid. @@ -6405,7 +6406,7 @@ impl Channel { // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more. if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 { self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID; - Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate { + Some((self.context.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }], })) @@ -7917,7 +7918,7 @@ mod tests { let ref htlc = htlcs[$htlc_idx]; let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw, - chan.get_counterparty_selected_contest_delay().unwrap(), + chan.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, $opt_anchors, false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key); let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys); let htlc_sighashtype = if $opt_anchors { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All }; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 75b17caead2..61fca301fa0 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1473,9 +1473,9 @@ impl ChannelDetails { let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = channel.get_holder_counterparty_selected_channel_reserve_satoshis(); ChannelDetails { - channel_id: channel.channel_id(), + channel_id: channel.context.channel_id(), counterparty: ChannelCounterparty { - node_id: channel.get_counterparty_node_id(), + node_id: channel.context.get_counterparty_node_id(), features: latest_features, unspendable_punishment_reserve: to_remote_reserve_satoshis, forwarding_info: channel.counterparty_forwarding_info(), @@ -1485,17 +1485,17 @@ impl ChannelDetails { // Else `Channel::get_counterparty_htlc_minimum_msat` could return the // default `0` value set by `Channel::new_outbound`. 
outbound_htlc_minimum_msat: if channel.context.have_received_message() { - Some(channel.get_counterparty_htlc_minimum_msat()) } else { None }, - outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(), + Some(channel.context.get_counterparty_htlc_minimum_msat()) } else { None }, + outbound_htlc_maximum_msat: channel.context.get_counterparty_htlc_maximum_msat(), }, - funding_txo: channel.get_funding_txo(), + funding_txo: channel.context.get_funding_txo(), // Note that accept_channel (or open_channel) is always the first message, so // `have_received_message` indicates that type negotiation has completed. - channel_type: if channel.context.have_received_message() { Some(channel.get_channel_type().clone()) } else { None }, - short_channel_id: channel.get_short_channel_id(), - outbound_scid_alias: if channel.context.is_usable() { Some(channel.outbound_scid_alias()) } else { None }, - inbound_scid_alias: channel.latest_inbound_scid_alias(), - channel_value_satoshis: channel.get_value_satoshis(), + channel_type: if channel.context.have_received_message() { Some(channel.context.get_channel_type().clone()) } else { None }, + short_channel_id: channel.context.get_short_channel_id(), + outbound_scid_alias: if channel.context.is_usable() { Some(channel.context.outbound_scid_alias()) } else { None }, + inbound_scid_alias: channel.context.latest_inbound_scid_alias(), + channel_value_satoshis: channel.context.get_value_satoshis(), feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()), unspendable_punishment_reserve: to_self_reserve_satoshis, balance_msat: balance.balance_msat, @@ -1503,17 +1503,17 @@ impl ChannelDetails { outbound_capacity_msat: balance.outbound_capacity_msat, next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat, - user_channel_id: channel.get_user_id(), - confirmations_required: channel.minimum_depth(), - confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)), - force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), + user_channel_id: channel.context.get_user_id(), + confirmations_required: channel.context.minimum_depth(), + confirmations: Some(channel.context.get_funding_tx_confirmations(best_block_height)), + force_close_spend_delay: channel.context.get_counterparty_selected_contest_delay(), is_outbound: channel.context.is_outbound(), is_channel_ready: channel.context.is_usable(), is_usable: channel.context.is_live(), is_public: channel.context.should_announce(), - inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()), - inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(), - config: Some(channel.config()), + inbound_htlc_minimum_msat: Some(channel.context.get_holder_htlc_minimum_msat()), + inbound_htlc_maximum_msat: channel.context.get_holder_htlc_maximum_msat(), + config: Some(channel.context.config()), } } } @@ -1615,9 +1615,9 @@ macro_rules! handle_error { macro_rules! 
update_maps_on_chan_removal { ($self: expr, $channel: expr) => {{ - $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id()); + $self.id_to_peer.lock().unwrap().remove(&$channel.context.channel_id()); let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); - if let Some(short_id) = $channel.get_short_channel_id() { + if let Some(short_id) = $channel.context.get_short_channel_id() { short_to_chan_info.remove(&short_id); } else { // If the channel was never confirmed on-chain prior to its closure, remove the @@ -1626,10 +1626,10 @@ macro_rules! update_maps_on_chan_removal { // also don't want a counterparty to be able to trivially cause a memory leak by simply // opening a million channels with us which are closed before we ever reach the funding // stage. - let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias()); + let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.context.outbound_scid_alias()); debug_assert!(alias_removed); } - short_to_chan_info.remove(&$channel.outbound_scid_alias()); + short_to_chan_info.remove(&$channel.context.outbound_scid_alias()); }} } @@ -1647,7 +1647,7 @@ macro_rules! convert_chan_err { log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); update_maps_on_chan_removal!($self, $channel); let shutdown_res = $channel.force_shutdown(true); - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(), shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) }, } @@ -1697,18 +1697,18 @@ macro_rules! remove_channel { macro_rules! send_channel_ready { ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{ $pending_msg_events.push(events::MessageSendEvent::SendChannelReady { - node_id: $channel.get_counterparty_node_id(), + node_id: $channel.context.get_counterparty_node_id(), msg: $channel_ready_msg, }); // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so // we allow collisions, but we shouldn't ever be updating the channel ID pointed to. 
let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); - let outbound_alias_insert = short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id())); - assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), + let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id())); + assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); - if let Some(real_scid) = $channel.get_short_channel_id() { - let scid_insert = short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id())); - assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), + if let Some(real_scid) = $channel.context.get_short_channel_id() { + let scid_insert = short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id())); + assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); } }} @@ -1716,30 +1716,30 @@ macro_rules! send_channel_ready { macro_rules! emit_channel_pending_event { ($locked_events: expr, $channel: expr) => { - if $channel.should_emit_channel_pending_event() { + if $channel.context.should_emit_channel_pending_event() { $locked_events.push_back((events::Event::ChannelPending { - channel_id: $channel.channel_id(), - former_temporary_channel_id: $channel.temporary_channel_id(), - counterparty_node_id: $channel.get_counterparty_node_id(), - user_channel_id: $channel.get_user_id(), - funding_txo: $channel.get_funding_txo().unwrap().into_bitcoin_outpoint(), + channel_id: $channel.context.channel_id(), + former_temporary_channel_id: $channel.context.temporary_channel_id(), + counterparty_node_id: $channel.context.get_counterparty_node_id(), + user_channel_id: $channel.context.get_user_id(), + funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(), }, None)); - $channel.set_channel_pending_event_emitted(); + $channel.context.set_channel_pending_event_emitted(); } } } macro_rules! emit_channel_ready_event { ($locked_events: expr, $channel: expr) => { - if $channel.should_emit_channel_ready_event() { - debug_assert!($channel.channel_pending_event_emitted()); + if $channel.context.should_emit_channel_ready_event() { + debug_assert!($channel.context.channel_pending_event_emitted()); $locked_events.push_back((events::Event::ChannelReady { - channel_id: $channel.channel_id(), - user_channel_id: $channel.get_user_id(), - counterparty_node_id: $channel.get_counterparty_node_id(), - channel_type: $channel.get_channel_type().clone(), + channel_id: $channel.context.channel_id(), + user_channel_id: $channel.context.get_user_id(), + counterparty_node_id: $channel.context.get_counterparty_node_id(), + channel_type: $channel.context.get_channel_type().clone(), }, None)); - $channel.set_channel_ready_event_emitted(); + $channel.context.set_channel_ready_event_emitted(); } } } @@ -1749,7 +1749,7 @@ macro_rules! 
handle_monitor_update_completion { let mut updates = $chan.monitor_updating_restored(&$self.logger, &$self.node_signer, $self.genesis_hash, &$self.default_configuration, $self.best_block.read().unwrap().height()); - let counterparty_node_id = $chan.get_counterparty_node_id(); + let counterparty_node_id = $chan.context.get_counterparty_node_id(); let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() { // We only send a channel_update in the case where we are just now sending a // channel_ready and the channel is in a usable state. We may re-send a @@ -1765,7 +1765,7 @@ macro_rules! handle_monitor_update_completion { } else { None }; let update_actions = $peer_state.monitor_update_blocked_actions - .remove(&$chan.channel_id()).unwrap_or(Vec::new()); + .remove(&$chan.context.channel_id()).unwrap_or(Vec::new()); let htlc_forwards = $self.handle_channel_resumption( &mut $peer_state.pending_msg_events, $chan, updates.raa, @@ -1776,7 +1776,7 @@ macro_rules! handle_monitor_update_completion { $peer_state.pending_msg_events.push(upd); } - let channel_id = $chan.channel_id(); + let channel_id = $chan.context.channel_id(); core::mem::drop($peer_state_lock); core::mem::drop($per_peer_state_lock); @@ -1804,16 +1804,16 @@ macro_rules! handle_new_monitor_update { match $update_res { ChannelMonitorUpdateStatus::InProgress => { log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.", - log_bytes!($chan.channel_id()[..])); + log_bytes!($chan.context.channel_id()[..])); Ok(()) }, ChannelMonitorUpdateStatus::PermanentFailure => { log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", - log_bytes!($chan.channel_id()[..])); + log_bytes!($chan.context.channel_id()[..])); update_maps_on_chan_removal!($self, $chan); let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown( - "ChannelMonitor storage failure".to_owned(), $chan.channel_id(), - $chan.get_user_id(), $chan.force_shutdown(false), + "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(), + $chan.context.get_user_id(), $chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok())); $remove; res @@ -2057,7 +2057,7 @@ where }; let res = channel.get_open_channel(self.genesis_hash.clone()); - let temporary_channel_id = channel.channel_id(); + let temporary_channel_id = channel.context.channel_id(); match peer_state.channel_by_id.entry(temporary_channel_id) { hash_map::Entry::Occupied(_) => { if cfg!(fuzzing) { @@ -2171,14 +2171,14 @@ where match channel.unbroadcasted_funding() { Some(transaction) => { pending_events_lock.push_back((events::Event::DiscardFunding { - channel_id: channel.channel_id(), transaction + channel_id: channel.context.channel_id(), transaction }, None)); }, None => {}, } pending_events_lock.push_back((events::Event::ChannelClosed { - channel_id: channel.channel_id(), - user_channel_id: channel.get_user_id(), + channel_id: channel.context.channel_id(), + user_channel_id: channel.context.get_user_id(), reason: closure_reason }, None)); } @@ -2197,7 +2197,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(channel_id.clone()) { hash_map::Entry::Occupied(mut chan_entry) => { - let funding_txo_opt = chan_entry.get().get_funding_txo(); + let funding_txo_opt = chan_entry.get().context.get_funding_txo(); let their_features = &peer_state.latest_features; let (shutdown_msg, mut monitor_update_opt, htlcs) = 
chan_entry.get_mut() .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; @@ -2353,7 +2353,7 @@ where }); } - Ok(chan.get_counterparty_node_id()) + Ok(chan.context.get_counterparty_node_id()) } fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> { @@ -2656,7 +2656,7 @@ where // we don't allow forwards outbound over them. break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None)); } - if chan.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.outbound_scid_alias() { + if chan.context.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.context.outbound_scid_alias() { // `option_scid_alias` (referred to in LDK as `scid_privacy`) means // "refuse to forward unless the SCID alias was used", so we pretend // we don't have the channel here. @@ -2679,7 +2679,7 @@ where break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt)); } } - if *outgoing_amt_msat < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum + if *outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt)); } if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) { @@ -2771,10 +2771,10 @@ where action: msgs::ErrorAction::IgnoreError }); } - if chan.get_short_channel_id().is_none() { + if chan.context.get_short_channel_id().is_none() { return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}); } - log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id())); + log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.context.channel_id())); self.get_channel_update_for_unicast(chan) } @@ -2790,8 +2790,8 @@ where /// [`channel_update`]: msgs::ChannelUpdate /// [`internal_closing_signed`]: Self::internal_closing_signed fn get_channel_update_for_unicast(&self, chan: &Channel<::Signer>) -> Result { - log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id())); - let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) { + log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id())); + let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) { None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}), Some(id) => id, }; @@ -2799,8 +2799,8 @@ where self.get_channel_update_for_onion(short_channel_id, chan) } fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<::Signer>) -> Result { - log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id())); - let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..]; + log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id())); + let were_node_one = self.our_network_pubkey.serialize()[..] 
< chan.context.get_counterparty_node_id().serialize()[..]; let enabled = chan.context.is_usable() && match chan.channel_update_status() { ChannelUpdateStatus::Enabled => true, @@ -2814,11 +2814,11 @@ where short_channel_id, timestamp: chan.context.get_update_time_counter(), flags: (!were_node_one) as u8 | ((!enabled as u8) << 1), - cltv_expiry_delta: chan.get_cltv_expiry_delta(), - htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(), - htlc_maximum_msat: chan.get_announced_htlc_max_msat(), + cltv_expiry_delta: chan.context.get_cltv_expiry_delta(), + htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(), + htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(), fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(), - fee_proportional_millionths: chan.get_fee_proportional_millionths(), + fee_proportional_millionths: chan.context.get_fee_proportional_millionths(), excess_data: Vec::new(), }; // Panic on failure to signal LDK should be restarted to retry signing the `ChannelUpdate`. @@ -2869,7 +2869,7 @@ where if !chan.get().context.is_live() { return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()}); } - let funding_txo = chan.get().get_funding_txo().unwrap(); + let funding_txo = chan.get().context.get_funding_txo().unwrap(); let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { path: path.clone(), @@ -3106,7 +3106,7 @@ where let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger) .map_err(|e| if let ChannelError::Close(msg) = e { - MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None) + MsgHandleErrInternal::from_finish_shutdown(msg, chan.context.channel_id(), chan.context.get_user_id(), chan.force_shutdown(true), None) } else { unreachable!(); }); match funding_res { Ok(funding_msg) => (funding_msg, chan), @@ -3114,7 +3114,7 @@ where mem::drop(peer_state_lock); mem::drop(per_peer_state); - let _ = handle_error!(self, funding_res, chan.get_counterparty_node_id()); + let _ = handle_error!(self, funding_res, chan.context.get_counterparty_node_id()); return Err(APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() }); @@ -3131,16 +3131,16 @@ where }; peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { - node_id: chan.get_counterparty_node_id(), + node_id: chan.context.get_counterparty_node_id(), msg, }); - match peer_state.channel_by_id.entry(chan.channel_id()) { + match peer_state.channel_by_id.entry(chan.context.channel_id()) { hash_map::Entry::Occupied(_) => { panic!("Generated duplicate funding txid?"); }, hash_map::Entry::Vacant(e) => { let mut id_to_peer = self.id_to_peer.lock().unwrap(); - if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() { + if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() { panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); } e.insert(chan); @@ -3218,7 +3218,7 @@ where let mut output_index = None; let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh(); for (idx, outp) in tx.output.iter().enumerate() { - if outp.script_pubkey == expected_spk && outp.value == chan.get_value_satoshis() { + if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() { if 
output_index.is_some() { return Err(APIError::APIMisuseError { err: "Multiple outputs matched the expected script and value".to_owned() @@ -3282,16 +3282,16 @@ where } for channel_id in channel_ids { let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap(); - let mut config = channel.config(); + let mut config = channel.context.config(); config.apply(config_update); - if !channel.update_config(&config) { + if !channel.context.update_config(&config) { continue; } if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { - node_id: channel.get_counterparty_node_id(), + node_id: channel.context.get_counterparty_node_id(), msg, }); } @@ -3365,7 +3365,7 @@ where err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id)) }) } - chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias()) + chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias()) }, None => return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id) @@ -3591,7 +3591,7 @@ where let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::reason(failure_code, data), - HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id } + HTLCDestination::NextHopChannel { node_id: Some(chan.get().context.get_counterparty_node_id()), channel_id: forward_chan_id } )); continue; } @@ -4067,7 +4067,7 @@ where _ => {}, } - chan.maybe_expire_prev_config(); + chan.context.maybe_expire_prev_config(); if chan.should_disconnect_peer_awaiting_response() { log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}", @@ -4228,9 +4228,9 @@ where // we're not leaking that we have a channel with the counterparty), otherwise we try to use // an inbound SCID alias before the real SCID. 
let scid_pref = if chan.context.should_announce() { - chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) + chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) } else { - chan.latest_inbound_scid_alias().or(chan.get_short_channel_id()) + chan.context.latest_inbound_scid_alias().or(chan.context.get_short_channel_id()) }; if let Some(scid) = scid_pref { self.get_htlc_temp_fail_err_and_data(desired_err_code, scid, chan) @@ -4492,7 +4492,7 @@ where let mut peer_state_lock = peer_state_opt.unwrap(); let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) { - let counterparty_node_id = chan.get().get_counterparty_node_id(); + let counterparty_node_id = chan.get().context.get_counterparty_node_id(); let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger); if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res { @@ -4618,7 +4618,7 @@ where channel_ready: Option, announcement_sigs: Option) -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> { log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement", - log_bytes!(channel.channel_id()), + log_bytes!(channel.context.channel_id()), if raa.is_some() { "an" } else { "no" }, if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(), if funding_broadcastable.is_some() { "" } else { "not " }, @@ -4627,10 +4627,10 @@ where let mut htlc_forwards = None; - let counterparty_node_id = channel.get_counterparty_node_id(); + let counterparty_node_id = channel.context.get_counterparty_node_id(); if !pending_forwards.is_empty() { - htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()), - channel.get_funding_txo().unwrap(), channel.get_user_id(), pending_forwards)); + htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()), + channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards)); } if let Some(msg) = channel_ready { @@ -4778,9 +4778,9 @@ where } if accept_0conf { channel.get_mut().set_0conf(); - } else if channel.get().get_channel_type().requires_zero_conf() { + } else if channel.get().context.get_channel_type().requires_zero_conf() { let send_msg_err_event = events::MessageSendEvent::HandleError { - node_id: channel.get().get_counterparty_node_id(), + node_id: channel.get().context.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage{ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } } @@ -4794,7 +4794,7 @@ where // channels per-peer we can accept channels from a peer with existing ones. 
if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS { let send_msg_err_event = events::MessageSendEvent::HandleError { - node_id: channel.get().get_counterparty_node_id(), + node_id: channel.get().context.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage{ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), } } @@ -4806,7 +4806,7 @@ where } peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { - node_id: channel.get().get_counterparty_node_id(), + node_id: channel.get().context.get_counterparty_node_id(), msg: channel.get_mut().accept_inbound_channel(user_channel_id), }); } @@ -4845,8 +4845,8 @@ where ) -> usize { let mut num_unfunded_channels = 0; for (_, chan) in peer.channel_by_id.iter() { - if !chan.context.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 && - chan.get_funding_tx_confirmations(best_block_height) == 0 + if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 && + chan.context.get_funding_tx_confirmations(best_block_height) == 0 { num_unfunded_channels += 1; } @@ -4911,14 +4911,14 @@ where }, Ok(res) => res }; - match peer_state.channel_by_id.entry(channel.channel_id()) { + match peer_state.channel_by_id.entry(channel.context.channel_id()) { hash_map::Entry::Occupied(_) => { self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone())) }, hash_map::Entry::Vacant(entry) => { if !self.default_configuration.manually_accept_inbound_channels { - if channel.get_channel_type().requires_zero_conf() { + if channel.context.get_channel_type().requires_zero_conf() { return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); } peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { @@ -4932,7 +4932,7 @@ where counterparty_node_id: counterparty_node_id.clone(), funding_satoshis: msg.funding_satoshis, push_msat: msg.push_msat, - channel_type: channel.get_channel_type().clone(), + channel_type: channel.context.get_channel_type().clone(), }, None)); } @@ -4955,7 +4955,7 @@ where match peer_state.channel_by_id.entry(msg.temporary_channel_id) { hash_map::Entry::Occupied(mut chan) => { try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan); - (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) + (chan.get().context.get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } @@ -4996,14 +4996,14 @@ where Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id)) }, hash_map::Entry::Vacant(e) => { - match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) { + match self.id_to_peer.lock().unwrap().entry(chan.context.channel_id()) { hash_map::Entry::Occupied(_) => { return Err(MsgHandleErrInternal::send_err_msg_no_close( "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(), funding_msg.channel_id)) }, hash_map::Entry::Vacant(i_e) => { - i_e.insert(chan.get_counterparty_node_id()); + i_e.insert(chan.context.get_counterparty_node_id()); } } @@ -5053,7 +5053,7 @@ where hash_map::Entry::Occupied(mut chan) => { let monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan); - let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor); + let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor); let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan); if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { // We weren't able to watch the channel to begin with, so no updates should be made on @@ -5083,7 +5083,7 @@ where let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer, self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan); if let Some(announcement_sigs) = announcement_sigs_opt { - log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id())); + log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().context.channel_id())); peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), msg: announcement_sigs, @@ -5094,7 +5094,7 @@ where // counterparty's announcement_signatures. Thus, we only bother to send a // channel_update here if the channel is not public, i.e. we're not sending an // announcement_signatures. 
- log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id())); + log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().context.channel_id())); if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id.clone(), @@ -5134,7 +5134,7 @@ where if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); } - let funding_txo_opt = chan_entry.get().get_funding_txo(); + let funding_txo_opt = chan_entry.get().context.get_funding_txo(); let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry); dropped_htlcs = htlcs; @@ -5340,7 +5340,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - let funding_txo = chan.get().get_funding_txo(); + let funding_txo = chan.get().context.get_funding_txo(); let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan); if let Some(monitor_update) = monitor_update_opt { let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update); @@ -5479,7 +5479,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - let funding_txo = chan.get().get_funding_txo(); + let funding_txo = chan.get().context.get_funding_txo(); let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan); let res = if let Some(monitor_update) = monitor_update_opt { let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update); @@ -5562,7 +5562,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(chan_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { + if chan.get().context.get_counterparty_node_id() != *counterparty_node_id { if chan.get().context.should_announce() { // If the announcement is about a channel of ours which is public, some // other peer may simply be forwarding all its gossip to us. Don't provide @@ -5571,7 +5571,7 @@ where } return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id)); } - let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..]; + let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().context.get_counterparty_node_id().serialize()[..]; let msg_from_node_one = msg.contents.flags & 1 == 0; if were_node_one == msg_from_node_one { return Ok(NotifyOption::SkipPersist); @@ -5618,7 +5618,7 @@ where // they have the latest channel parameters. 
if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { channel_update = Some(events::MessageSendEvent::SendChannelUpdate { - node_id: chan.get().get_counterparty_node_id(), + node_id: chan.get().context.get_counterparty_node_id(), msg, }); } @@ -5699,9 +5699,9 @@ where }; self.issue_channel_close_events(&chan, reason); pending_msg_events.push(events::MessageSendEvent::HandleError { - node_id: chan.get_counterparty_node_id(), + node_id: chan.context.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() } + msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() } }, }); } @@ -5750,8 +5750,8 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state: &mut PeerState<_> = &mut *peer_state_lock; for (channel_id, chan) in peer_state.channel_by_id.iter_mut() { - let counterparty_node_id = chan.get_counterparty_node_id(); - let funding_txo = chan.get_funding_txo(); + let counterparty_node_id = chan.context.get_counterparty_node_id(); + let funding_txo = chan.context.get_funding_txo(); let (monitor_opt, holding_cell_failed_htlcs) = chan.maybe_free_holding_cell_htlcs(&self.logger); if !holding_cell_failed_htlcs.is_empty() { @@ -5810,7 +5810,7 @@ where if let Some(msg) = msg_opt { has_update = true; pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: chan.get_counterparty_node_id(), msg, + node_id: chan.context.get_counterparty_node_id(), msg, }); } if let Some(tx) = tx_opt { @@ -5833,7 +5833,7 @@ where Err(e) => { has_update = true; let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id); - handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + handle_errors.push((chan.context.get_counterparty_node_id(), Err(res))); !close_channel } } @@ -6164,7 +6164,7 @@ where } if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) { - debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint); + debug_assert_eq!(chan.get().context.get_funding_txo().unwrap(), channel_funding_outpoint); if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() { log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor", log_bytes!(&channel_funding_outpoint.to_channel_id()[..])); @@ -6420,7 +6420,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for chan in peer_state.channel_by_id.values() { - if let (Some(funding_txo), Some(block_hash)) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) { + if let (Some(funding_txo), Some(block_hash)) = (chan.context.get_funding_txo(), chan.context.get_funding_tx_confirmed_in()) { res.push((funding_txo.txid, Some(block_hash))); } } @@ -6432,7 +6432,7 @@ where let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist }); self.do_chain_event(None, |channel| { - if let Some(funding_txo) = channel.get_funding_txo() { + if let Some(funding_txo) = channel.context.get_funding_txo() { if funding_txo.txid == *txid { channel.funding_transaction_unconfirmed(&self.logger).map(|()| (None, Vec::new(), None)) } else { Ok((None, Vec::new(), None)) } @@ -6475,20 +6475,20 @@ where for (source, payment_hash) in 
timed_out_pending_htlcs.drain(..) { let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel); timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data), - HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() })); + HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() })); } if let Some(channel_ready) = channel_ready_opt { send_channel_ready!(self, pending_msg_events, channel, channel_ready); if channel.context.is_usable() { - log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); + log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.context.channel_id())); if let Ok(msg) = self.get_channel_update_for_unicast(channel) { pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { - node_id: channel.get_counterparty_node_id(), + node_id: channel.context.get_counterparty_node_id(), msg, }); } } else { - log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id())); + log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.context.channel_id())); } } @@ -6498,9 +6498,9 @@ where } if let Some(announcement_sigs) = announcement_sigs { - log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id())); + log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.context.channel_id())); pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { - node_id: channel.get_counterparty_node_id(), + node_id: channel.context.get_counterparty_node_id(), msg: announcement_sigs, }); if let Some(height) = height_opt { @@ -6515,7 +6515,7 @@ where } } if channel.is_our_channel_ready() { - if let Some(real_scid) = channel.get_short_channel_id() { + if let Some(real_scid) = channel.context.get_short_channel_id() { // If we sent a 0conf channel_ready, and now have an SCID, we add it // to the short_to_chan_info map here. Note that we check whether we // can relay using the real SCID at relay-time (i.e. @@ -6523,8 +6523,8 @@ where // un-confirmed we force-close the channel, ensuring short_to_chan_info // is always consistent. 
let mut short_to_chan_info = self.short_to_chan_info.write().unwrap(); - let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id())); - assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()), + let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); + assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()), "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels", fake_scid::MAX_SCID_BLOCKS_FROM_NOW); } @@ -6542,9 +6542,9 @@ where let reason_message = format!("{}", reason); self.issue_channel_close_events(channel, reason); pending_msg_events.push(events::MessageSendEvent::HandleError { - node_id: channel.get_counterparty_node_id(), + node_id: channel.context.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { - channel_id: channel.channel_id(), + channel_id: channel.context.channel_id(), data: reason_message, } }, }); @@ -6910,7 +6910,7 @@ where let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|_, chan| { - let retain = if chan.get_counterparty_node_id() == *counterparty_node_id { + let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id { if !chan.context.have_received_message() { // If we created this (outbound) channel while we were disconnected from the // peer we probably failed to send the open_channel message, which is now @@ -6919,13 +6919,13 @@ where false } else { pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { - node_id: chan.get_counterparty_node_id(), + node_id: chan.context.get_counterparty_node_id(), msg: chan.get_channel_reestablish(&self.logger), }); true } } else { true }; - if retain && chan.get_counterparty_node_id() != *counterparty_node_id { + if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id { if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) { if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) { pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement { @@ -7553,7 +7553,7 @@ where } number_of_channels += peer_state.channel_by_id.len(); for (_, channel) in peer_state.channel_by_id.iter() { - if !channel.is_funding_initiated() { + if !channel.context.is_funding_initiated() { unfunded_channels += 1; } } @@ -7565,7 +7565,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for (_, channel) in peer_state.channel_by_id.iter() { - if channel.is_funding_initiated() { + if channel.context.is_funding_initiated() { channel.write(writer)?; } } @@ -7851,7 +7851,7 @@ where pub default_config: UserConfig, /// A map from channel funding outpoints to ChannelMonitors for those channels (ie - /// value.get_funding_txo() should be the key). + /// value.context.get_funding_txo() should be the key). /// /// If a monitor is inconsistent with the channel state during deserialization the channel will /// be force-closed using the data in the ChannelMonitor and the channel will be dropped. 
This @@ -7941,14 +7941,14 @@ where let mut channel: Channel<::Signer> = Channel::read(reader, ( &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config) ))?; - let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?; + let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?; funding_txo_set.insert(funding_txo.clone()); if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) { if channel.get_latest_complete_monitor_update_id() > monitor.get_latest_update_id() { // If the channel is ahead of the monitor, return InvalidValue: log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!"); log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", - log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id()); + log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id()); log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); @@ -7962,7 +7962,7 @@ where log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!"); log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", - log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); + log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true); if let Some((counterparty_node_id, funding_txo, update)) = monitor_update { pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { @@ -7971,8 +7971,8 @@ where } failed_htlcs.append(&mut new_failed_htlcs); channel_closures.push_back((events::Event::ChannelClosed { - channel_id: channel.channel_id(), - user_channel_id: channel.get_user_id(), + channel_id: channel.context.channel_id(), + user_channel_id: channel.context.get_user_id(), reason: ClosureReason::OutdatedChannelManager }, None)); for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { @@ -7990,29 +7990,29 @@ where // backwards leg of the HTLC will simply be rejected. 
log_info!(args.logger, "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager", - log_bytes!(channel.channel_id()), log_bytes!(payment_hash.0)); - failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id())); + log_bytes!(channel.context.channel_id()), log_bytes!(payment_hash.0)); + failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id())); } } } else { log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}", - log_bytes!(channel.channel_id()), channel.context.get_latest_monitor_update_id(), + log_bytes!(channel.context.channel_id()), channel.context.get_latest_monitor_update_id(), monitor.get_latest_update_id()); channel.complete_all_mon_updates_through(monitor.get_latest_update_id()); - if let Some(short_channel_id) = channel.get_short_channel_id() { - short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id())); + if let Some(short_channel_id) = channel.context.get_short_channel_id() { + short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); } - if channel.is_funding_initiated() { - id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id()); + if channel.context.is_funding_initiated() { + id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id()); } - match peer_channels.entry(channel.get_counterparty_node_id()) { + match peer_channels.entry(channel.context.get_counterparty_node_id()) { hash_map::Entry::Occupied(mut entry) => { let by_id_map = entry.get_mut(); - by_id_map.insert(channel.channel_id(), channel); + by_id_map.insert(channel.context.channel_id(), channel); }, hash_map::Entry::Vacant(entry) => { let mut by_id_map = HashMap::new(); - by_id_map.insert(channel.channel_id(), channel); + by_id_map.insert(channel.context.channel_id(), channel); entry.insert(by_id_map); } } @@ -8023,12 +8023,12 @@ where // safely discard the channel. 
let _ = channel.force_shutdown(false); channel_closures.push_back((events::Event::ChannelClosed { - channel_id: channel.channel_id(), - user_channel_id: channel.get_user_id(), + channel_id: channel.context.channel_id(), + user_channel_id: channel.context.get_user_id(), reason: ClosureReason::DisconnectedPeer, }, None)); } else { - log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id())); + log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id())); log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds."); @@ -8117,7 +8117,7 @@ where let peer_state = peer_mtx.lock().unwrap(); for (_, chan) in peer_state.channel_by_id.iter() { for update in chan.uncompleted_unblocked_mon_updates() { - if let Some(funding_txo) = chan.get_funding_txo() { + if let Some(funding_txo) = chan.context.get_funding_txo() { log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for channel {}", update.update_id, log_bytes!(funding_txo.to_channel_id())); pending_background_events.push( @@ -8409,25 +8409,25 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for (chan_id, chan) in peer_state.channel_by_id.iter_mut() { - if chan.outbound_scid_alias() == 0 { + if chan.context.outbound_scid_alias() == 0 { let mut outbound_scid_alias; loop { outbound_scid_alias = fake_scid::Namespace::OutboundAlias .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source); if outbound_scid_aliases.insert(outbound_scid_alias) { break; } } - chan.set_outbound_scid_alias(outbound_scid_alias); - } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) { + chan.context.set_outbound_scid_alias(outbound_scid_alias); + } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) { // Note that in rare cases its possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. - log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); + log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias()); return Err(DecodeError::InvalidValue); } if chan.context.is_usable() { - if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() { + if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() { // Note that in rare cases its possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. 
- log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); + log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias()); return Err(DecodeError::InvalidValue); } } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 6d980632d1a..79210068f32 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2237,10 +2237,10 @@ pub fn do_claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, let peer_state = per_peer_state.get(&$prev_node.node.get_our_node_id()) .unwrap().lock().unwrap(); let channel = peer_state.channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap(); - if let Some(prev_config) = channel.prev_config() { + if let Some(prev_config) = channel.context.prev_config() { prev_config.forwarding_fee_base_msat } else { - channel.config().forwarding_fee_base_msat + channel.context.config().forwarding_fee_base_msat } }; expect_payment_forwarded!($node, $next_node, $prev_node, Some(fee as u64), false, false); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 10225e099cb..58977e78964 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -6172,7 +6172,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); let channel = chan_lock.channel_by_id.get(&chan.2).unwrap(); - htlc_minimum_msat = channel.get_holder_htlc_minimum_msat(); + htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat(); } let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat); diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 36e1bd753e2..1aa3420caf5 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -510,7 +510,7 @@ fn test_onion_failure() { let short_channel_id = channels[1].0.contents.short_channel_id; let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) .unwrap().lock().unwrap().channel_by_id.get(&channels[1].2).unwrap() - .get_counterparty_htlc_minimum_msat() - 1; + .context.get_counterparty_htlc_minimum_msat() - 1; let mut bogus_route = route.clone(); let route_len = bogus_route.paths[0].hops.len(); bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward; diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index f514fa1ed97..ab010b5a723 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -607,9 +607,9 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id()) .unwrap().lock().unwrap(); let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap(); - let mut new_config = channel.config(); + let mut new_config = channel.context.config(); new_config.forwarding_fee_base_msat += 100_000; - channel.update_config(&new_config); + channel.context.update_config(&new_config); new_route.paths[0].hops[0].fee_msat += 100_000; } @@ -1409,7 +1409,7 @@ fn test_trivial_inflight_htlc_tracking(){ let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( 
&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) , &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()), - channel_1.get_short_channel_id().unwrap() + channel_1.context.get_short_channel_id().unwrap() ); assert_eq!(chan_1_used_liquidity, None); } @@ -1421,7 +1421,7 @@ fn test_trivial_inflight_htlc_tracking(){ let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) , &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()), - channel_2.get_short_channel_id().unwrap() + channel_2.context.get_short_channel_id().unwrap() ); assert_eq!(chan_2_used_liquidity, None); @@ -1446,7 +1446,7 @@ fn test_trivial_inflight_htlc_tracking(){ let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) , &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()), - channel_1.get_short_channel_id().unwrap() + channel_1.context.get_short_channel_id().unwrap() ); // First hop accounts for expected 1000 msat fee assert_eq!(chan_1_used_liquidity, Some(501000)); @@ -1459,7 +1459,7 @@ fn test_trivial_inflight_htlc_tracking(){ let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) , &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()), - channel_2.get_short_channel_id().unwrap() + channel_2.context.get_short_channel_id().unwrap() ); assert_eq!(chan_2_used_liquidity, Some(500000)); @@ -1485,7 +1485,7 @@ fn test_trivial_inflight_htlc_tracking(){ let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) , &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()), - channel_1.get_short_channel_id().unwrap() + channel_1.context.get_short_channel_id().unwrap() ); assert_eq!(chan_1_used_liquidity, None); } @@ -1497,7 +1497,7 @@ fn test_trivial_inflight_htlc_tracking(){ let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) , &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()), - channel_2.get_short_channel_id().unwrap() + channel_2.context.get_short_channel_id().unwrap() ); assert_eq!(chan_2_used_liquidity, None); } @@ -1538,7 +1538,7 @@ fn test_holding_cell_inflight_htlcs() { let used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) , &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()), - channel.get_short_channel_id().unwrap() + channel.context.get_short_channel_id().unwrap() ); assert_eq!(used_liquidity, Some(2000000)); From 497aeb006f6cc004a66a660222010546af96075a Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 7 Jun 2023 12:20:25 +0200 Subject: [PATCH 05/25] Move `Channel::build_commitment_transaction` to `ChannelContext` impl This is one of a series of commits to make sure methods are moved by chunks so they are easily reviewable in diffs. Unfortunately they are not purely move-only as fields need to be updated for things to compile, but these should be quite clear. This commit also uses the `context` field where needed for compilation and tests to pass due to the above change. 
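For readers following the refactor, the shape of the change is roughly the following. This is an illustrative sketch only, not the actual LDK code: the struct and method names mirror the diff, but the fields, signatures, and bodies are simplified stand-ins (the real `build_commitment_transaction` takes keys, flags, and a logger and returns `CommitmentStats`).

```rust
// Illustrative sketch of the wrapper pattern this series converges on; the
// names mirror the diff but the bodies are stand-ins, not real LDK code.

/// Shared state and helpers for any channel, funded or not.
struct ChannelContext {
    channel_id: [u8; 32],
    feerate_per_kw: u32,
}

impl ChannelContext {
    /// Accessors that only touch shared state move onto the context...
    fn channel_id(&self) -> [u8; 32] { self.channel_id }

    /// ...as do larger helpers such as `build_commitment_transaction` in this
    /// commit (placeholder signature; the real method returns `CommitmentStats`).
    fn build_commitment_transaction(&self, commitment_number: u64) -> (u64, u32) {
        (commitment_number, self.feerate_per_kw)
    }
}

/// The funded channel (and, later, unfunded channel types) wraps the context.
struct Channel {
    pub context: ChannelContext,
}

impl Channel {
    /// Channel-specific logic stays on the wrapper and reaches shared state
    /// through `self.context`, which is exactly the call-site churn in the diff.
    fn commitment_feerate(&self) -> u32 {
        self.context.build_commitment_transaction(0).1
    }
}

fn main() {
    let chan = Channel {
        context: ChannelContext { channel_id: [0u8; 32], feerate_per_kw: 253 },
    };
    assert_eq!(chan.context.channel_id(), [0u8; 32]);
    assert_eq!(chan.commitment_feerate(), 253);
}
```

This is also why the bulk of the series is mechanical call-site churn such as `chan.channel_id()` becoming `chan.context.channel_id()`: behaviour is unchanged, only the receiver moves from the wrapper to the shared context.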
--- lightning/src/ln/channel.rs | 530 ++++++++++++++++++------------------ 1 file changed, 265 insertions(+), 265 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 74ce190a45d..a03a470a500 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1054,6 +1054,253 @@ impl ChannelContext { pub fn is_funding_initiated(&self) -> bool { self.channel_state >= ChannelState::FundingSent as u32 } + + /// Transaction nomenclature is somewhat confusing here as there are many different cases - a + /// transaction is referred to as "a's transaction" implying that a will be able to broadcast + /// the transaction. Thus, b will generally be sending a signature over such a transaction to + /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As + /// such, a transaction is generally the result of b increasing the amount paid to a (or adding + /// an HTLC to a). + /// @local is used only to convert relevant internal structures which refer to remote vs local + /// to decide value of outputs and direction of HTLCs. + /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC + /// state may indicate that one peer has informed the other that they'd like to add an HTLC but + /// have not yet committed it. Such HTLCs will only be included in transactions which are being + /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both + /// which peer generated this transaction and "to whom" this transaction flows. + #[inline] + fn build_commitment_transaction(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats + where L::Target: Logger + { + let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new(); + let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len(); + let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs); + + let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis }; + let mut remote_htlc_total_msat = 0; + let mut local_htlc_total_msat = 0; + let mut value_to_self_msat_offset = 0; + + let mut feerate_per_kw = self.feerate_per_kw; + if let Some((feerate, update_state)) = self.pending_update_fee { + if match update_state { + // Note that these match the inclusion criteria when scanning + // pending_inbound_htlcs below. + FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local }, + FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local }, + FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local }, + } { + feerate_per_kw = feerate; + } + } + + log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", + commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), + get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), + log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); + + macro_rules! 
get_htlc_in_commitment { + ($htlc: expr, $offered: expr) => { + HTLCOutputInCommitment { + offered: $offered, + amount_msat: $htlc.amount_msat, + cltv_expiry: $htlc.cltv_expiry, + payment_hash: $htlc.payment_hash, + transaction_output_index: None + } + } + } + + macro_rules! add_htlc_output { + ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => { + if $outbound == local { // "offered HTLC output" + let htlc_in_tx = get_htlc_in_commitment!($htlc, true); + let htlc_tx_fee = if self.opt_anchors() { + 0 + } else { + feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000 + }; + if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee { + log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + included_non_dust_htlcs.push((htlc_in_tx, $source)); + } else { + log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + included_dust_htlcs.push((htlc_in_tx, $source)); + } + } else { + let htlc_in_tx = get_htlc_in_commitment!($htlc, false); + let htlc_tx_fee = if self.opt_anchors() { + 0 + } else { + feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000 + }; + if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee { + log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + included_non_dust_htlcs.push((htlc_in_tx, $source)); + } else { + log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + included_dust_htlcs.push((htlc_in_tx, $source)); + } + } + } + } + + for ref htlc in self.pending_inbound_htlcs.iter() { + let (include, state_name) = match htlc.state { + InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"), + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"), + InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"), + InboundHTLCState::Committed => (true, "Committed"), + InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"), + }; + + if include { + add_htlc_output!(htlc, false, None, state_name); + remote_htlc_total_msat += htlc.amount_msat; + } else { + log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name); + match &htlc.state { + &InboundHTLCState::LocalRemoved(ref reason) => { + if generated_by_local { + if let &InboundHTLCRemovalReason::Fulfill(_) = reason { + value_to_self_msat_offset += htlc.amount_msat as i64; + } + } + }, + _ => {}, + } + } + } + + let mut preimages: Vec = Vec::new(); + + for ref htlc in self.pending_outbound_htlcs.iter() { + let (include, state_name) = match htlc.state { + OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"), + OutboundHTLCState::Committed => (true, "Committed"), + OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"), + 
OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"), + OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"), + }; + + let preimage_opt = match htlc.state { + OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p, + OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p, + OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p, + _ => None, + }; + + if let Some(preimage) = preimage_opt { + preimages.push(preimage); + } + + if include { + add_htlc_output!(htlc, true, Some(&htlc.source), state_name); + local_htlc_total_msat += htlc.amount_msat; + } else { + log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name); + match htlc.state { + OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => { + value_to_self_msat_offset -= htlc.amount_msat as i64; + }, + OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => { + if !generated_by_local { + value_to_self_msat_offset -= htlc.amount_msat as i64; + } + }, + _ => {}, + } + } + } + + let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset; + assert!(value_to_self_msat >= 0); + // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie + // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to + // "violate" their reserve value by couting those against it. Thus, we have to convert + // everything to i64 before subtracting as otherwise we can overflow. + let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset; + assert!(value_to_remote_msat >= 0); + + #[cfg(debug_assertions)] + { + // Make sure that the to_self/to_remote is always either past the appropriate + // channel_reserve *or* it is making progress towards it. 
+ let mut broadcaster_max_commitment_tx_output = if generated_by_local { + self.holder_max_commitment_tx_output.lock().unwrap() + } else { + self.counterparty_max_commitment_tx_output.lock().unwrap() + }; + debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64); + broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64); + debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64); + broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64); + } + + let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.channel_transaction_parameters.opt_anchors.is_some()); + let anchors_val = if self.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64; + let (value_to_self, value_to_remote) = if self.is_outbound() { + (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000) + } else { + (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64) + }; + + let mut value_to_a = if local { value_to_self } else { value_to_remote }; + let mut value_to_b = if local { value_to_remote } else { value_to_self }; + let (funding_pubkey_a, funding_pubkey_b) = if local { + (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey) + } else { + (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey) + }; + + if value_to_a >= (broadcaster_dust_limit_satoshis as i64) { + log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a); + } else { + value_to_a = 0; + } + + if value_to_b >= (broadcaster_dust_limit_satoshis as i64) { + log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b); + } else { + value_to_b = 0; + } + + let num_nondust_htlcs = included_non_dust_htlcs.len(); + + let channel_parameters = + if local { self.channel_transaction_parameters.as_holder_broadcastable() } + else { self.channel_transaction_parameters.as_counterparty_broadcastable() }; + let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number, + value_to_a as u64, + value_to_b as u64, + self.channel_transaction_parameters.opt_anchors.is_some(), + funding_pubkey_a, + funding_pubkey_b, + keys.clone(), + feerate_per_kw, + &mut included_non_dust_htlcs, + &channel_parameters + ); + let mut htlcs_included = included_non_dust_htlcs; + // The unwrap is safe, because all non-dust HTLCs have been assigned an output index + htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap()); + htlcs_included.append(&mut included_dust_htlcs); + + // For the stats, trimmed-to-0 the value in msats accordingly + value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat }; + value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat }; + + CommitmentStats { + tx, + feerate_per_kw, + total_fee_sat, + num_nondust_htlcs, + htlcs_included, + local_balance_msat: value_to_self_msat as u64, + 
remote_balance_msat: value_to_remote_msat as u64, + preimages + } + } } // Internal utility functions for channels @@ -1098,6 +1345,13 @@ pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channe cmp::min(channel_value_satoshis, cmp::max(q, 1000)) } +// Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs. +// Note that num_htlcs should not include dust HTLCs. +#[inline] +fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 { + feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 +} + // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking // has been completed, and then turn into a Channel to get compiler-time enforcement of things like // calling channel_id() before we're set up or things like get_outbound_funding_signed on an @@ -1817,253 +2071,6 @@ impl Channel { Ok(chan) } - /// Transaction nomenclature is somewhat confusing here as there are many different cases - a - /// transaction is referred to as "a's transaction" implying that a will be able to broadcast - /// the transaction. Thus, b will generally be sending a signature over such a transaction to - /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As - /// such, a transaction is generally the result of b increasing the amount paid to a (or adding - /// an HTLC to a). - /// @local is used only to convert relevant internal structures which refer to remote vs local - /// to decide value of outputs and direction of HTLCs. - /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC - /// state may indicate that one peer has informed the other that they'd like to add an HTLC but - /// have not yet committed it. Such HTLCs will only be included in transactions which are being - /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both - /// which peer generated this transaction and "to whom" this transaction flows. - #[inline] - fn build_commitment_transaction(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats - where L::Target: Logger - { - let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new(); - let num_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len(); - let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs); - - let broadcaster_dust_limit_satoshis = if local { self.context.holder_dust_limit_satoshis } else { self.context.counterparty_dust_limit_satoshis }; - let mut remote_htlc_total_msat = 0; - let mut local_htlc_total_msat = 0; - let mut value_to_self_msat_offset = 0; - - let mut feerate_per_kw = self.context.feerate_per_kw; - if let Some((feerate, update_state)) = self.context.pending_update_fee { - if match update_state { - // Note that these match the inclusion criteria when scanning - // pending_inbound_htlcs below. 
- FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); !generated_by_local }, - FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.context.is_outbound()); !generated_by_local }, - FeeUpdateState::Outbound => { assert!(self.context.is_outbound()); generated_by_local }, - } { - feerate_per_kw = feerate; - } - } - - log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", - commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), - get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()), - log_bytes!(self.context.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); - - macro_rules! get_htlc_in_commitment { - ($htlc: expr, $offered: expr) => { - HTLCOutputInCommitment { - offered: $offered, - amount_msat: $htlc.amount_msat, - cltv_expiry: $htlc.cltv_expiry, - payment_hash: $htlc.payment_hash, - transaction_output_index: None - } - } - } - - macro_rules! add_htlc_output { - ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => { - if $outbound == local { // "offered HTLC output" - let htlc_in_tx = get_htlc_in_commitment!($htlc, true); - let htlc_tx_fee = if self.context.opt_anchors() { - 0 - } else { - feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000 - }; - if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee { - log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); - included_non_dust_htlcs.push((htlc_in_tx, $source)); - } else { - log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); - included_dust_htlcs.push((htlc_in_tx, $source)); - } - } else { - let htlc_in_tx = get_htlc_in_commitment!($htlc, false); - let htlc_tx_fee = if self.context.opt_anchors() { - 0 - } else { - feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000 - }; - if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee { - log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); - included_non_dust_htlcs.push((htlc_in_tx, $source)); - } else { - log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); - included_dust_htlcs.push((htlc_in_tx, $source)); - } - } - } - } - - for ref htlc in self.context.pending_inbound_htlcs.iter() { - let (include, state_name) = match htlc.state { - InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"), - InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"), - InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"), - InboundHTLCState::Committed => (true, "Committed"), - InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"), - }; - - if 
include { - add_htlc_output!(htlc, false, None, state_name); - remote_htlc_total_msat += htlc.amount_msat; - } else { - log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name); - match &htlc.state { - &InboundHTLCState::LocalRemoved(ref reason) => { - if generated_by_local { - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { - value_to_self_msat_offset += htlc.amount_msat as i64; - } - } - }, - _ => {}, - } - } - } - - let mut preimages: Vec = Vec::new(); - - for ref htlc in self.context.pending_outbound_htlcs.iter() { - let (include, state_name) = match htlc.state { - OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"), - OutboundHTLCState::Committed => (true, "Committed"), - OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"), - OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"), - OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"), - }; - - let preimage_opt = match htlc.state { - OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p, - OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p, - OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p, - _ => None, - }; - - if let Some(preimage) = preimage_opt { - preimages.push(preimage); - } - - if include { - add_htlc_output!(htlc, true, Some(&htlc.source), state_name); - local_htlc_total_msat += htlc.amount_msat; - } else { - log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name); - match htlc.state { - OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => { - value_to_self_msat_offset -= htlc.amount_msat as i64; - }, - OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => { - if !generated_by_local { - value_to_self_msat_offset -= htlc.amount_msat as i64; - } - }, - _ => {}, - } - } - } - - let mut value_to_self_msat: i64 = (self.context.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset; - assert!(value_to_self_msat >= 0); - // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie - // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to - // "violate" their reserve value by couting those against it. Thus, we have to convert - // everything to i64 before subtracting as otherwise we can overflow. - let mut value_to_remote_msat: i64 = (self.context.channel_value_satoshis * 1000) as i64 - (self.context.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset; - assert!(value_to_remote_msat >= 0); - - #[cfg(debug_assertions)] - { - // Make sure that the to_self/to_remote is always either past the appropriate - // channel_reserve *or* it is making progress towards it. 
- let mut broadcaster_max_commitment_tx_output = if generated_by_local { - self.context.holder_max_commitment_tx_output.lock().unwrap() - } else { - self.context.counterparty_max_commitment_tx_output.lock().unwrap() - }; - debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.context.counterparty_selected_channel_reserve_satoshis.unwrap() as i64); - broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64); - debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.context.holder_selected_channel_reserve_satoshis as i64); - broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64); - } - - let total_fee_sat = Channel::::commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.context.channel_transaction_parameters.opt_anchors.is_some()); - let anchors_val = if self.context.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64; - let (value_to_self, value_to_remote) = if self.context.is_outbound() { - (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000) - } else { - (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64) - }; - - let mut value_to_a = if local { value_to_self } else { value_to_remote }; - let mut value_to_b = if local { value_to_remote } else { value_to_self }; - let (funding_pubkey_a, funding_pubkey_b) = if local { - (self.context.get_holder_pubkeys().funding_pubkey, self.context.get_counterparty_pubkeys().funding_pubkey) - } else { - (self.context.get_counterparty_pubkeys().funding_pubkey, self.context.get_holder_pubkeys().funding_pubkey) - }; - - if value_to_a >= (broadcaster_dust_limit_satoshis as i64) { - log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a); - } else { - value_to_a = 0; - } - - if value_to_b >= (broadcaster_dust_limit_satoshis as i64) { - log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b); - } else { - value_to_b = 0; - } - - let num_nondust_htlcs = included_non_dust_htlcs.len(); - - let channel_parameters = - if local { self.context.channel_transaction_parameters.as_holder_broadcastable() } - else { self.context.channel_transaction_parameters.as_counterparty_broadcastable() }; - let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number, - value_to_a as u64, - value_to_b as u64, - self.context.channel_transaction_parameters.opt_anchors.is_some(), - funding_pubkey_a, - funding_pubkey_b, - keys.clone(), - feerate_per_kw, - &mut included_non_dust_htlcs, - &channel_parameters - ); - let mut htlcs_included = included_non_dust_htlcs; - // The unwrap is safe, because all non-dust HTLCs have been assigned an output index - htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap()); - htlcs_included.append(&mut included_dust_htlcs); - - // For the stats, trimmed-to-0 the value in msats accordingly - value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat }; - value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat }; - - CommitmentStats { - tx, - feerate_per_kw, 
- total_fee_sat, - num_nondust_htlcs, - htlcs_included, - local_balance_msat: value_to_self_msat as u64, - remote_balance_msat: value_to_remote_msat as u64, - preimages - } - } - #[inline] fn get_closing_scriptpubkey(&self) -> Script { // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script @@ -2611,7 +2618,7 @@ impl Channel { let funding_script = self.get_funding_redeemscript(); let keys = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let initial_commitment_tx = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx; + let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx; { let trusted_tx = initial_commitment_tx.trust(); let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); @@ -2625,7 +2632,7 @@ impl Channel { } let counterparty_keys = self.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); @@ -2758,7 +2765,7 @@ impl Channel { let funding_script = self.get_funding_redeemscript(); let counterparty_keys = self.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); @@ -2766,7 +2773,7 @@ impl Channel { log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); let holder_signer = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let initial_commitment_tx = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; + let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; { let trusted_tx = initial_commitment_tx.trust(); let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); @@ -3127,13 +3134,6 @@ impl Channel { (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000 } - // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs. - // Note that num_htlcs should not include dust HTLCs. 
- #[inline] - fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 { - feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 - } - /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the /// number of pending HTLCs that are on track to be in our next commitment tx. /// @@ -3562,7 +3562,7 @@ impl Channel { let keys = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let commitment_stats = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger); + let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger); let commitment_txid = { let trusted_tx = commitment_stats.tx.trust(); let bitcoin_tx = trusted_tx.built_transaction(); @@ -4153,8 +4153,8 @@ impl Channel { let inbound_stats = self.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); let outbound_stats = self.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); let keys = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let commitment_stats = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); - let buffer_fee_msat = Channel::::commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000; + let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); + let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000; let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { //TODO: auto-close after a number of failures? 
@@ -5707,7 +5707,7 @@ impl Channel { /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created) fn get_outbound_funding_created_signature(&mut self, logger: &L) -> Result where L::Target: Logger { let counterparty_keys = self.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0) } @@ -6159,7 +6159,7 @@ impl Channel { fn build_commitment_no_state_update(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger { let counterparty_keys = self.build_remote_transaction_keys(); - let commitment_stats = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); + let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); let counterparty_commitment_txid = commitment_stats.tx.trust().txid(); #[cfg(any(test, fuzzing))] @@ -6191,7 +6191,7 @@ impl Channel { self.build_commitment_no_state_update(logger); let counterparty_keys = self.build_remote_transaction_keys(); - let commitment_stats = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); + let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); let counterparty_commitment_txid = commitment_stats.tx.trust().txid(); let (signature, htlc_signatures); @@ -7869,7 +7869,7 @@ mod tests { $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), * } ) => { { let (commitment_tx, htlcs): (_, Vec) = { - let mut commitment_stats = chan.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger); + let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger); let htlcs = commitment_stats.htlcs_included.drain(..) .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None }) From 0d739eeb22c8de9fe9bdf9d27223d134eb6dcf9c Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 7 Jun 2023 12:26:53 +0200 Subject: [PATCH 06/25] Move `Channel::build_holder_transaction_keys` and some other methods This is one of a series of commits to make sure methods are moved by chunks so they are easily reviewable in diffs. Unfortunately they are not purely move-only as fields need to be updated for things to compile, but these should be quite clear. This commit also uses the `context` field where needed for compilation and tests to pass due to the above change. 
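As background for the move below: `get_funding_redeemscript` simply wraps `make_funding_redeemscript` over the two funding pubkeys. The dependency-free sketch that follows is illustrative only and not part of the patch; the real code builds a `Script` with the `bitcoin` crate and the funding output pays to its P2WSH. It shows the BOLT #3 2-of-2 layout and why the script is always 71 bytes, the length `get_closing_transaction_weight` picks up via `get_funding_redeemscript().len()`.

// Standalone illustration (not part of the patch): the 2-of-2 funding script
// produced by `make_funding_redeemscript`, built here from raw opcodes to stay
// dependency-free. BOLT #3 puts the lexicographically lesser compressed pubkey
// first.
const OP_2: u8 = 0x52;
const OP_PUSHBYTES_33: u8 = 0x21;
const OP_CHECKMULTISIG: u8 = 0xae;

fn funding_redeemscript(holder_funding_key: &[u8; 33], counterparty_funding_key: &[u8; 33]) -> Vec<u8> {
    // Order the keys lexicographically, as the funding output requires.
    let (first, second) = if holder_funding_key < counterparty_funding_key {
        (holder_funding_key, counterparty_funding_key)
    } else {
        (counterparty_funding_key, holder_funding_key)
    };
    let mut script = Vec::with_capacity(71);
    script.push(OP_2);
    script.push(OP_PUSHBYTES_33);
    script.extend_from_slice(first);
    script.push(OP_PUSHBYTES_33);
    script.extend_from_slice(second);
    script.push(OP_2);
    script.push(OP_CHECKMULTISIG);
    script
}

fn main() {
    let holder_key = [0x03u8; 33];
    let counterparty_key = [0x02u8; 33];
    let script = funding_redeemscript(&holder_key, &counterparty_key);
    // 1 + (1 + 33) + (1 + 33) + 1 + 1 = 71 bytes; the funding output then pays
    // to the P2WSH (SHA256) of this script.
    assert_eq!(script.len(), 71);
    assert_eq!(script[1], OP_PUSHBYTES_33);
    assert_eq!(&script[2..35], &counterparty_key); // lesser key sorts first
}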
--- lightning/src/ln/channel.rs | 188 ++++++++++++++--------------- lightning/src/ln/channelmanager.rs | 4 +- 2 files changed, 96 insertions(+), 96 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a03a470a500..a5789314820 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1301,6 +1301,46 @@ impl ChannelContext { preimages } } + + #[inline] + /// Creates a set of keys for build_commitment_transaction to generate a transaction which our + /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to + /// our counterparty!) + /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction) + /// TODO Some magic rust shit to compile-time check this? + fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys { + let per_commitment_point = self.holder_signer.get_per_commitment_point(commitment_number, &self.secp_ctx); + let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint; + let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; + let counterparty_pubkeys = self.get_counterparty_pubkeys(); + + TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint) + } + + #[inline] + /// Creates a set of keys for build_commitment_transaction to generate a transaction which we + /// will sign and send to our counterparty. + /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created) + fn build_remote_transaction_keys(&self) -> TxCreationKeys { + //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we + //may see payments to it! + let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint; + let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; + let counterparty_pubkeys = self.get_counterparty_pubkeys(); + + TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint) + } + + /// Gets the redeemscript for the funding transaction output (ie the funding transaction output + /// pays to get_funding_redeemscript().to_v0_p2wsh()). 
+ /// Panics if called before accept_channel/new_from_req + pub fn get_funding_redeemscript(&self) -> Script { + make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey()) + } + + fn counterparty_funding_pubkey(&self) -> &PublicKey { + &self.get_counterparty_pubkeys().funding_pubkey + } } // Internal utility functions for channels @@ -2082,26 +2122,26 @@ impl Channel { #[inline] fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 { let mut ret = - (4 + // version - 1 + // input count - 36 + // prevout - 1 + // script length (0) - 4 + // sequence - 1 + // output count - 4 // lock time - )*4 + // * 4 for non-witness parts - 2 + // witness marker and flag - 1 + // witness element count - 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script) - self.get_funding_redeemscript().len() as u64 + // funding witness script - 2*(1 + 71); // two signatures + sighash type flags + (4 + // version + 1 + // input count + 36 + // prevout + 1 + // script length (0) + 4 + // sequence + 1 + // output count + 4 // lock time + )*4 + // * 4 for non-witness parts + 2 + // witness marker and flag + 1 + // witness element count + 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script) + self.context.get_funding_redeemscript().len() as u64 + // funding witness script + 2*(1 + 71); // two signatures + sighash type flags if let Some(spk) = a_scriptpubkey { - ret += ((8+1) + // output values and script length - spk.len() as u64) * 4; // scriptpubkey and witness multiplier + ret += ((8+1) + // output values and script length + spk.len() as u64) * 4; // scriptpubkey and witness multiplier } if let Some(spk) = b_scriptpubkey { - ret += ((8+1) + // output values and script length - spk.len() as u64) * 4; // scriptpubkey and witness multiplier + ret += ((8+1) + // output values and script length + spk.len() as u64) * 4; // scriptpubkey and witness multiplier } ret } @@ -2145,42 +2185,6 @@ impl Channel { self.context.channel_transaction_parameters.funding_outpoint.unwrap() } - #[inline] - /// Creates a set of keys for build_commitment_transaction to generate a transaction which our - /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to - /// our counterparty!) - /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction) - /// TODO Some magic rust shit to compile-time check this? - fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys { - let per_commitment_point = self.context.holder_signer.get_per_commitment_point(commitment_number, &self.context.secp_ctx); - let delayed_payment_base = &self.context.get_holder_pubkeys().delayed_payment_basepoint; - let htlc_basepoint = &self.context.get_holder_pubkeys().htlc_basepoint; - let counterparty_pubkeys = self.context.get_counterparty_pubkeys(); - - TxCreationKeys::derive_new(&self.context.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint) - } - - #[inline] - /// Creates a set of keys for build_commitment_transaction to generate a transaction which we - /// will sign and send to our counterparty. 
- /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created) - fn build_remote_transaction_keys(&self) -> TxCreationKeys { - //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we - //may see payments to it! - let revocation_basepoint = &self.context.get_holder_pubkeys().revocation_basepoint; - let htlc_basepoint = &self.context.get_holder_pubkeys().htlc_basepoint; - let counterparty_pubkeys = self.context.get_counterparty_pubkeys(); - - TxCreationKeys::derive_new(&self.context.secp_ctx, &self.context.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint) - } - - /// Gets the redeemscript for the funding transaction output (ie the funding transaction output - /// pays to get_funding_redeemscript().to_v0_p2wsh()). - /// Panics if called before accept_channel/new_from_req - pub fn get_funding_redeemscript(&self) -> Script { - make_funding_redeemscript(&self.context.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey()) - } - /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`] /// entirely. /// @@ -2615,9 +2619,9 @@ impl Channel { } fn funding_created_signature(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger { - let funding_script = self.get_funding_redeemscript(); + let funding_script = self.context.get_funding_redeemscript(); - let keys = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx; { let trusted_tx = initial_commitment_tx.trust(); @@ -2625,13 +2629,13 @@ impl Channel { let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); // They sign the holder commitment transaction... 
log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", - log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), + log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); - secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); + secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); } - let counterparty_keys = self.build_remote_transaction_keys(); + let counterparty_keys = self.context.build_remote_transaction_keys(); let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); @@ -2646,10 +2650,6 @@ impl Channel { Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature)) } - fn counterparty_funding_pubkey(&self) -> &PublicKey { - &self.context.get_counterparty_pubkeys().funding_pubkey - } - pub fn funding_created( &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L ) -> Result<(msgs::FundingSigned, ChannelMonitor), ChannelError> @@ -2699,15 +2699,15 @@ impl Channel { msg.signature, Vec::new(), &self.context.get_holder_pubkeys().funding_pubkey, - self.counterparty_funding_pubkey() + self.context.counterparty_funding_pubkey() ); self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; - // Now that we're past error-generating stuff, update our local state: + // Now that we're past error-generating stuff, update our local state: - let funding_redeemscript = self.get_funding_redeemscript(); + let funding_redeemscript = self.context.get_funding_redeemscript(); let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); @@ -2762,9 +2762,9 @@ impl Channel { panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); } - let funding_script = self.get_funding_redeemscript(); + let funding_script = self.context.get_funding_redeemscript(); - let counterparty_keys = self.build_remote_transaction_keys(); + let counterparty_keys = self.context.build_remote_transaction_keys(); let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); @@ -2772,7 +2772,7 @@ impl Channel { log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx 
{}", log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); - let holder_signer = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; { let trusted_tx = initial_commitment_tx.trust(); @@ -2789,14 +2789,14 @@ impl Channel { msg.signature, Vec::new(), &self.context.get_holder_pubkeys().funding_pubkey, - self.counterparty_funding_pubkey() + self.context.counterparty_funding_pubkey() ); self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; - let funding_redeemscript = self.get_funding_redeemscript(); + let funding_redeemscript = self.context.get_funding_redeemscript(); let funding_txo = self.context.get_funding_txo().unwrap(); let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); @@ -3558,9 +3558,9 @@ impl Channel { return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned())); } - let funding_script = self.get_funding_redeemscript(); + let funding_script = self.context.get_funding_redeemscript(); - let keys = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger); let commitment_txid = { @@ -3570,9 +3570,9 @@ impl Channel { log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}", log_bytes!(msg.signature.serialize_compact()[..]), - log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), + log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); - if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) { + if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) { return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned())); } bitcoin_tx.txid @@ -3664,7 +3664,7 @@ impl Channel { msg.signature, msg.htlc_signatures.clone(), &self.context.get_holder_pubkeys().funding_pubkey, - self.counterparty_funding_pubkey() + self.context.counterparty_funding_pubkey() ); self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages) @@ -4152,7 +4152,7 @@ impl Channel { // Before proposing a feerate update, check that we can actually afford the new fee. 
let inbound_stats = self.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); let outbound_stats = self.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); - let keys = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000; let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; @@ -4920,7 +4920,7 @@ impl Channel { tx.input[0].witness.push(Vec::new()); // First is the multisig dummy let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize(); - let counterparty_funding_key = self.counterparty_funding_pubkey().serialize(); + let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize(); let mut holder_sig = sig.serialize_der().to_vec(); holder_sig.push(EcdsaSighashType::All as u8); let mut cp_sig = counterparty_sig.serialize_der().to_vec(); @@ -4933,7 +4933,7 @@ impl Channel { tx.input[0].witness.push(holder_sig); } - tx.input[0].witness.push(self.get_funding_redeemscript().into_bytes()); + tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes()); tx } @@ -4964,7 +4964,7 @@ impl Channel { return Ok((None, None)); } - let funding_redeemscript = self.get_funding_redeemscript(); + let funding_redeemscript = self.context.get_funding_redeemscript(); let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false); if used_total_fee != msg.fee_satoshis { return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee))); @@ -4978,7 +4978,7 @@ impl Channel { // limits, so check for that case by re-checking the signature here. 
closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0; let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis); - secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned()); + secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned()); }, }; @@ -5405,7 +5405,7 @@ impl Channel { if self.context.funding_tx_confirmation_height == 0 { if tx.txid() == funding_txo.txid { let txo_idx = funding_txo.index as usize; - if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() || + if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() || tx.output[txo_idx].value != self.context.channel_value_satoshis { if self.context.is_outbound() { // If we generated the funding transaction and it doesn't match what it @@ -5706,7 +5706,7 @@ impl Channel { /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created) fn get_outbound_funding_created_signature(&mut self, logger: &L) -> Result where L::Target: Logger { - let counterparty_keys = self.build_remote_transaction_keys(); + let counterparty_keys = self.context.build_remote_transaction_keys(); let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0) @@ -5794,8 +5794,8 @@ impl Channel { short_channel_id: self.context.get_short_channel_id().unwrap(), node_id_1: if were_node_one { node_id } else { counterparty_node_id }, node_id_2: if were_node_one { counterparty_node_id } else { node_id }, - bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.counterparty_funding_pubkey() }), - bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }), + bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }), + bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }), excess_data: Vec::new(), }; @@ -5901,10 +5901,10 @@ impl Channel { "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}", &announcement, self.context.get_counterparty_node_id()))); } - if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.counterparty_funding_pubkey()).is_err() { + if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() { return Err(ChannelError::Close(format!( "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. 
their_bitcoin_key is ({:?})", - &announcement, self.counterparty_funding_pubkey()))); + &announcement, self.context.counterparty_funding_pubkey()))); } self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature)); @@ -6158,7 +6158,7 @@ impl Channel { } fn build_commitment_no_state_update(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger { - let counterparty_keys = self.build_remote_transaction_keys(); + let counterparty_keys = self.context.build_remote_transaction_keys(); let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); let counterparty_commitment_txid = commitment_stats.tx.trust().txid(); @@ -6190,7 +6190,7 @@ impl Channel { #[cfg(any(test, fuzzing))] self.build_commitment_no_state_update(logger); - let counterparty_keys = self.build_remote_transaction_keys(); + let counterparty_keys = self.context.build_remote_transaction_keys(); let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); let counterparty_commitment_txid = commitment_stats.tx.trust().txid(); let (signature, htlc_signatures); @@ -6208,7 +6208,7 @@ impl Channel { log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction), - &counterparty_commitment_txid, encode::serialize_hex(&self.get_funding_redeemscript()), + &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()), log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id())); for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) { @@ -7561,7 +7561,7 @@ mod tests { node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap(); // Node A --> Node B: funding created - let output_script = node_a_chan.get_funding_redeemscript(); + let output_script = node_a_chan.context.get_funding_redeemscript(); let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut { value: 10000000, script_pubkey: output_script.clone(), }]}; @@ -7878,11 +7878,11 @@ mod tests { }; let trusted_tx = commitment_tx.trust(); let unsigned_tx = trusted_tx.built_transaction(); - let redeemscript = chan.get_funding_redeemscript(); + let redeemscript = chan.context.get_funding_redeemscript(); let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap(); let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis); log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction))); - assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig"); + assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig"); let mut per_htlc: Vec<(HTLCOutputInCommitment, Option)> = Vec::new(); per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls @@ -7900,12 +7900,12 @@ mod tests { counterparty_signature, counterparty_htlc_sigs, &chan.context.holder_signer.pubkeys().funding_pubkey, - 
chan.counterparty_funding_pubkey() + chan.context.counterparty_funding_pubkey() ); let (holder_sig, htlc_sigs) = signer.sign_holder_commitment_and_htlcs(&holder_commitment_tx, &secp_ctx).unwrap(); assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig"); - let funding_redeemscript = chan.get_funding_redeemscript(); + let funding_redeemscript = chan.context.get_funding_redeemscript(); let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig); assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx"); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 61fca301fa0..fd536a1360c 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3216,7 +3216,7 @@ where } let mut output_index = None; - let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh(); + let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh(); for (idx, outp) in tx.output.iter().enumerate() { if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() { if output_index.is_some() { @@ -4955,7 +4955,7 @@ where match peer_state.channel_by_id.entry(msg.temporary_channel_id) { hash_map::Entry::Occupied(mut chan) => { try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan); - (chan.get().context.get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id()) + (chan.get().context.get_value_satoshis(), chan.get().context.get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } From 3ff94fae55a155603ddcb7c2a96d3b36a0eefa63 Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 7 Jun 2023 12:33:41 +0200 Subject: [PATCH 07/25] Move `Channel::get_feerate_sat_per_1000_weight` and other methods This is one of a series of commits to make sure methods are moved by chunks so they are easily reviewable in diffs. Unfortunately they are not purely move-only as fields need to be updated for things to compile, but these should be quite clear. This commit also uses the `context` field where needed for compilation and tests to pass due to the above change. --- lightning/src/ln/channel.rs | 68 +++++++++++------------ lightning/src/ln/channelmanager.rs | 12 ++-- lightning/src/ln/functional_test_utils.rs | 2 +- lightning/src/ln/functional_tests.rs | 2 +- 4 files changed, 42 insertions(+), 42 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a5789314820..e5452d54fae 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1341,6 +1341,33 @@ impl ChannelContext { fn counterparty_funding_pubkey(&self) -> &PublicKey { &self.get_counterparty_pubkeys().funding_pubkey } + + pub fn get_feerate_sat_per_1000_weight(&self) -> u32 { + self.feerate_per_kw + } + + pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option) -> u32 { + // When calculating our exposure to dust HTLCs, we assume that the channel feerate + // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%, + // whichever is higher.
This ensures that we aren't suddenly exposed to significantly + // more dust balance if the feerate increases when we have several HTLCs pending + // which are near the dust limit. + let mut feerate_per_kw = self.feerate_per_kw; + // If there's a pending update fee, use it to ensure we aren't under-estimating + // potential feerate updates coming soon. + if let Some((feerate, _)) = self.pending_update_fee { + feerate_per_kw = cmp::max(feerate_per_kw, feerate); + } + if let Some(feerate) = outbound_feerate_update { + feerate_per_kw = cmp::max(feerate_per_kw, feerate); + } + cmp::max(2530, feerate_per_kw * 1250 / 1000) + } + + /// Get forwarding information for the counterparty. + pub fn counterparty_forwarding_info(&self) -> Option { + self.counterparty_forwarding_info.clone() + } } // Internal utility functions for channels @@ -2920,7 +2947,7 @@ impl Channel { let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() { (0, 0) } else { - let dust_buffer_feerate = self.get_dust_buffer_feerate(outbound_feerate_update) as u64; + let dust_buffer_feerate = self.context.get_dust_buffer_feerate(outbound_feerate_update) as u64; (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) }; @@ -2952,7 +2979,7 @@ impl Channel { let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() { (0, 0) } else { - let dust_buffer_feerate = self.get_dust_buffer_feerate(outbound_feerate_update) as u64; + let dust_buffer_feerate = self.context.get_dust_buffer_feerate(outbound_feerate_update) as u64; (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) }; @@ -3075,7 +3102,7 @@ impl Channel { let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.context.opt_anchors() { (self.context.counterparty_dust_limit_satoshis, self.context.holder_dust_limit_satoshis) } else { - let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64; + let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64; (self.context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000, self.context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000) }; @@ -3383,7 +3410,7 @@ impl Channel { let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() { (0, 0) } else { - let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64; + let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64; (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) }; @@ -4394,7 +4421,7 @@ impl Channel { return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned())); } Channel::::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?; - let feerate_over_dust_buffer = msg.feerate_per_kw > self.get_dust_buffer_feerate(None); + let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None); self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced)); self.context.update_time_counter += 1; @@ -5126,28 +5153,6 @@ impl Channel { }) } - pub fn get_feerate_sat_per_1000_weight(&self) -> u32 { - self.context.feerate_per_kw - } - - pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option) -> u32 { - // When 
calculating our exposure to dust HTLCs, we assume that the channel feerate - // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%, - // whichever is higher. This ensures that we aren't suddenly exposed to significantly - // more dust balance if the feerate increases when we have several HTLCs pending - // which are near the dust limit. - let mut feerate_per_kw = self.context.feerate_per_kw; - // If there's a pending update fee, use it to ensure we aren't under-estimating - // potential feerate updates coming soon. - if let Some((feerate, _)) = self.context.pending_update_fee { - feerate_per_kw = cmp::max(feerate_per_kw, feerate); - } - if let Some(feerate) = outbound_feerate_update { - feerate_per_kw = cmp::max(feerate_per_kw, feerate); - } - cmp::max(2530, feerate_per_kw * 1250 / 1000) - } - pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 { self.context.cur_holder_commitment_transaction_number + 1 } @@ -6247,11 +6252,6 @@ impl Channel { } } - /// Get forwarding information for the counterparty. - pub fn counterparty_forwarding_info(&self) -> Option { - self.context.counterparty_forwarding_info.clone() - } - pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> { if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 { return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string())); @@ -7735,7 +7735,7 @@ mod tests { let mut node_a_chan = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); assert!(node_a_chan.context.counterparty_forwarding_info.is_none()); assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1); // the default - assert!(node_a_chan.counterparty_forwarding_info().is_none()); + assert!(node_a_chan.context.counterparty_forwarding_info().is_none()); // Make sure that receiving a channel update will update the Channel as expected. let update = ChannelUpdate { @@ -7758,7 +7758,7 @@ mod tests { // The counterparty can send an update with a higher minimum HTLC, but that shouldn't // change our official htlc_minimum_msat. assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1); - match node_a_chan.counterparty_forwarding_info() { + match node_a_chan.context.counterparty_forwarding_info() { Some(info) => { assert_eq!(info.cltv_expiry_delta, 100); assert_eq!(info.fee_base_msat, 110); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index fd536a1360c..af51595650a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1478,7 +1478,7 @@ impl ChannelDetails { node_id: channel.context.get_counterparty_node_id(), features: latest_features, unspendable_punishment_reserve: to_remote_reserve_satoshis, - forwarding_info: channel.counterparty_forwarding_info(), + forwarding_info: channel.context.counterparty_forwarding_info(), // Ensures that we have actually received the `htlc_minimum_msat` value // from the counterparty through the `OpenChannel` or `AcceptChannel` // message (as they are always the first message from the counterparty). 
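The dust-buffer feerate that patch 07 moves into `ChannelContext` is the one piece of non-trivial arithmetic in this group of moves: the committed feerate (or any pending or proposed `update_fee`, whichever is highest) is padded by 25% and never taken below 2530 sat per 1000 weight. A minimal standalone sketch of that rule, written as a free function over plain `u32` inputs rather than the real channel state (the real method reads `pending_update_fee` off the context), is:

    // Mirror of ChannelContext::get_dust_buffer_feerate's arithmetic, detached
    // from the channel state so it can be exercised on its own.
    fn dust_buffer_feerate(
        feerate_per_kw: u32,
        pending_update_fee: Option<u32>,
        outbound_feerate_update: Option<u32>,
    ) -> u32 {
        let mut feerate = feerate_per_kw;
        // A pending update_fee, or a fee update we are about to propose, can only
        // raise the effective feerate, so take the maximum of all three.
        if let Some(pending) = pending_update_fee { feerate = feerate.max(pending); }
        if let Some(proposed) = outbound_feerate_update { feerate = feerate.max(proposed); }
        // Pad by 25%, with a floor of 2530 sat/kWU (roughly 10 sat/vB).
        core::cmp::max(2530, feerate * 1250 / 1000)
    }

    fn main() {
        // At low feerates the 2530 sat/kWU floor dominates...
        assert_eq!(dust_buffer_feerate(253, None, None), 2530);
        // ...while at higher feerates the 25% padding does: 10_000 * 1250 / 1000.
        assert_eq!(dust_buffer_feerate(10_000, Some(8_000), None), 12_500);
    }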
@@ -1496,7 +1496,7 @@ impl ChannelDetails { outbound_scid_alias: if channel.context.is_usable() { Some(channel.context.outbound_scid_alias()) } else { None }, inbound_scid_alias: channel.context.latest_inbound_scid_alias(), channel_value_satoshis: channel.context.get_value_satoshis(), - feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()), + feerate_sat_per_1000_weight: Some(channel.context.get_feerate_sat_per_1000_weight()), unspendable_punishment_reserve: to_self_reserve_satoshis, balance_msat: balance.balance_msat, inbound_capacity_msat: balance.inbound_capacity_msat, @@ -3945,18 +3945,18 @@ where fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { if !chan.context.is_outbound() { return NotifyOption::SkipPersist; } // If the feerate has decreased by less than half, don't bother - if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() { + if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", - log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); + log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } if !chan.context.is_live() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", - log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); + log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.", - log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); + log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate); chan.queue_update_fee(new_feerate, &self.logger); NotifyOption::DoPersist diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 79210068f32..a582836a70e 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -790,7 +790,7 @@ macro_rules! 
get_feerate { let mut per_peer_state_lock; let mut peer_state_lock; let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id); - chan.get_feerate_sat_per_1000_weight() + chan.context.get_feerate_sat_per_1000_weight() } } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 58977e78964..d64060be796 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -9628,7 +9628,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); let chan = chan_lock.channel_by_id.get(&channel_id).unwrap(); - chan.get_dust_buffer_feerate(None) as u64 + chan.context.get_dust_buffer_feerate(None) as u64 }; let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000; let dust_outbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat; From 2774aa26d9131b4d9785e88e83599c80e08be964 Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Tue, 13 Jun 2023 12:34:36 +0200 Subject: [PATCH 08/25] Prepare some methods for upcoming moves to `ChannelContext` To reduce interleaving in commits, we introduce a `context` variable in methods to be moved in upcoming commits so there is minimal change with the moves. --- lightning/src/ln/channel.rs | 162 +++++++++++++++++++----------------- 1 file changed, 84 insertions(+), 78 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index e5452d54fae..3a4b92fb47e 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2935,8 +2935,9 @@ impl Channel { /// Returns a HTLCStats about inbound pending htlcs fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { + let context = &self.context; let mut stats = HTLCStats { - pending_htlcs: self.context.pending_inbound_htlcs.len() as u32, + pending_htlcs: context.pending_inbound_htlcs.len() as u32, pending_htlcs_value_msat: 0, on_counterparty_tx_dust_exposure_msat: 0, on_holder_tx_dust_exposure_msat: 0, @@ -2944,16 +2945,16 @@ impl Channel { on_holder_tx_holding_cell_htlcs_count: 0, }; - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() { + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() { (0, 0) } else { - let dust_buffer_feerate = self.context.get_dust_buffer_feerate(outbound_feerate_update) as u64; + let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64; (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) }; - let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; - let holder_dust_limit_success_sat = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; - for ref htlc in self.context.pending_inbound_htlcs.iter() { + let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis; + let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis; + for ref htlc in context.pending_inbound_htlcs.iter() { stats.pending_htlcs_value_msat += htlc.amount_msat; if 
htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat { stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; @@ -2967,8 +2968,9 @@ impl Channel { /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell. fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { + let context = &self.context; let mut stats = HTLCStats { - pending_htlcs: self.context.pending_outbound_htlcs.len() as u32, + pending_htlcs: context.pending_outbound_htlcs.len() as u32, pending_htlcs_value_msat: 0, on_counterparty_tx_dust_exposure_msat: 0, on_holder_tx_dust_exposure_msat: 0, @@ -2976,16 +2978,16 @@ impl Channel { on_holder_tx_holding_cell_htlcs_count: 0, }; - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() { + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() { (0, 0) } else { - let dust_buffer_feerate = self.context.get_dust_buffer_feerate(outbound_feerate_update) as u64; + let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64; (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) }; - let counterparty_dust_limit_success_sat = htlc_success_dust_limit + self.context.counterparty_dust_limit_satoshis; - let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.context.holder_dust_limit_satoshis; - for ref htlc in self.context.pending_outbound_htlcs.iter() { + let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis; + let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis; + for ref htlc in context.pending_outbound_htlcs.iter() { stats.pending_htlcs_value_msat += htlc.amount_msat; if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat { stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; @@ -2995,7 +2997,7 @@ impl Channel { } } - for update in self.context.holding_cell_htlc_updates.iter() { + for update in context.holding_cell_htlc_updates.iter() { if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update { stats.pending_htlcs += 1; stats.pending_htlcs_value_msat += amount_msat; @@ -3018,26 +3020,27 @@ impl Channel { /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC /// corner case properly. pub fn get_available_balances(&self) -> AvailableBalances { + let context = &self.context; // Note that we have to handle overflow due to the above case. 
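+		// Our spendable balance starts from `value_to_self_msat`; inbound HTLCs we have
+		// already claimed (LocalRemoved/Fulfill) are added back since that value is ours
+		// even though the removal is not yet irrevocably committed, and the full value of
+		// every pending outbound HTLC, including adds still in the holding cell, is
+		// subtracted below.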
let inbound_stats = self.get_inbound_pending_htlc_stats(None); let outbound_stats = self.get_outbound_pending_htlc_stats(None); - let mut balance_msat = self.context.value_to_self_msat; - for ref htlc in self.context.pending_inbound_htlcs.iter() { + let mut balance_msat = context.value_to_self_msat; + for ref htlc in context.pending_inbound_htlcs.iter() { if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state { balance_msat += htlc.amount_msat; } } balance_msat -= outbound_stats.pending_htlcs_value_msat; - let outbound_capacity_msat = self.context.value_to_self_msat + let outbound_capacity_msat = context.value_to_self_msat .saturating_sub(outbound_stats.pending_htlcs_value_msat) .saturating_sub( - self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000); + context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000); let mut available_capacity_msat = outbound_capacity_msat; - if self.context.is_outbound() { + if context.is_outbound() { // We should mind channel commit tx fee when computing how much of the available capacity // can be used in the next htlc. Mirrors the logic in send_htlc. // @@ -3045,9 +3048,9 @@ impl Channel { // and the answer will in turn change the amount itself — making it a circular // dependency. // This complicates the computation around dust-values, up to the one-htlc-value. - let mut real_dust_limit_timeout_sat = self.context.holder_dust_limit_satoshis; - if !self.context.opt_anchors() { - real_dust_limit_timeout_sat += self.context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000; + let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis; + if !context.opt_anchors() { + real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000; } let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered); @@ -3071,16 +3074,16 @@ impl Channel { } else { // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure // sending a new HTLC won't reduce their balance below our reserve threshold. 
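+			// On an inbound channel the counterparty funded it and therefore pays the
+			// commitment fee. One more non-dust HTLC from us is only acceptable if, after
+			// paying for a commitment that includes it, the counterparty's balance still
+			// covers the reserve we required of them; otherwise we advertise no capacity
+			// rather than offer an HTLC they cannot afford to commit to.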
- let mut real_dust_limit_success_sat = self.context.counterparty_dust_limit_satoshis; - if !self.context.opt_anchors() { - real_dust_limit_success_sat += self.context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000; + let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis; + if !context.opt_anchors() { + real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000; } let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered); let max_reserved_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_above_dust, None); - let holder_selected_chan_reserve_msat = self.context.holder_selected_channel_reserve_satoshis * 1000; - let remote_balance_msat = (self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) + let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000; + let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat) .saturating_sub(inbound_stats.pending_htlcs_value_msat); if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat { @@ -3090,7 +3093,7 @@ impl Channel { } } - let mut next_outbound_htlc_minimum_msat = self.context.counterparty_htlc_minimum_msat; + let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat; // If we get close to our maximum dust exposure, we end up in a situation where we can send // between zero and the remaining dust exposure limit remaining OR above the dust limit. @@ -3099,25 +3102,25 @@ impl Channel { let mut remaining_msat_below_dust_exposure_limit = None; let mut dust_exposure_dust_limit_msat = 0; - let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.context.opt_anchors() { - (self.context.counterparty_dust_limit_satoshis, self.context.holder_dust_limit_satoshis) + let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() { + (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis) } else { - let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64; - (self.context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000, - self.context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000) + let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64; + (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000, + context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000) }; let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > self.context.get_max_dust_htlc_exposure_msat() as i64 { + if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 { remaining_msat_below_dust_exposure_limit = - Some(self.context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat)); + Some(context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat)); dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000); } let on_holder_dust_htlc_exposure_msat = 
inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; - if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > self.context.get_max_dust_htlc_exposure_msat() as i64 { + if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 { remaining_msat_below_dust_exposure_limit = Some(cmp::min( remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()), - self.context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat))); + context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat))); dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000); } @@ -3130,17 +3133,17 @@ impl Channel { } available_capacity_msat = cmp::min(available_capacity_msat, - self.context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat); + context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat); - if outbound_stats.pending_htlcs + 1 > self.context.counterparty_max_accepted_htlcs as u32 { + if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 { available_capacity_msat = 0; } AvailableBalances { - inbound_capacity_msat: cmp::max(self.context.channel_value_satoshis as i64 * 1000 - - self.context.value_to_self_msat as i64 + inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000 + - context.value_to_self_msat as i64 - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 - - self.context.holder_selected_channel_reserve_satoshis as i64 * 1000, + - context.holder_selected_channel_reserve_satoshis as i64 * 1000, 0) as u64, outbound_capacity_msat, next_outbound_htlc_limit_msat: available_capacity_msat, @@ -3150,7 +3153,8 @@ impl Channel { } pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option) { - (self.context.holder_selected_channel_reserve_satoshis, self.context.counterparty_selected_channel_reserve_satoshis) + let context = &self.context; + (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis) } // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs. @@ -3172,16 +3176,17 @@ impl Channel { /// /// Dust HTLCs are excluded. 
fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { - assert!(self.context.is_outbound()); + let context = &self.context; + assert!(context.is_outbound()); - let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.context.opt_anchors() { + let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() { (0, 0) } else { - (self.context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, - self.context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) + (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, + context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) }; - let real_dust_limit_success_sat = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; - let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.context.holder_dust_limit_satoshis; + let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis; + let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis; let mut addl_htlcs = 0; if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } @@ -3199,7 +3204,7 @@ impl Channel { } let mut included_htlcs = 0; - for ref htlc in self.context.pending_inbound_htlcs.iter() { + for ref htlc in context.pending_inbound_htlcs.iter() { if htlc.amount_msat / 1000 < real_dust_limit_success_sat { continue } @@ -3208,7 +3213,7 @@ impl Channel { included_htlcs += 1; } - for ref htlc in self.context.pending_outbound_htlcs.iter() { + for ref htlc in context.pending_outbound_htlcs.iter() { if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat { continue } @@ -3223,7 +3228,7 @@ impl Channel { } } - for htlc in self.context.holding_cell_htlc_updates.iter() { + for htlc in context.holding_cell_htlc_updates.iter() { match htlc { &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. 
} => { if amount_msat / 1000 < real_dust_limit_timeout_sat { @@ -3237,29 +3242,29 @@ impl Channel { } let num_htlcs = included_htlcs + addl_htlcs; - let res = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs, self.context.opt_anchors()); + let res = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors()); #[cfg(any(test, fuzzing))] { let mut fee = res; if fee_spike_buffer_htlc.is_some() { - fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs - 1, self.context.opt_anchors()); + fee = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors()); } - let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len() - + self.context.holding_cell_htlc_updates.len(); + let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len() + + context.holding_cell_htlc_updates.len(); let commitment_tx_info = CommitmentTxInfoCached { fee, total_pending_htlcs, next_holder_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => self.context.next_holder_htlc_id + 1, - HTLCInitiator::RemoteOffered => self.context.next_holder_htlc_id, + HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1, + HTLCInitiator::RemoteOffered => context.next_holder_htlc_id, }, next_counterparty_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => self.context.next_counterparty_htlc_id, - HTLCInitiator::RemoteOffered => self.context.next_counterparty_htlc_id + 1, + HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id, + HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1, }, - feerate: self.context.feerate_per_kw, + feerate: context.feerate_per_kw, }; - *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); + *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); } res } @@ -3275,16 +3280,17 @@ impl Channel { /// /// Dust HTLCs are excluded. fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { - assert!(!self.context.is_outbound()); + let context = &self.context; + assert!(!context.is_outbound()); - let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.context.opt_anchors() { + let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() { (0, 0) } else { - (self.context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, - self.context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) + (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, + context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) }; - let real_dust_limit_success_sat = htlc_success_dust_limit + self.context.counterparty_dust_limit_satoshis; - let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; + let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis; + let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis; let mut addl_htlcs = 0; if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } @@ -3305,14 +3311,14 @@ impl Channel { // non-dust inbound HTLCs are included (as all states imply it will be included) and only // committed outbound HTLCs, see below. 
let mut included_htlcs = 0; - for ref htlc in self.context.pending_inbound_htlcs.iter() { + for ref htlc in context.pending_inbound_htlcs.iter() { if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat { continue } included_htlcs += 1; } - for ref htlc in self.context.pending_outbound_htlcs.iter() { + for ref htlc in context.pending_outbound_htlcs.iter() { if htlc.amount_msat / 1000 <= real_dust_limit_success_sat { continue } @@ -3327,28 +3333,28 @@ impl Channel { } let num_htlcs = included_htlcs + addl_htlcs; - let res = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs, self.context.opt_anchors()); + let res = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors()); #[cfg(any(test, fuzzing))] { let mut fee = res; if fee_spike_buffer_htlc.is_some() { - fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs - 1, self.context.opt_anchors()); + fee = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors()); } - let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len(); + let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len(); let commitment_tx_info = CommitmentTxInfoCached { fee, total_pending_htlcs, next_holder_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => self.context.next_holder_htlc_id + 1, - HTLCInitiator::RemoteOffered => self.context.next_holder_htlc_id, + HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1, + HTLCInitiator::RemoteOffered => context.next_holder_htlc_id, }, next_counterparty_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => self.context.next_counterparty_htlc_id, - HTLCInitiator::RemoteOffered => self.context.next_counterparty_htlc_id + 1, + HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id, + HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1, }, - feerate: self.context.feerate_per_kw, + feerate: context.feerate_per_kw, }; - *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); + *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); } res } From ed6a5bb1510ea3993146e1d92963a9a099c021db Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Tue, 13 Jun 2023 13:19:02 +0200 Subject: [PATCH 09/25] Move `Channel::get_*_pending_htlc_stats` to `ChannelContext` impl --- lightning/src/ln/channel.rs | 182 ++++++++++++++++++------------------ 1 file changed, 91 insertions(+), 91 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3a4b92fb47e..0f07ef8262b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1368,6 +1368,88 @@ impl ChannelContext { pub fn counterparty_forwarding_info(&self) -> Option { self.counterparty_forwarding_info.clone() } + + /// Returns a HTLCStats about inbound pending htlcs + fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { + let context = self; + let mut stats = HTLCStats { + pending_htlcs: context.pending_inbound_htlcs.len() as u32, + pending_htlcs_value_msat: 0, + on_counterparty_tx_dust_exposure_msat: 0, + on_holder_tx_dust_exposure_msat: 0, + holding_cell_msat: 0, + on_holder_tx_holding_cell_htlcs_count: 0, + }; + + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() { + (0, 0) + } else { + let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64; + 
(dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) + }; + let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis; + let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis; + for ref htlc in context.pending_inbound_htlcs.iter() { + stats.pending_htlcs_value_msat += htlc.amount_msat; + if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat { + stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; + } + if htlc.amount_msat / 1000 < holder_dust_limit_success_sat { + stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat; + } + } + stats + } + + /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell. + fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { + let context = self; + let mut stats = HTLCStats { + pending_htlcs: context.pending_outbound_htlcs.len() as u32, + pending_htlcs_value_msat: 0, + on_counterparty_tx_dust_exposure_msat: 0, + on_holder_tx_dust_exposure_msat: 0, + holding_cell_msat: 0, + on_holder_tx_holding_cell_htlcs_count: 0, + }; + + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() { + (0, 0) + } else { + let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64; + (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) + }; + let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis; + let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis; + for ref htlc in context.pending_outbound_htlcs.iter() { + stats.pending_htlcs_value_msat += htlc.amount_msat; + if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat { + stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; + } + if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat { + stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat; + } + } + + for update in context.holding_cell_htlc_updates.iter() { + if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. 
} = update { + stats.pending_htlcs += 1; + stats.pending_htlcs_value_msat += amount_msat; + stats.holding_cell_msat += amount_msat; + if *amount_msat / 1000 < counterparty_dust_limit_success_sat { + stats.on_counterparty_tx_dust_exposure_msat += amount_msat; + } + if *amount_msat / 1000 < holder_dust_limit_timeout_sat { + stats.on_holder_tx_dust_exposure_msat += amount_msat; + } else { + stats.on_holder_tx_holding_cell_htlcs_count += 1; + } + } + } + stats + } } // Internal utility functions for channels @@ -2933,88 +3015,6 @@ impl Channel { } } - /// Returns a HTLCStats about inbound pending htlcs - fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { - let context = &self.context; - let mut stats = HTLCStats { - pending_htlcs: context.pending_inbound_htlcs.len() as u32, - pending_htlcs_value_msat: 0, - on_counterparty_tx_dust_exposure_msat: 0, - on_holder_tx_dust_exposure_msat: 0, - holding_cell_msat: 0, - on_holder_tx_holding_cell_htlcs_count: 0, - }; - - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() { - (0, 0) - } else { - let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64; - (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, - dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) - }; - let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis; - let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis; - for ref htlc in context.pending_inbound_htlcs.iter() { - stats.pending_htlcs_value_msat += htlc.amount_msat; - if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat { - stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; - } - if htlc.amount_msat / 1000 < holder_dust_limit_success_sat { - stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat; - } - } - stats - } - - /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell. - fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option) -> HTLCStats { - let context = &self.context; - let mut stats = HTLCStats { - pending_htlcs: context.pending_outbound_htlcs.len() as u32, - pending_htlcs_value_msat: 0, - on_counterparty_tx_dust_exposure_msat: 0, - on_holder_tx_dust_exposure_msat: 0, - holding_cell_msat: 0, - on_holder_tx_holding_cell_htlcs_count: 0, - }; - - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() { - (0, 0) - } else { - let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64; - (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, - dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) - }; - let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis; - let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis; - for ref htlc in context.pending_outbound_htlcs.iter() { - stats.pending_htlcs_value_msat += htlc.amount_msat; - if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat { - stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; - } - if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat { - stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat; - } - } - - for update in context.holding_cell_htlc_updates.iter() { - if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. 
} = update { - stats.pending_htlcs += 1; - stats.pending_htlcs_value_msat += amount_msat; - stats.holding_cell_msat += amount_msat; - if *amount_msat / 1000 < counterparty_dust_limit_success_sat { - stats.on_counterparty_tx_dust_exposure_msat += amount_msat; - } - if *amount_msat / 1000 < holder_dust_limit_timeout_sat { - stats.on_holder_tx_dust_exposure_msat += amount_msat; - } else { - stats.on_holder_tx_holding_cell_htlcs_count += 1; - } - } - } - stats - } - /// Get the available balances, see [`AvailableBalances`]'s fields for more info. /// Doesn't bother handling the /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC @@ -3022,8 +3022,8 @@ impl Channel { pub fn get_available_balances(&self) -> AvailableBalances { let context = &self.context; // Note that we have to handle overflow due to the above case. - let inbound_stats = self.get_inbound_pending_htlc_stats(None); - let outbound_stats = self.get_outbound_pending_htlc_stats(None); + let inbound_stats = context.get_inbound_pending_htlc_stats(None); + let outbound_stats = context.get_outbound_pending_htlc_stats(None); let mut balance_msat = context.value_to_self_msat; for ref htlc in context.pending_inbound_htlcs.iter() { @@ -3142,7 +3142,7 @@ impl Channel { AvailableBalances { inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000 - context.value_to_self_msat as i64 - - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 + - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 - context.holder_selected_channel_reserve_satoshis as i64 * 1000, 0) as u64, outbound_capacity_msat, @@ -3384,8 +3384,8 @@ impl Channel { return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat))); } - let inbound_stats = self.get_inbound_pending_htlc_stats(None); - let outbound_stats = self.get_outbound_pending_htlc_stats(None); + let inbound_stats = self.context.get_inbound_pending_htlc_stats(None); + let outbound_stats = self.context.get_outbound_pending_htlc_stats(None); if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 { return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs))); } @@ -4183,8 +4183,8 @@ impl Channel { } // Before proposing a feerate update, check that we can actually afford the new fee. 
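+		// The check below rebuilds our commitment at the proposed feerate and prices it
+		// for every currently non-dust HTLC, every holding-cell add that would be
+		// non-dust, and `CONCURRENT_INBOUND_HTLC_FEE_BUFFER` extra slots for inbound
+		// HTLCs that may arrive while the update_fee is in flight.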
- let inbound_stats = self.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); - let outbound_stats = self.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); + let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); + let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000; @@ -4435,8 +4435,8 @@ impl Channel { // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we // won't be pushed over our dust exposure limit by the feerate increase. if feerate_over_dust_buffer { - let inbound_stats = self.get_inbound_pending_htlc_stats(None); - let outbound_stats = self.get_outbound_pending_htlc_stats(None); + let inbound_stats = self.context.get_inbound_pending_htlc_stats(None); + let outbound_stats = self.context.get_outbound_pending_htlc_stats(None); let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() { From 08ee72be9d73a7ece8c1018229a15969348b9fbe Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Tue, 13 Jun 2023 13:26:50 +0200 Subject: [PATCH 10/25] Move `Channel::commit_tx_fee_msat` to file-level utilities --- lightning/src/ln/channel.rs | 40 ++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 0f07ef8262b..00faeabbde5 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1501,6 +1501,14 @@ fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) - feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 } +// Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs. +// Note that num_htlcs should not include dust HTLCs. +fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 { + // Note that we need to divide before multiplying to round properly, + // since the lowest denomination of bitcoin on-chain is the satoshi. 
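+	// E.g. with a total commitment weight of 1_068 WU at 253 sat/kWU:
+	// 1_068 * 253 = 270_204, / 1000 = 270 sat (truncated to whole satoshis), * 1000 =
+	// 270_000 msat. Multiplying by 1000 first would instead yield 270_204 msat, a fee
+	// that cannot be expressed on-chain.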
+ (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000 +} + // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking // has been completed, and then turn into a Channel to get compiler-time enforcement of things like // calling channel_id() before we're set up or things like get_outbound_funding_signed on an @@ -1698,7 +1706,7 @@ impl Channel { let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); let value_to_self_msat = channel_value_satoshis * 1000 - push_msat; - let commitment_tx_fee = Self::commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx()); + let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx()); if value_to_self_msat < commitment_tx_fee { return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) }); } @@ -2032,7 +2040,7 @@ impl Channel { // check if the funder's amount for the initial commitment tx is sufficient // for full fee payment plus a few HTLCs to ensure the channel will be useful. let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat; - let commitment_tx_fee = Self::commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000; + let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000; if funders_amount_msat / 1000 < commitment_tx_fee { return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee))); } @@ -3157,14 +3165,6 @@ impl Channel { (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis) } - // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs. - // Note that num_htlcs should not include dust HTLCs. - fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 { - // Note that we need to divide before multiplying to round properly, - // since the lowest denomination of bitcoin on-chain is the satoshi. - (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000 - } - /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the /// number of pending HTLCs that are on track to be in our next commitment tx. 
/// @@ -3242,12 +3242,12 @@ impl Channel { } let num_htlcs = included_htlcs + addl_htlcs; - let res = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors()); + let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors()); #[cfg(any(test, fuzzing))] { let mut fee = res; if fee_spike_buffer_htlc.is_some() { - fee = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors()); + fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors()); } let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len() + context.holding_cell_htlc_updates.len(); @@ -3333,12 +3333,12 @@ impl Channel { } let num_htlcs = included_htlcs + addl_htlcs; - let res = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors()); + let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors()); #[cfg(any(test, fuzzing))] { let mut fee = res; if fee_spike_buffer_htlc.is_some() { - fee = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors()); + fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors()); } let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len(); let commitment_tx_info = CommitmentTxInfoCached { @@ -6184,7 +6184,7 @@ impl Channel { && info.next_holder_htlc_id == self.context.next_holder_htlc_id && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id && info.feerate == self.context.feerate_per_kw { - let actual_fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.opt_anchors()); + let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.opt_anchors()); assert_eq!(actual_fee, info.fee); } } @@ -7280,7 +7280,7 @@ mod tests { use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; #[cfg(anchors)] use crate::ln::channel::InitFeatures; - use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator}; + use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat}; use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS}; use crate::ln::features::ChannelTypeFeatures; use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT}; @@ -7480,13 +7480,13 @@ mod tests { // the dust limit check. let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered); let local_commit_tx_fee = node_a_chan.next_local_commit_tx_fee_msat(htlc_candidate, None); - let local_commit_fee_0_htlcs = Channel::::commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.opt_anchors()); + let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.opt_anchors()); assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs); // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all // of the HTLCs are seen to be above the dust limit. 
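+		// Flipping `is_outbound_from_holder` makes `is_outbound()` return false, which
+		// `next_remote_commit_tx_fee_msat` asserts on, so the remote-fee path can be
+		// exercised on the same channel object.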
node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false; - let remote_commit_fee_3_htlcs = Channel::::commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.opt_anchors()); + let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.opt_anchors()); let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered); let remote_commit_tx_fee = node_a_chan.next_remote_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs); @@ -7508,8 +7508,8 @@ mod tests { let config = UserConfig::default(); let mut chan = Channel::::new_outbound(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); - let commitment_tx_fee_0_htlcs = Channel::::commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.opt_anchors()); - let commitment_tx_fee_1_htlc = Channel::::commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.opt_anchors()); + let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.opt_anchors()); + let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.opt_anchors()); // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be // counted as dust when it shouldn't be. From 9f4e71452ae0b81850a42e2b9b52024cbc5bb141 Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Tue, 13 Jun 2023 13:54:40 +0200 Subject: [PATCH 11/25] Move `Channel::next_*_commit_tx_fee_msat` methods to `ChannelContext` impl --- lightning/src/ln/channel.rs | 422 ++++++++++++++--------------- lightning/src/ln/channelmanager.rs | 2 +- 2 files changed, 212 insertions(+), 212 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 00faeabbde5..96e75965c23 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1450,6 +1450,205 @@ impl ChannelContext { } stats } + + pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option) { + let context = &self; + (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis) + } + + /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the + /// number of pending HTLCs that are on track to be in our next commitment tx. + /// + /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if + /// `fee_spike_buffer_htlc` is `Some`. + /// + /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the + /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added. + /// + /// Dust HTLCs are excluded. 
+ fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { + let context = &self; + assert!(context.is_outbound()); + + let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() { + (0, 0) + } else { + (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, + context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) + }; + let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis; + let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis; + + let mut addl_htlcs = 0; + if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } + match htlc.origin { + HTLCInitiator::LocalOffered => { + if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat { + addl_htlcs += 1; + } + }, + HTLCInitiator::RemoteOffered => { + if htlc.amount_msat / 1000 >= real_dust_limit_success_sat { + addl_htlcs += 1; + } + } + } + + let mut included_htlcs = 0; + for ref htlc in context.pending_inbound_htlcs.iter() { + if htlc.amount_msat / 1000 < real_dust_limit_success_sat { + continue + } + // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment + // transaction including this HTLC if it times out before they RAA. + included_htlcs += 1; + } + + for ref htlc in context.pending_outbound_htlcs.iter() { + if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat { + continue + } + match htlc.state { + OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1, + OutboundHTLCState::Committed => included_htlcs += 1, + OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1, + // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment + // transaction won't be generated until they send us their next RAA, which will mean + // dropping any HTLCs in this state. + _ => {}, + } + } + + for htlc in context.holding_cell_htlc_updates.iter() { + match htlc { + &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => { + if amount_msat / 1000 < real_dust_limit_timeout_sat { + continue + } + included_htlcs += 1 + }, + _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the + // ack we're guaranteed to never include them in commitment txs anymore. 
+ } + } + + let num_htlcs = included_htlcs + addl_htlcs; + let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors()); + #[cfg(any(test, fuzzing))] + { + let mut fee = res; + if fee_spike_buffer_htlc.is_some() { + fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors()); + } + let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len() + + context.holding_cell_htlc_updates.len(); + let commitment_tx_info = CommitmentTxInfoCached { + fee, + total_pending_htlcs, + next_holder_htlc_id: match htlc.origin { + HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1, + HTLCInitiator::RemoteOffered => context.next_holder_htlc_id, + }, + next_counterparty_htlc_id: match htlc.origin { + HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id, + HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1, + }, + feerate: context.feerate_per_kw, + }; + *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); + } + res + } + + /// Get the commitment tx fee for the remote's next commitment transaction based on the number of + /// pending HTLCs that are on track to be in their next commitment tx + /// + /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if + /// `fee_spike_buffer_htlc` is `Some`. + /// + /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the + /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added. + /// + /// Dust HTLCs are excluded. + fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { + let context = &self; + assert!(!context.is_outbound()); + + let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() { + (0, 0) + } else { + (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, + context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) + }; + let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis; + let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis; + + let mut addl_htlcs = 0; + if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } + match htlc.origin { + HTLCInitiator::LocalOffered => { + if htlc.amount_msat / 1000 >= real_dust_limit_success_sat { + addl_htlcs += 1; + } + }, + HTLCInitiator::RemoteOffered => { + if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat { + addl_htlcs += 1; + } + } + } + + // When calculating the set of HTLCs which will be included in their next commitment_signed, all + // non-dust inbound HTLCs are included (as all states imply it will be included) and only + // committed outbound HTLCs, see below. + let mut included_htlcs = 0; + for ref htlc in context.pending_inbound_htlcs.iter() { + if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat { + continue + } + included_htlcs += 1; + } + + for ref htlc in context.pending_outbound_htlcs.iter() { + if htlc.amount_msat / 1000 <= real_dust_limit_success_sat { + continue + } + // We only include outbound HTLCs if it will not be included in their next commitment_signed, + // i.e. if they've responded to us with an RAA after announcement. + match htlc.state { + OutboundHTLCState::Committed => included_htlcs += 1, + OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1, + OutboundHTLCState::LocalAnnounced { .. 
} => included_htlcs += 1, + _ => {}, + } + } + + let num_htlcs = included_htlcs + addl_htlcs; + let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors()); + #[cfg(any(test, fuzzing))] + { + let mut fee = res; + if fee_spike_buffer_htlc.is_some() { + fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors()); + } + let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len(); + let commitment_tx_info = CommitmentTxInfoCached { + fee, + total_pending_htlcs, + next_holder_htlc_id: match htlc.origin { + HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1, + HTLCInitiator::RemoteOffered => context.next_holder_htlc_id, + }, + next_counterparty_htlc_id: match htlc.origin { + HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id, + HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1, + }, + feerate: context.feerate_per_kw, + }; + *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); + } + res + } } // Internal utility functions for channels @@ -3062,9 +3261,9 @@ impl Channel { } let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered); - let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_above_dust, Some(())); + let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(())); let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered); - let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_dust, Some(())); + let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_dust, Some(())); // We will first subtract the fee as if we were above-dust. Then, if the resulting // value ends up being below dust, we have this fee available again. In that case, @@ -3088,7 +3287,7 @@ impl Channel { } let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered); - let max_reserved_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_above_dust, None); + let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None); let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000; let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat) @@ -3160,205 +3359,6 @@ impl Channel { } } - pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option) { - let context = &self.context; - (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis) - } - - /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the - /// number of pending HTLCs that are on track to be in our next commitment tx. - /// - /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if - /// `fee_spike_buffer_htlc` is `Some`. - /// - /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the - /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added. - /// - /// Dust HTLCs are excluded. 
- fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { - let context = &self.context; - assert!(context.is_outbound()); - - let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() { - (0, 0) - } else { - (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, - context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) - }; - let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis; - let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis; - - let mut addl_htlcs = 0; - if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } - match htlc.origin { - HTLCInitiator::LocalOffered => { - if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat { - addl_htlcs += 1; - } - }, - HTLCInitiator::RemoteOffered => { - if htlc.amount_msat / 1000 >= real_dust_limit_success_sat { - addl_htlcs += 1; - } - } - } - - let mut included_htlcs = 0; - for ref htlc in context.pending_inbound_htlcs.iter() { - if htlc.amount_msat / 1000 < real_dust_limit_success_sat { - continue - } - // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment - // transaction including this HTLC if it times out before they RAA. - included_htlcs += 1; - } - - for ref htlc in context.pending_outbound_htlcs.iter() { - if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat { - continue - } - match htlc.state { - OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1, - OutboundHTLCState::Committed => included_htlcs += 1, - OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1, - // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment - // transaction won't be generated until they send us their next RAA, which will mean - // dropping any HTLCs in this state. - _ => {}, - } - } - - for htlc in context.holding_cell_htlc_updates.iter() { - match htlc { - &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => { - if amount_msat / 1000 < real_dust_limit_timeout_sat { - continue - } - included_htlcs += 1 - }, - _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the - // ack we're guaranteed to never include them in commitment txs anymore. 
- } - } - - let num_htlcs = included_htlcs + addl_htlcs; - let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors()); - #[cfg(any(test, fuzzing))] - { - let mut fee = res; - if fee_spike_buffer_htlc.is_some() { - fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors()); - } - let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len() - + context.holding_cell_htlc_updates.len(); - let commitment_tx_info = CommitmentTxInfoCached { - fee, - total_pending_htlcs, - next_holder_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1, - HTLCInitiator::RemoteOffered => context.next_holder_htlc_id, - }, - next_counterparty_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id, - HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1, - }, - feerate: context.feerate_per_kw, - }; - *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); - } - res - } - - /// Get the commitment tx fee for the remote's next commitment transaction based on the number of - /// pending HTLCs that are on track to be in their next commitment tx - /// - /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if - /// `fee_spike_buffer_htlc` is `Some`. - /// - /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the - /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added. - /// - /// Dust HTLCs are excluded. - fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { - let context = &self.context; - assert!(!context.is_outbound()); - - let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() { - (0, 0) - } else { - (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, - context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) - }; - let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis; - let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis; - - let mut addl_htlcs = 0; - if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } - match htlc.origin { - HTLCInitiator::LocalOffered => { - if htlc.amount_msat / 1000 >= real_dust_limit_success_sat { - addl_htlcs += 1; - } - }, - HTLCInitiator::RemoteOffered => { - if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat { - addl_htlcs += 1; - } - } - } - - // When calculating the set of HTLCs which will be included in their next commitment_signed, all - // non-dust inbound HTLCs are included (as all states imply it will be included) and only - // committed outbound HTLCs, see below. - let mut included_htlcs = 0; - for ref htlc in context.pending_inbound_htlcs.iter() { - if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat { - continue - } - included_htlcs += 1; - } - - for ref htlc in context.pending_outbound_htlcs.iter() { - if htlc.amount_msat / 1000 <= real_dust_limit_success_sat { - continue - } - // We only include outbound HTLCs if it will not be included in their next commitment_signed, - // i.e. if they've responded to us with an RAA after announcement. - match htlc.state { - OutboundHTLCState::Committed => included_htlcs += 1, - OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1, - OutboundHTLCState::LocalAnnounced { .. 
} => included_htlcs += 1, - _ => {}, - } - } - - let num_htlcs = included_htlcs + addl_htlcs; - let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors()); - #[cfg(any(test, fuzzing))] - { - let mut fee = res; - if fee_spike_buffer_htlc.is_some() { - fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors()); - } - let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len(); - let commitment_tx_info = CommitmentTxInfoCached { - fee, - total_pending_htlcs, - next_holder_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1, - HTLCInitiator::RemoteOffered => context.next_holder_htlc_id, - }, - next_counterparty_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id, - HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1, - }, - feerate: context.feerate_per_kw, - }; - *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); - } - res - } - pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError> where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger { // We can't accept HTLCs sent after we've sent a shutdown. @@ -3452,7 +3452,7 @@ impl Channel { // feerate_per_kw, while maintaining their channel reserve (as required by the spec). let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else { let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - self.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations + self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations }; if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat { return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned())); @@ -3472,7 +3472,7 @@ impl Channel { // still be able to afford adding this HTLC plus one more future HTLC, regardless of being // sensitive to fee spikes. let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); + let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat { // Note that if the pending_forward_status is not updated here, then it's because we're already failing // the HTLC, i.e. its status is already set to failing. @@ -3482,7 +3482,7 @@ impl Channel { } else { // Check that they won't violate our local required channel reserve by adding this HTLC. 
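			// (In this branch we are the funder, so the commitment-transaction fee for the new HTLC
			// comes out of our balance: after paying it we must still be left with at least the
			// reserve the counterparty selected for us, which is what the check below enforces.)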
let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - let local_commit_tx_fee_msat = self.next_local_commit_tx_fee_msat(htlc_candidate, None); + let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None); if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat { return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned())); } @@ -7479,7 +7479,7 @@ mod tests { // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass // the dust limit check. let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered); - let local_commit_tx_fee = node_a_chan.next_local_commit_tx_fee_msat(htlc_candidate, None); + let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None); let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.opt_anchors()); assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs); @@ -7488,7 +7488,7 @@ mod tests { node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false; let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.opt_anchors()); let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered); - let remote_commit_tx_fee = node_a_chan.next_remote_commit_tx_fee_msat(htlc_candidate, None); + let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs); } @@ -7515,13 +7515,13 @@ mod tests { // counted as dust when it shouldn't be. let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000; let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered); - let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None); + let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc); // If swapped: this HTLC would be counted as non-dust when it shouldn't be. let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000; let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered); - let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None); + let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs); chan.context.channel_transaction_parameters.is_outbound_from_holder = false; @@ -7529,13 +7529,13 @@ mod tests { // If swapped: this HTLC would be counted as non-dust when it shouldn't be. 
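		// (An HTLC we offer appears on the remote commitment as one the counterparty claims via an
		// HTLC-success transaction, so it is measured against the success dust threshold. With the
		// non-anchor HTLC weights this test uses, at 253 sat/kW that threshold is roughly 10 sat
		// above the timeout threshold, so the amount computed below is still dust.)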
let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000; let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered); - let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None); + let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs); // If swapped: this HTLC would be counted as dust when it shouldn't be. let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000; let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered); - let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None); + let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None); assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc); } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index af51595650a..7bf437ee369 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1471,7 +1471,7 @@ impl ChannelDetails { let balance = channel.get_available_balances(); let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = - channel.get_holder_counterparty_selected_channel_reserve_satoshis(); + channel.context.get_holder_counterparty_selected_channel_reserve_satoshis(); ChannelDetails { channel_id: channel.context.channel_id(), counterparty: ChannelCounterparty { From 60706d6338854ef3a6d41ec8e500826ca0ee1f9e Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Tue, 13 Jun 2023 16:40:49 +0200 Subject: [PATCH 12/25] Move `Channel::get_available_balances` to `ChannelContext` impl --- lightning/src/ln/channel.rs | 276 ++++++++++++++--------------- lightning/src/ln/channelmanager.rs | 2 +- 2 files changed, 139 insertions(+), 139 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 96e75965c23..a9e7ffab4eb 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1451,6 +1451,143 @@ impl ChannelContext { stats } + /// Get the available balances, see [`AvailableBalances`]'s fields for more info. + /// Doesn't bother handling the + /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC + /// corner case properly. + pub fn get_available_balances(&self) -> AvailableBalances { + let context = &self; + // Note that we have to handle overflow due to the above case. + let inbound_stats = context.get_inbound_pending_htlc_stats(None); + let outbound_stats = context.get_outbound_pending_htlc_stats(None); + + let mut balance_msat = context.value_to_self_msat; + for ref htlc in context.pending_inbound_htlcs.iter() { + if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state { + balance_msat += htlc.amount_msat; + } + } + balance_msat -= outbound_stats.pending_htlcs_value_msat; + + let outbound_capacity_msat = context.value_to_self_msat + .saturating_sub(outbound_stats.pending_htlcs_value_msat) + .saturating_sub( + context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000); + + let mut available_capacity_msat = outbound_capacity_msat; + + if context.is_outbound() { + // We should mind channel commit tx fee when computing how much of the available capacity + // can be used in the next htlc. 
Mirrors the logic in send_htlc. + // + // The fee depends on whether the amount we will be sending is above dust or not, + // and the answer will in turn change the amount itself — making it a circular + // dependency. + // This complicates the computation around dust-values, up to the one-htlc-value. + let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis; + if !context.opt_anchors() { + real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000; + } + + let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered); + let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(())); + let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered); + let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_dust, Some(())); + + // We will first subtract the fee as if we were above-dust. Then, if the resulting + // value ends up being below dust, we have this fee available again. In that case, + // match the value to right-below-dust. + let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64); + if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 { + let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat; + debug_assert!(one_htlc_difference_msat != 0); + capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64; + capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat); + available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64; + } else { + available_capacity_msat = capacity_minus_commitment_fee_msat as u64; + } + } else { + // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure + // sending a new HTLC won't reduce their balance below our reserve threshold. + let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis; + if !context.opt_anchors() { + real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000; + } + + let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered); + let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None); + + let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000; + let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat) + .saturating_sub(inbound_stats.pending_htlcs_value_msat); + + if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat { + // If another HTLC's fee would reduce the remote's balance below the reserve limit + // we've selected for them, we can only send dust HTLCs. + available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1); + } + } + + let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat; + + // If we get close to our maximum dust exposure, we end up in a situation where we can send + // between zero and the remaining dust exposure limit remaining OR above the dust limit. 
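+		// (Concretely, the sendable amounts form two disjoint ranges: dust amounts up to whatever
+		// dust-exposure budget remains, and non-dust amounts at or above the dust limit.)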
+ // Because we cannot express this as a simple min/max, we prefer to tell the user they can + // send above the dust limit (as the router can always overpay to meet the dust limit). + let mut remaining_msat_below_dust_exposure_limit = None; + let mut dust_exposure_dust_limit_msat = 0; + + let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() { + (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis) + } else { + let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64; + (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000, + context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000) + }; + let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; + if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 { + remaining_msat_below_dust_exposure_limit = + Some(context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat)); + dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000); + } + + let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; + if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 { + remaining_msat_below_dust_exposure_limit = Some(cmp::min( + remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()), + context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat))); + dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000); + } + + if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit { + if available_capacity_msat < dust_exposure_dust_limit_msat { + available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat); + } else { + next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat); + } + } + + available_capacity_msat = cmp::min(available_capacity_msat, + context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat); + + if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 { + available_capacity_msat = 0; + } + + AvailableBalances { + inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000 + - context.value_to_self_msat as i64 + - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 + - context.holder_selected_channel_reserve_satoshis as i64 * 1000, + 0) as u64, + outbound_capacity_msat, + next_outbound_htlc_limit_msat: available_capacity_msat, + next_outbound_htlc_minimum_msat, + balance_msat, + } + } + pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option) { let context = &self; (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis) @@ -3222,143 +3359,6 @@ impl Channel { } } - /// Get the available balances, see [`AvailableBalances`]'s fields for more info. - /// Doesn't bother handling the - /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC - /// corner case properly. 
- pub fn get_available_balances(&self) -> AvailableBalances { - let context = &self.context; - // Note that we have to handle overflow due to the above case. - let inbound_stats = context.get_inbound_pending_htlc_stats(None); - let outbound_stats = context.get_outbound_pending_htlc_stats(None); - - let mut balance_msat = context.value_to_self_msat; - for ref htlc in context.pending_inbound_htlcs.iter() { - if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state { - balance_msat += htlc.amount_msat; - } - } - balance_msat -= outbound_stats.pending_htlcs_value_msat; - - let outbound_capacity_msat = context.value_to_self_msat - .saturating_sub(outbound_stats.pending_htlcs_value_msat) - .saturating_sub( - context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000); - - let mut available_capacity_msat = outbound_capacity_msat; - - if context.is_outbound() { - // We should mind channel commit tx fee when computing how much of the available capacity - // can be used in the next htlc. Mirrors the logic in send_htlc. - // - // The fee depends on whether the amount we will be sending is above dust or not, - // and the answer will in turn change the amount itself — making it a circular - // dependency. - // This complicates the computation around dust-values, up to the one-htlc-value. - let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis; - if !context.opt_anchors() { - real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000; - } - - let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered); - let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(())); - let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered); - let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_dust, Some(())); - - // We will first subtract the fee as if we were above-dust. Then, if the resulting - // value ends up being below dust, we have this fee available again. In that case, - // match the value to right-below-dust. - let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64); - if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 { - let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat; - debug_assert!(one_htlc_difference_msat != 0); - capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64; - capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat); - available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64; - } else { - available_capacity_msat = capacity_minus_commitment_fee_msat as u64; - } - } else { - // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure - // sending a new HTLC won't reduce their balance below our reserve threshold. 
- let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis; - if !context.opt_anchors() { - real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000; - } - - let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered); - let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None); - - let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000; - let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat) - .saturating_sub(inbound_stats.pending_htlcs_value_msat); - - if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat { - // If another HTLC's fee would reduce the remote's balance below the reserve limit - // we've selected for them, we can only send dust HTLCs. - available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1); - } - } - - let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat; - - // If we get close to our maximum dust exposure, we end up in a situation where we can send - // between zero and the remaining dust exposure limit remaining OR above the dust limit. - // Because we cannot express this as a simple min/max, we prefer to tell the user they can - // send above the dust limit (as the router can always overpay to meet the dust limit). - let mut remaining_msat_below_dust_exposure_limit = None; - let mut dust_exposure_dust_limit_msat = 0; - - let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() { - (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis) - } else { - let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64; - (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000, - context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000) - }; - let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 { - remaining_msat_below_dust_exposure_limit = - Some(context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat)); - dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000); - } - - let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; - if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 { - remaining_msat_below_dust_exposure_limit = Some(cmp::min( - remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()), - context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat))); - dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000); - } - - if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit { - if available_capacity_msat < dust_exposure_dust_limit_msat { - available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat); - } else { - next_outbound_htlc_minimum_msat = 
cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat); - } - } - - available_capacity_msat = cmp::min(available_capacity_msat, - context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat); - - if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 { - available_capacity_msat = 0; - } - - AvailableBalances { - inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000 - - context.value_to_self_msat as i64 - - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 - - context.holder_selected_channel_reserve_satoshis as i64 * 1000, - 0) as u64, - outbound_capacity_msat, - next_outbound_htlc_limit_msat: available_capacity_msat, - next_outbound_htlc_minimum_msat, - balance_msat, - } - } - pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError> where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger { // We can't accept HTLCs sent after we've sent a shutdown. @@ -6048,7 +6048,7 @@ impl Channel { return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned())); } - let available_balances = self.get_available_balances(); + let available_balances = self.context.get_available_balances(); if amount_msat < available_balances.next_outbound_htlc_minimum_msat { return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat", available_balances.next_outbound_htlc_minimum_msat))); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 7bf437ee369..e22f393f04c 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1469,7 +1469,7 @@ impl ChannelDetails { fn from_channel(channel: &Channel, best_block_height: u32, latest_features: InitFeatures) -> Self { - let balance = channel.get_available_balances(); + let balance = channel.context.get_available_balances(); let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = channel.context.get_holder_counterparty_selected_channel_reserve_satoshis(); ChannelDetails { From 25c1ad8e1918ee985f39b2df8714df96cad2eae6 Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 7 Jun 2023 11:03:46 +0200 Subject: [PATCH 13/25] Convert `ChannelDetails::from_channel` to `ChannelDetails::from_channel_context` This rename and refactor is so that we can get channel details from a `ChannelContext` which is a common object to all channels. --- lightning/src/ln/channelmanager.rs | 60 +++++++++++++++--------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e22f393f04c..d427e35cc52 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -40,7 +40,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. 
use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret}; -use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch}; +use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch}; use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] use crate::ln::features::InvoiceFeatures; @@ -1466,54 +1466,54 @@ impl ChannelDetails { self.short_channel_id.or(self.outbound_scid_alias) } - fn from_channel(channel: &Channel, + fn from_channel_context(context: &ChannelContext, best_block_height: u32, latest_features: InitFeatures) -> Self { - let balance = channel.context.get_available_balances(); + let balance = context.get_available_balances(); let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = - channel.context.get_holder_counterparty_selected_channel_reserve_satoshis(); + context.get_holder_counterparty_selected_channel_reserve_satoshis(); ChannelDetails { - channel_id: channel.context.channel_id(), + channel_id: context.channel_id(), counterparty: ChannelCounterparty { - node_id: channel.context.get_counterparty_node_id(), + node_id: context.get_counterparty_node_id(), features: latest_features, unspendable_punishment_reserve: to_remote_reserve_satoshis, - forwarding_info: channel.context.counterparty_forwarding_info(), + forwarding_info: context.counterparty_forwarding_info(), // Ensures that we have actually received the `htlc_minimum_msat` value // from the counterparty through the `OpenChannel` or `AcceptChannel` // message (as they are always the first message from the counterparty). // Else `Channel::get_counterparty_htlc_minimum_msat` could return the // default `0` value set by `Channel::new_outbound`. - outbound_htlc_minimum_msat: if channel.context.have_received_message() { - Some(channel.context.get_counterparty_htlc_minimum_msat()) } else { None }, - outbound_htlc_maximum_msat: channel.context.get_counterparty_htlc_maximum_msat(), + outbound_htlc_minimum_msat: if context.have_received_message() { + Some(context.get_counterparty_htlc_minimum_msat()) } else { None }, + outbound_htlc_maximum_msat: context.get_counterparty_htlc_maximum_msat(), }, - funding_txo: channel.context.get_funding_txo(), + funding_txo: context.get_funding_txo(), // Note that accept_channel (or open_channel) is always the first message, so // `have_received_message` indicates that type negotiation has completed. 
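			// Prior to that, the `channel_type` reported below is simply `None`.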
- channel_type: if channel.context.have_received_message() { Some(channel.context.get_channel_type().clone()) } else { None }, - short_channel_id: channel.context.get_short_channel_id(), - outbound_scid_alias: if channel.context.is_usable() { Some(channel.context.outbound_scid_alias()) } else { None }, - inbound_scid_alias: channel.context.latest_inbound_scid_alias(), - channel_value_satoshis: channel.context.get_value_satoshis(), - feerate_sat_per_1000_weight: Some(channel.context.get_feerate_sat_per_1000_weight()), + channel_type: if context.have_received_message() { Some(context.get_channel_type().clone()) } else { None }, + short_channel_id: context.get_short_channel_id(), + outbound_scid_alias: if context.is_usable() { Some(context.outbound_scid_alias()) } else { None }, + inbound_scid_alias: context.latest_inbound_scid_alias(), + channel_value_satoshis: context.get_value_satoshis(), + feerate_sat_per_1000_weight: Some(context.get_feerate_sat_per_1000_weight()), unspendable_punishment_reserve: to_self_reserve_satoshis, balance_msat: balance.balance_msat, inbound_capacity_msat: balance.inbound_capacity_msat, outbound_capacity_msat: balance.outbound_capacity_msat, next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat, - user_channel_id: channel.context.get_user_id(), - confirmations_required: channel.context.minimum_depth(), - confirmations: Some(channel.context.get_funding_tx_confirmations(best_block_height)), - force_close_spend_delay: channel.context.get_counterparty_selected_contest_delay(), - is_outbound: channel.context.is_outbound(), - is_channel_ready: channel.context.is_usable(), - is_usable: channel.context.is_live(), - is_public: channel.context.should_announce(), - inbound_htlc_minimum_msat: Some(channel.context.get_holder_htlc_minimum_msat()), - inbound_htlc_maximum_msat: channel.context.get_holder_htlc_maximum_msat(), - config: Some(channel.context.config()), + user_channel_id: context.get_user_id(), + confirmations_required: context.minimum_depth(), + confirmations: Some(context.get_funding_tx_confirmations(best_block_height)), + force_close_spend_delay: context.get_counterparty_selected_contest_delay(), + is_outbound: context.is_outbound(), + is_channel_ready: context.is_usable(), + is_usable: context.is_live(), + is_public: context.should_announce(), + inbound_htlc_minimum_msat: Some(context.get_holder_htlc_minimum_msat()), + inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(), + config: Some(context.config()), } } } @@ -2091,7 +2091,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) { - let details = ChannelDetails::from_channel(channel, best_block_height, + let details = ChannelDetails::from_channel_context(&channel.context, best_block_height, peer_state.latest_features.clone()); res.push(details); } @@ -2131,7 +2131,7 @@ where return peer_state.channel_by_id .iter() .map(|(_, channel)| - ChannelDetails::from_channel(channel, best_block_height, features.clone())) + ChannelDetails::from_channel_context(&channel.context, best_block_height, features.clone())) .collect(); } vec![] From e3f0c55182abeab483972175c1dde114741574fd Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Tue, 13 Jun 2023 16:43:43 +0200 Subject: [PATCH 14/25] Make `ChannelManager::issue_channel_close_events` take a `ChannelContext` --- lightning/src/ln/channel.rs | 18 
+++++++++--------- lightning/src/ln/channelmanager.rs | 26 +++++++++++++------------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a9e7ffab4eb..1e37335a86f 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1786,6 +1786,15 @@ impl ChannelContext { } res } + + /// Returns transaction if there is pending funding transaction that is yet to broadcast + pub fn unbroadcasted_funding(&self) -> Option { + if self.channel_state & (ChannelState::FundingCreated as u32) != 0 { + self.funding_transaction.clone() + } else { + None + } + } } // Internal utility functions for channels @@ -3350,15 +3359,6 @@ impl Channel { Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger)) } - /// Returns transaction if there is pending funding transaction that is yet to broadcast - pub fn unbroadcasted_funding(&self) -> Option { - if self.context.channel_state & (ChannelState::FundingCreated as u32) != 0 { - self.context.funding_transaction.clone() - } else { - None - } - } - pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError> where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger { // We can't accept HTLCs sent after we've sent a shutdown. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d427e35cc52..49ebf6fe144 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2166,19 +2166,19 @@ where } /// Helper function that issues the channel close events - fn issue_channel_close_events(&self, channel: &Channel<::Signer>, closure_reason: ClosureReason) { + fn issue_channel_close_events(&self, context: &ChannelContext<::Signer>, closure_reason: ClosureReason) { let mut pending_events_lock = self.pending_events.lock().unwrap(); - match channel.unbroadcasted_funding() { + match context.unbroadcasted_funding() { Some(transaction) => { pending_events_lock.push_back((events::Event::DiscardFunding { - channel_id: channel.context.channel_id(), transaction + channel_id: context.channel_id(), transaction }, None)); }, None => {}, } pending_events_lock.push_back((events::Event::ChannelClosed { - channel_id: channel.context.channel_id(), - user_channel_id: channel.context.get_user_id(), + channel_id: context.channel_id(), + user_channel_id: context.get_user_id(), reason: closure_reason }, None)); } @@ -2225,7 +2225,7 @@ where msg: channel_update }); } - self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed); + self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed); } break Ok(()); }, @@ -2335,9 +2335,9 @@ where let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) { if let Some(peer_msg) = peer_msg { - self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }); + self.issue_channel_close_events(&chan.get().context, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }); } else { - self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed); + self.issue_channel_close_events(&chan.get().context, ClosureReason::HolderForceClosed); } remove_channel!(self, chan) } else { @@ -5212,7 
+5212,7 @@ where msg: update }); } - self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure); + self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure); } Ok(()) } @@ -5697,7 +5697,7 @@ where } else { ClosureReason::CommitmentTxConfirmed }; - self.issue_channel_close_events(&chan, reason); + self.issue_channel_close_events(&chan.context, reason); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: chan.context.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { @@ -5822,7 +5822,7 @@ where }); } - self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure); + self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure); log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transactions(&[&tx]); @@ -6540,7 +6540,7 @@ where }); } let reason_message = format!("{}", reason); - self.issue_channel_close_events(channel, reason); + self.issue_channel_close_events(&channel.context, reason); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: channel.context.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { @@ -6791,7 +6791,7 @@ where chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); if chan.is_shutdown() { update_maps_on_chan_removal!(self, chan); - self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); + self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer); return false; } true From 10125269d22c719107de062d4bbc47601d448d85 Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Thu, 30 Mar 2023 11:17:32 +0200 Subject: [PATCH 15/25] Move channel constants up --- lightning/src/ln/channel.rs | 158 ++++++++++++++++++------------------ 1 file changed, 79 insertions(+), 79 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 1e37335a86f..c313a879b09 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -304,6 +304,85 @@ const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisc pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; +pub const DEFAULT_MAX_HTLCS: u16 = 50; + +pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 { + const COMMITMENT_TX_BASE_WEIGHT: u64 = 724; + const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124; + if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT } +} + +#[cfg(not(test))] +const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172; +#[cfg(test)] +pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172; + +pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330; + +/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to, +/// before this was made configurable. The percentage was made configurable in LDK 0.0.107, +/// although LDK 0.0.104+ enabled serialization of channels with a different value set for +/// `holder_max_htlc_value_in_flight_msat`. +pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10; + +/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if +/// `option_support_large_channel` (aka wumbo channels) is not supported. +/// It's 2^24 - 1. +pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1; + +/// Total bitcoin supply in satoshis. +pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000; + +/// The maximum network dust limit for standard script formats. 
This currently represents the +/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire +/// transaction non-standard and thus refuses to relay it. +/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many +/// implementations use this value for their dust limit today. +pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546; + +/// The maximum channel dust limit we will accept from our counterparty. +pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS; + +/// The dust limit is used for both the commitment transaction outputs as well as the closing +/// transactions. For cooperative closing transactions, we require segwit outputs, though accept +/// *any* segwit scripts, which are allowed to be up to 42 bytes in length. +/// In order to avoid having to concern ourselves with standardness during the closing process, we +/// simply require our counterparty to use a dust limit which will leave any segwit output +/// standard. +/// See for more details. +pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354; + +// Just a reasonable implementation-specific safe lower bound, higher than the dust limit. +pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000; + +/// Used to return a simple Error back to ChannelManager. Will get converted to a +/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our +/// channel_id in ChannelManager. +pub(super) enum ChannelError { + Ignore(String), + Warn(String), + Close(String), +} + +impl fmt::Debug for ChannelError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e), + &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e), + &ChannelError::Close(ref e) => write!(f, "Close : {}", e), + } + } +} + +macro_rules! secp_check { + ($res: expr, $err: expr) => { + match $res { + Ok(thing) => thing, + Err(_) => return Err(ChannelError::Close($err)), + } + }; +} + /// The "channel disabled" bit in channel_update must be set based on whether we are connected to /// our counterparty or not. However, we don't want to announce updates right away to avoid /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to @@ -1874,85 +1953,6 @@ struct CommitmentTxInfoCached { feerate: u32, } -pub const DEFAULT_MAX_HTLCS: u16 = 50; - -pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 { - const COMMITMENT_TX_BASE_WEIGHT: u64 = 724; - const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124; - if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT } -} - -#[cfg(not(test))] -const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172; -#[cfg(test)] -pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172; - -pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330; - -/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to, -/// before this was made configurable. The percentage was made configurable in LDK 0.0.107, -/// although LDK 0.0.104+ enabled serialization of channels with a different value set for -/// `holder_max_htlc_value_in_flight_msat`. -pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10; - -/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if -/// `option_support_large_channel` (aka wumbo channels) is not supported. -/// It's 2^24 - 1. -pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1; - -/// Total bitcoin supply in satoshis. 
-pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000; - -/// The maximum network dust limit for standard script formats. This currently represents the -/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire -/// transaction non-standard and thus refuses to relay it. -/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many -/// implementations use this value for their dust limit today. -pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546; - -/// The maximum channel dust limit we will accept from our counterparty. -pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS; - -/// The dust limit is used for both the commitment transaction outputs as well as the closing -/// transactions. For cooperative closing transactions, we require segwit outputs, though accept -/// *any* segwit scripts, which are allowed to be up to 42 bytes in length. -/// In order to avoid having to concern ourselves with standardness during the closing process, we -/// simply require our counterparty to use a dust limit which will leave any segwit output -/// standard. -/// See for more details. -pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354; - -// Just a reasonable implementation-specific safe lower bound, higher than the dust limit. -pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000; - -/// Used to return a simple Error back to ChannelManager. Will get converted to a -/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our -/// channel_id in ChannelManager. -pub(super) enum ChannelError { - Ignore(String), - Warn(String), - Close(String), -} - -impl fmt::Debug for ChannelError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e), - &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e), - &ChannelError::Close(ref e) => write!(f, "Close : {}", e), - } - } -} - -macro_rules! secp_check { - ($res: expr, $err: expr) => { - match $res { - Ok(thing) => thing, - Err(_) => return Err(ChannelError::Close($err)), - } - }; -} - impl Channel { fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures { // The default channel type (ie the first one we try) depends on whether the channel is From 883e0566ef22ca66c65f7798cef146639fec50b0 Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 5 Apr 2023 13:43:02 +0200 Subject: [PATCH 16/25] Introduce `InboundV1Channel` & `OutboundV1Channel` --- lightning/src/ln/channel.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index c313a879b09..3c22bdad430 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -6437,6 +6437,26 @@ impl Channel { } } +/// A not-yet-funded outbound (from holder) channel using V1 channel establishment. +pub(super) struct OutboundV1Channel { + #[cfg(not(test))] + context: ChannelContext, + #[cfg(test)] + pub context: ChannelContext, +} + +impl OutboundV1Channel {} + +/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment. 
+pub(super) struct InboundV1Channel { + #[cfg(not(test))] + context: ChannelContext, + #[cfg(test)] + pub context: ChannelContext, +} + +impl InboundV1Channel {} + const SERIALIZATION_VERSION: u8 = 3; const MIN_SERIALIZATION_VERSION: u8 = 2; From e6c2f04f153bca0d8c0a328c4cd825be1168a7cc Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 7 Jun 2023 16:09:35 +0200 Subject: [PATCH 17/25] Move outbound channel constructor into `OutboundV1Channel` impl --- lightning/src/ln/channel.rs | 491 +++++++++++++-------------- lightning/src/ln/channelmanager.rs | 4 +- lightning/src/ln/functional_tests.rs | 6 +- 3 files changed, 248 insertions(+), 253 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3c22bdad430..131b9b40249 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1954,32 +1954,6 @@ struct CommitmentTxInfoCached { } impl Channel { - fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures { - // The default channel type (ie the first one we try) depends on whether the channel is - // public - if it is, we just go with `only_static_remotekey` as it's the only option - // available. If it's private, we first try `scid_privacy` as it provides better privacy - // with no other changes, and fall back to `only_static_remotekey`. - let mut ret = ChannelTypeFeatures::only_static_remote_key(); - if !config.channel_handshake_config.announced_channel && - config.channel_handshake_config.negotiate_scid_privacy && - their_features.supports_scid_privacy() { - ret.set_scid_privacy_required(); - } - - // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we - // set it now. If they don't understand it, we'll fall back to our default of - // `only_static_remotekey`. - #[cfg(anchors)] - { // Attributes are not allowed on if expressions on our current MSRV of 1.41. - if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx && - their_features.supports_anchors_zero_fee_htlc_tx() { - ret.set_anchors_zero_fee_htlc_tx_required(); - } - } - - ret - } - /// If we receive an error message, it may only be a rejection of the channel type we tried, /// not of our ability to open any channel at all. Thus, on error, we should first call this /// and see if we get a new `OpenChannel` message, otherwise the channel is failed. 
@@ -2011,203 +1985,6 @@ impl Channel { } // Constructors: - pub fn new_outbound( - fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, - channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, - outbound_scid_alias: u64 - ) -> Result, APIError> - where ES::Target: EntropySource, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - { - let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay; - let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id); - let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id); - let pubkeys = holder_signer.pubkeys().clone(); - - if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO { - return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)}); - } - if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { - return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)}); - } - let channel_value_msat = channel_value_satoshis * 1000; - if push_msat > channel_value_msat { - return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) }); - } - if holder_selected_contest_delay < BREAKDOWN_TIMEOUT { - return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)}); - } - let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); - if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - // Protocol level safety check in place, although it should never happen because - // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` - return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) }); - } - - let channel_type = Self::get_initial_channel_type(&config, their_features); - debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config))); - - let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); - - let value_to_self_msat = channel_value_satoshis * 1000 - push_msat; - let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx()); - if value_to_self_msat < commitment_tx_fee { - return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) }); - } - - let mut secp_ctx = Secp256k1::new(); - secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); - - let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { - match signer_provider.get_shutdown_scriptpubkey() { - Ok(scriptpubkey) => Some(scriptpubkey), - Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}), - } - } else { None }; - - if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { - if 
!shutdown_scriptpubkey.is_compatible(&their_features) { - return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() }); - } - } - - let destination_script = match signer_provider.get_destination_script() { - Ok(script) => script, - Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}), - }; - - let temporary_channel_id = entropy_source.get_secure_random_bytes(); - - Ok(Channel { - context: ChannelContext { - user_id, - - config: LegacyChannelConfig { - options: config.channel_config.clone(), - announced_channel: config.channel_handshake_config.announced_channel, - commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, - }, - - prev_config: None, - - inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()), - - channel_id: temporary_channel_id, - temporary_channel_id: Some(temporary_channel_id), - channel_state: ChannelState::OurInitSent as u32, - announcement_sigs_state: AnnouncementSigsState::NotSent, - secp_ctx, - channel_value_satoshis, - - latest_monitor_update_id: 0, - - holder_signer, - shutdown_scriptpubkey, - destination_script, - - cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - value_to_self_msat, - - pending_inbound_htlcs: Vec::new(), - pending_outbound_htlcs: Vec::new(), - holding_cell_htlc_updates: Vec::new(), - pending_update_fee: None, - holding_cell_update_fee: None, - next_holder_htlc_id: 0, - next_counterparty_htlc_id: 0, - update_time_counter: 1, - - resend_order: RAACommitmentOrder::CommitmentFirst, - - monitor_pending_channel_ready: false, - monitor_pending_revoke_and_ack: false, - monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), - monitor_pending_failures: Vec::new(), - monitor_pending_finalized_fulfills: Vec::new(), - - #[cfg(debug_assertions)] - holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), - #[cfg(debug_assertions)] - counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), - - last_sent_closing_fee: None, - pending_counterparty_closing_signed: None, - closing_fee_limits: None, - target_closing_feerate_sats_per_kw: None, - - inbound_awaiting_accept: false, - - funding_tx_confirmed_in: None, - funding_tx_confirmation_height: 0, - short_channel_id: None, - channel_creation_height: current_chain_height, - - feerate_per_kw: feerate, - counterparty_dust_limit_satoshis: 0, - holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, - counterparty_max_htlc_value_in_flight_msat: 0, - holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), - counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel - holder_selected_channel_reserve_satoshis, - counterparty_htlc_minimum_msat: 0, - holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, - counterparty_max_accepted_htlcs: 0, - holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), - minimum_depth: None, // Filled in in accept_channel - - counterparty_forwarding_info: None, - - channel_transaction_parameters: ChannelTransactionParameters { - holder_pubkeys: pubkeys, - 
holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, - is_outbound_from_holder: true, - counterparty_parameters: None, - funding_outpoint: None, - opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None }, - opt_non_zero_fee_anchors: None - }, - funding_transaction: None, - - counterparty_cur_commitment_point: None, - counterparty_prev_commitment_point: None, - counterparty_node_id, - - counterparty_shutdown_scriptpubkey: None, - - commitment_secrets: CounterpartyCommitmentSecrets::new(), - - channel_update_status: ChannelUpdateStatus::Enabled, - closing_signed_in_flight: false, - - announcement_sigs: None, - - #[cfg(any(test, fuzzing))] - next_local_commitment_tx_fee_info_cached: Mutex::new(None), - #[cfg(any(test, fuzzing))] - next_remote_commitment_tx_fee_info_cached: Mutex::new(None), - - workaround_lnd_bug_4006: None, - sent_message_awaiting_response: None, - - latest_inbound_scid_alias: None, - outbound_scid_alias, - - channel_pending_event_emitted: false, - channel_ready_event_emitted: false, - - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills: HashSet::new(), - - channel_type, - channel_keys_id, - - pending_monitor_updates: Vec::new(), - } - }) - } fn check_remote_fee(fee_estimator: &LowerBoundedFeeEstimator, feerate_per_kw: u32, cur_feerate_per_kw: Option, logger: &L) @@ -6439,19 +6216,237 @@ impl Channel { /// A not-yet-funded outbound (from holder) channel using V1 channel establishment. pub(super) struct OutboundV1Channel { - #[cfg(not(test))] - context: ChannelContext, - #[cfg(test)] pub context: ChannelContext, } -impl OutboundV1Channel {} +impl OutboundV1Channel { + fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures { + // The default channel type (ie the first one we try) depends on whether the channel is + // public - if it is, we just go with `only_static_remotekey` as it's the only option + // available. If it's private, we first try `scid_privacy` as it provides better privacy + // with no other changes, and fall back to `only_static_remotekey`. + let mut ret = ChannelTypeFeatures::only_static_remote_key(); + if !config.channel_handshake_config.announced_channel && + config.channel_handshake_config.negotiate_scid_privacy && + their_features.supports_scid_privacy() { + ret.set_scid_privacy_required(); + } + + // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we + // set it now. If they don't understand it, we'll fall back to our default of + // `only_static_remotekey`. + #[cfg(anchors)] + { // Attributes are not allowed on if expressions on our current MSRV of 1.41. 
+ if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx && + their_features.supports_anchors_zero_fee_htlc_tx() { + ret.set_anchors_zero_fee_htlc_tx_required(); + } + } + + ret + } + + pub fn new_outbound( + fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, + channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, + outbound_scid_alias: u64 + ) -> Result, APIError> + where ES::Target: EntropySource, + SP::Target: SignerProvider, + F::Target: FeeEstimator, + { + let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay; + let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id); + let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id); + let pubkeys = holder_signer.pubkeys().clone(); + + if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO { + return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)}); + } + if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { + return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)}); + } + let channel_value_msat = channel_value_satoshis * 1000; + if push_msat > channel_value_msat { + return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) }); + } + if holder_selected_contest_delay < BREAKDOWN_TIMEOUT { + return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)}); + } + let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); + if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + // Protocol level safety check in place, although it should never happen because + // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` + return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) }); + } + + let channel_type = Self::get_initial_channel_type(&config, their_features); + debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config))); + + let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + + let value_to_self_msat = channel_value_satoshis * 1000 - push_msat; + let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx()); + if value_to_self_msat < commitment_tx_fee { + return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) }); + } + + let mut secp_ctx = Secp256k1::new(); + secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); + + let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { + match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => Some(scriptpubkey), + Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get 
shutdown scriptpubkey".to_owned()}), + } + } else { None }; + + if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { + if !shutdown_scriptpubkey.is_compatible(&their_features) { + return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() }); + } + } + + let destination_script = match signer_provider.get_destination_script() { + Ok(script) => script, + Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}), + }; + + let temporary_channel_id = entropy_source.get_secure_random_bytes(); + + Ok(Channel { + context: ChannelContext { + user_id, + + config: LegacyChannelConfig { + options: config.channel_config.clone(), + announced_channel: config.channel_handshake_config.announced_channel, + commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, + }, + + prev_config: None, + + inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()), + + channel_id: temporary_channel_id, + temporary_channel_id: Some(temporary_channel_id), + channel_state: ChannelState::OurInitSent as u32, + announcement_sigs_state: AnnouncementSigsState::NotSent, + secp_ctx, + channel_value_satoshis, + + latest_monitor_update_id: 0, + + holder_signer, + shutdown_scriptpubkey, + destination_script, + + cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + value_to_self_msat, + + pending_inbound_htlcs: Vec::new(), + pending_outbound_htlcs: Vec::new(), + holding_cell_htlc_updates: Vec::new(), + pending_update_fee: None, + holding_cell_update_fee: None, + next_holder_htlc_id: 0, + next_counterparty_htlc_id: 0, + update_time_counter: 1, + + resend_order: RAACommitmentOrder::CommitmentFirst, + + monitor_pending_channel_ready: false, + monitor_pending_revoke_and_ack: false, + monitor_pending_commitment_signed: false, + monitor_pending_forwards: Vec::new(), + monitor_pending_failures: Vec::new(), + monitor_pending_finalized_fulfills: Vec::new(), + + #[cfg(debug_assertions)] + holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), + #[cfg(debug_assertions)] + counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), + + last_sent_closing_fee: None, + pending_counterparty_closing_signed: None, + closing_fee_limits: None, + target_closing_feerate_sats_per_kw: None, + + inbound_awaiting_accept: false, + + funding_tx_confirmed_in: None, + funding_tx_confirmation_height: 0, + short_channel_id: None, + channel_creation_height: current_chain_height, + + feerate_per_kw: feerate, + counterparty_dust_limit_satoshis: 0, + holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, + counterparty_max_htlc_value_in_flight_msat: 0, + holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), + counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel + holder_selected_channel_reserve_satoshis, + counterparty_htlc_minimum_msat: 0, + holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, + counterparty_max_accepted_htlcs: 0, + holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), + minimum_depth: None, // Filled in in accept_channel + + counterparty_forwarding_info: 
None, + + channel_transaction_parameters: ChannelTransactionParameters { + holder_pubkeys: pubkeys, + holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, + is_outbound_from_holder: true, + counterparty_parameters: None, + funding_outpoint: None, + opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None }, + opt_non_zero_fee_anchors: None + }, + funding_transaction: None, + + counterparty_cur_commitment_point: None, + counterparty_prev_commitment_point: None, + counterparty_node_id, + + counterparty_shutdown_scriptpubkey: None, + + commitment_secrets: CounterpartyCommitmentSecrets::new(), + + channel_update_status: ChannelUpdateStatus::Enabled, + closing_signed_in_flight: false, + + announcement_sigs: None, + + #[cfg(any(test, fuzzing))] + next_local_commitment_tx_fee_info_cached: Mutex::new(None), + #[cfg(any(test, fuzzing))] + next_remote_commitment_tx_fee_info_cached: Mutex::new(None), + + workaround_lnd_bug_4006: None, + sent_message_awaiting_response: None, + + latest_inbound_scid_alias: None, + outbound_scid_alias, + + channel_pending_event_emitted: false, + channel_ready_event_emitted: false, + + #[cfg(any(test, fuzzing))] + historical_inbound_htlc_fulfills: HashSet::new(), + + channel_type, + channel_keys_id, + + pending_monitor_updates: Vec::new(), + } + }) + } +} /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment. pub(super) struct InboundV1Channel { - #[cfg(not(test))] - context: ChannelContext, - #[cfg(test)] pub context: ChannelContext, } @@ -7300,7 +7295,7 @@ mod tests { use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; #[cfg(anchors)] use crate::ln::channel::InitFeatures; - use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat}; + use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat}; use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS}; use crate::ln::features::ChannelTypeFeatures; use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT}; @@ -7409,7 +7404,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - match Channel::::new_outbound(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) { + match OutboundV1Channel::::new_outbound(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) { Err(APIError::IncompatibleShutdownScript { script }) => { assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner()); }, @@ -7432,7 +7427,7 @@ mod tests { let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let node_a_chan = Channel::::new_outbound(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let node_a_chan = OutboundV1Channel::::new_outbound(&bounded_fee_estimator, &&keys_provider, 
&&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); // Now change the fee so we can check that the fee in the open_channel message is the // same as the old fee. @@ -7458,7 +7453,7 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut node_a_chan = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. @@ -7526,7 +7521,7 @@ mod tests { let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut chan = Channel::::new_outbound(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut chan = OutboundV1Channel::::new_outbound(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.opt_anchors()); let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.opt_anchors()); @@ -7575,7 +7570,7 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut node_a_chan = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); // Create Node B's channel by receiving Node A's open_channel message let open_channel_msg = node_a_chan.get_open_channel(chain_hash); @@ -7638,12 +7633,12 @@ mod tests { // Test that `new_outbound` creates a channel with the correct value for // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, // which is set to the lower bound + 1 (2%) of the `channel_value`. - let chan_1 = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap(); + let chan_1 = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap(); let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000; assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64); // Test with the upper bound - 1 of valid values (99%). 
- let chan_2 = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap(); + let chan_2 = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap(); let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000; assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64); @@ -7663,14 +7658,14 @@ mod tests { // Test that `new_outbound` uses the lower bound of the configurable percentage values (1%) // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1. - let chan_5 = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap(); + let chan_5 = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap(); let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000; assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64); // Test that `new_outbound` uses the upper bound of the configurable percentage values // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value // than 100. - let chan_6 = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap(); + let chan_6 = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap(); let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000; assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat); @@ -7723,7 +7718,7 @@ mod tests { let mut outbound_node_config = UserConfig::default(); outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32; - let chan = Channel::::new_outbound(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap(); + let chan = OutboundV1Channel::::new_outbound(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap(); let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64); assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve); @@ -7758,7 +7753,7 @@ mod tests { // Create a channel. 
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut node_a_chan = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); assert!(node_a_chan.context.counterparty_forwarding_info.is_none()); assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1); // the default assert!(node_a_chan.context.counterparty_forwarding_info().is_none()); @@ -8556,7 +8551,7 @@ mod tests { let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let node_a_chan = Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, + let node_a_chan = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key(); @@ -8590,7 +8585,7 @@ mod tests { // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both // need to signal it. - let channel_a = Channel::::new_outbound( + let channel_a = OutboundV1Channel::::new_outbound( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42, &config, 0, 42 @@ -8601,7 +8596,7 @@ mod tests { expected_channel_type.set_static_remote_key_required(); expected_channel_type.set_anchors_zero_fee_htlc_tx_required(); - let channel_a = Channel::::new_outbound( + let channel_a = OutboundV1Channel::::new_outbound( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); @@ -8639,7 +8634,7 @@ mod tests { let raw_init_features = static_remote_key_required | simple_anchors_required; let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec()); - let channel_a = Channel::::new_outbound( + let channel_a = OutboundV1Channel::::new_outbound( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); @@ -8686,7 +8681,7 @@ mod tests { // First, we'll try to open a channel between A and B where A requests a channel type for // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by // B as it's not supported by LDK. - let channel_a = Channel::::new_outbound( + let channel_a = OutboundV1Channel::::new_outbound( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); @@ -8705,7 +8700,7 @@ mod tests { // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the // original `option_anchors` feature, which should be rejected by A as it's not supported by // LDK. 
- let mut channel_a = Channel::::new_outbound( + let mut channel_a = OutboundV1Channel::::new_outbound( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init, 10000000, 100000, 42, &config, 0, 42 ).unwrap(); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 49ebf6fe144..78265a0b7f1 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -40,7 +40,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret}; -use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch}; +use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel}; use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] use crate::ln::features::InvoiceFeatures; @@ -2044,7 +2044,7 @@ where let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); let their_features = &peer_state.latest_features; let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; - match Channel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, + match OutboundV1Channel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config, self.best_block.read().unwrap().height(), outbound_scid_alias) { diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index d64060be796..6af81108f27 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -20,7 +20,7 @@ use crate::chain::transaction::OutPoint; use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash}; -use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis}; +use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel}; use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, Channel, ChannelError}; use crate::ln::{chan_utils, onion_utils}; @@ -6950,8 +6950,8 @@ fn test_user_configurable_csv_delay() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound() - if let Err(error) = 
Channel::new_outbound(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), + // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new_outbound() + if let Err(error) = OutboundV1Channel::new_outbound(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0, &low_our_to_self_config, 0, 42) { From baadeb73746980f684a0f2fff1a3c8f39350f9d0 Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 7 Jun 2023 16:17:18 +0200 Subject: [PATCH 18/25] Move inbound channel constructor into `InboundV1Channel` impl --- lightning/src/ln/channel.rs | 6588 +++++++++++++------------- lightning/src/ln/channelmanager.rs | 4 +- lightning/src/ln/functional_tests.rs | 12 +- 3 files changed, 3302 insertions(+), 3302 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 131b9b40249..7e983fa7788 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2018,2136 +2018,2093 @@ impl Channel { Ok(()) } - /// Creates a new channel from a remote sides' request for one. - /// Assumes chain_hash has already been checked and corresponds with what we expect! - pub fn new_from_req( - fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, - counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, - their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig, - current_chain_height: u32, logger: &L, outbound_scid_alias: u64 - ) -> Result, ChannelError> - where ES::Target: EntropySource, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - L::Target: Logger, - { - let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false }; + #[inline] + fn get_closing_scriptpubkey(&self) -> Script { + // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script + // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method + // outside of those situations will fail. + self.context.shutdown_scriptpubkey.clone().unwrap().into_inner() + } - // First check the channel type is known, failing before we do anything else if we don't - // support this channel type. 
- let channel_type = if let Some(channel_type) = &msg.channel_type { - if channel_type.supports_any_optional_bits() { - return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned())); - } + #[inline] + fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 { + let mut ret = + (4 + // version + 1 + // input count + 36 + // prevout + 1 + // script length (0) + 4 + // sequence + 1 + // output count + 4 // lock time + )*4 + // * 4 for non-witness parts + 2 + // witness marker and flag + 1 + // witness element count + 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script) + self.context.get_funding_redeemscript().len() as u64 + // funding witness script + 2*(1 + 71); // two signatures + sighash type flags + if let Some(spk) = a_scriptpubkey { + ret += ((8+1) + // output values and script length + spk.len() as u64) * 4; // scriptpubkey and witness multiplier + } + if let Some(spk) = b_scriptpubkey { + ret += ((8+1) + // output values and script length + spk.len() as u64) * 4; // scriptpubkey and witness multiplier + } + ret + } - // We only support the channel types defined by the `ChannelManager` in - // `provided_channel_type_features`. The channel type must always support - // `static_remote_key`. - if !channel_type.requires_static_remote_key() { - return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned())); - } - // Make sure we support all of the features behind the channel type. - if !channel_type.is_subset(our_supported_features) { - return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned())); - } - if channel_type.requires_scid_privacy() && announced_channel { - return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned())); - } - channel_type.clone() - } else { - let channel_type = ChannelTypeFeatures::from_init(&their_features); - if channel_type != ChannelTypeFeatures::only_static_remote_key() { - return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); - } - channel_type - }; - let opt_anchors = channel_type.supports_anchors_zero_fee_htlc_tx(); + #[inline] + fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) { + assert!(self.context.pending_inbound_htlcs.is_empty()); + assert!(self.context.pending_outbound_htlcs.is_empty()); + assert!(self.context.pending_update_fee.is_none()); - let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id); - let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id); - let pubkeys = holder_signer.pubkeys().clone(); - let counterparty_pubkeys = ChannelPublicKeys { - funding_pubkey: msg.funding_pubkey, - revocation_basepoint: msg.revocation_basepoint, - payment_point: msg.payment_point, - delayed_payment_basepoint: msg.delayed_payment_basepoint, - htlc_basepoint: msg.htlc_basepoint - }; + let mut total_fee_satoshis = proposed_total_fee_satoshis; + let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 }; + let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 }; 
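As a rough, standalone illustration of the closing-value arithmetic being added above (a sketch only, not LDK code: `closing_values` is a hypothetical helper that folds the negative-balance fee top-up and dust handling into one function, and it omits `skip_remote_output` and all script handling):

// Sketch: split the channel value at cooperative close. The funder (the
// outbound side) pays the closing fee, a negative balance is folded back into
// the fee, and outputs at or below the holder dust limit are dropped.
// Inputs/outputs are in satoshis except value_to_self_msat.
fn closing_values(
    channel_value_sat: u64, value_to_self_msat: u64, proposed_fee_sat: u64,
    is_outbound: bool, dust_limit_sat: u64,
) -> (u64, u64, u64) {
    let mut fee = proposed_fee_sat;
    let mut to_holder: i64 = (value_to_self_msat / 1000) as i64
        - if is_outbound { fee as i64 } else { 0 };
    let mut to_counterparty: i64 =
        ((channel_value_sat * 1000 - value_to_self_msat) / 1000) as i64
        - if is_outbound { 0 } else { fee as i64 };
    if to_holder < 0 { fee += (-to_holder) as u64; to_holder = 0; }
    if to_counterparty < 0 { fee += (-to_counterparty) as u64; to_counterparty = 0; }
    if to_counterparty as u64 <= dust_limit_sat { to_counterparty = 0; }
    if to_holder as u64 <= dust_limit_sat { to_holder = 0; }
    (to_holder as u64, to_counterparty as u64, fee)
}

For example, on a 1_000_000 sat channel with 300_000_000 msat owed to the holder, a 1_000 sat proposed fee, the holder as funder, and a 354 sat dust limit, this returns (299_000, 700_000, 1_000): the funder's output absorbs the fee and both outputs stay above dust.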
- if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT { - return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT))); + if value_to_holder < 0 { + assert!(self.context.is_outbound()); + total_fee_satoshis += (-value_to_holder) as u64; + } else if value_to_counterparty < 0 { + assert!(!self.context.is_outbound()); + total_fee_satoshis += (-value_to_counterparty) as u64; } - // Check sanity of message fields: - if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis { - return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis))); - } - if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { - return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis))); - } - if msg.channel_reserve_satoshis > msg.funding_satoshis { - return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis))); - } - let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; - if msg.push_msat > full_channel_value_msat { - return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat))); - } - if msg.dust_limit_satoshis > msg.funding_satoshis { - return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis))); - } - if msg.htlc_minimum_msat >= full_channel_value_msat { - return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat))); + if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis { + value_to_counterparty = 0; } - Channel::::check_remote_fee(fee_estimator, msg.feerate_per_kw, None, logger)?; - let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); - if msg.to_self_delay > max_counterparty_selected_contest_delay { - return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay))); - } - if msg.max_accepted_htlcs < 1 { - return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned())); - } - if msg.max_accepted_htlcs > MAX_HTLCS { - return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS))); + if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis { + value_to_holder = 0; } - // Now check against optional parameters as set by config... 
- if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis { - return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis))); - } - if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat { - return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat))); - } - if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat { - return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat))); - } - if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis { - return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis))); - } - if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs { - return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs))); - } - if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); - } - if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); - } + assert!(self.context.shutdown_scriptpubkey.is_some()); + let holder_shutdown_script = self.get_closing_scriptpubkey(); + let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap(); + let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint(); - // Convert things into internal flags and prep our state: + let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint); + (closing_transaction, total_fee_satoshis) + } - if config.channel_handshake_limits.force_announced_channel_preference { - if config.channel_handshake_config.announced_channel != announced_channel { - return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned())); - } - } + fn funding_outpoint(&self) -> OutPoint { + self.context.channel_transaction_parameters.funding_outpoint.unwrap() + } - let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config); - if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - // Protocol level safety check in place, although it should never happen because - // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` - return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). 
dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); - } - if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat { - return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat))); - } - if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.", - msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS); - } - if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis { - return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis))); + /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`] + /// entirely. + /// + /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage + /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]). + /// + /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is + /// disconnected). + pub fn claim_htlc_while_disconnected_dropping_mon_update + (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) + where L::Target: Logger { + // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc` + // (see equivalent if condition there). + assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0); + let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update + let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger); + self.context.latest_monitor_update_id = mon_update_id; + if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp { + assert!(msg.is_none()); // The HTLC must have ended up in the holding cell. } + } - // check if the funder's amount for the initial commitment tx is sufficient - // for full fee payment plus a few HTLCs to ensure the channel will be useful. - let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat; - let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000; - if funders_amount_msat / 1000 < commitment_tx_fee { - return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee))); + fn get_update_fulfill_htlc(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger { + // Either ChannelReady got set (which means it won't be unset) or there is no way any + // caller thought we could have something claimed (cause we wouldn't have accepted in an + // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us, + // either. 
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + panic!("Was asked to fulfill an HTLC when channel was not in an operational state"); } + assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); - let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee; - // While it's reasonable for us to not meet the channel reserve initially (if they don't - // want to push much to us), our counterparty should always have more than our reserve. - if to_remote_satoshis < holder_selected_channel_reserve_satoshis { - return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned())); - } + let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()); - let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { - match &msg.shutdown_scriptpubkey { - &Some(ref script) => { - // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything - if script.len() == 0 { - None - } else { - if !script::is_bolt2_compliant(&script, their_features) { - return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))) - } - Some(script.clone()) - } - }, - // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel - &None => { - return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned())); - } - } - } else { None }; + // ChannelManager may generate duplicate claims/fails due to HTLC update events from + // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop + // these, but for now we just have to treat them as normal. 
- let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { - match signer_provider.get_shutdown_scriptpubkey() { - Ok(scriptpubkey) => Some(scriptpubkey), - Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())), - } - } else { None }; - - if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { - if !shutdown_scriptpubkey.is_compatible(&their_features) { - return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey))); + let mut pending_idx = core::usize::MAX; + let mut htlc_value_msat = 0; + for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { + if htlc.htlc_id == htlc_id_arg { + assert_eq!(htlc.payment_hash, payment_hash_calc); + match htlc.state { + InboundHTLCState::Committed => {}, + InboundHTLCState::LocalRemoved(ref reason) => { + if let &InboundHTLCRemovalReason::Fulfill(_) = reason { + } else { + log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id())); + debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); + } + return UpdateFulfillFetch::DuplicateClaim {}; + }, + _ => { + debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); + // Don't return in release mode here so that we can update channel_monitor + } + } + pending_idx = idx; + htlc_value_msat = htlc.amount_msat; + break; } } + if pending_idx == core::usize::MAX { + #[cfg(any(test, fuzzing))] + // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and + // this is simply a duplicate claim, not previously failed and we lost funds. + debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); + return UpdateFulfillFetch::DuplicateClaim {}; + } - let destination_script = match signer_provider.get_destination_script() { - Ok(script) => script, - Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())), + // Now update local state: + // + // We have to put the payment_preimage in the channel_monitor right away here to ensure we + // can claim it even if the channel hits the chain before we see their next commitment. + self.context.latest_monitor_update_id += 1; + let monitor_update = ChannelMonitorUpdate { + update_id: self.context.latest_monitor_update_id, + updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { + payment_preimage: payment_preimage_arg.clone(), + }], }; - let mut secp_ctx = Secp256k1::new(); - secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); - - let chan = Channel { - context: ChannelContext { - user_id, - - config: LegacyChannelConfig { - options: config.channel_config.clone(), - announced_channel, - commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, - }, + if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { + // Note that this condition is the same as the assertion in + // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly - + // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we + // do not not get into this branch. 
+ for pending_update in self.context.holding_cell_htlc_updates.iter() { + match pending_update { + &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => { + if htlc_id_arg == htlc_id { + // Make sure we don't leave latest_monitor_update_id incremented here: + self.context.latest_monitor_update_id -= 1; + #[cfg(any(test, fuzzing))] + debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); + return UpdateFulfillFetch::DuplicateClaim {}; + } + }, + &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => { + if htlc_id_arg == htlc_id { + log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id())); + // TODO: We may actually be able to switch to a fulfill here, though its + // rare enough it may not be worth the complexity burden. + debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); + return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; + } + }, + _ => {} + } + } + log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state); + self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { + payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, + }); + #[cfg(any(test, fuzzing))] + self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg); + return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; + } + #[cfg(any(test, fuzzing))] + self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg); - prev_config: None, + { + let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; + if let InboundHTLCState::Committed = htlc.state { + } else { + debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); + return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; + } + log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id)); + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone())); + } - inbound_handshake_limits_override: None, + UpdateFulfillFetch::NewClaim { + monitor_update, + htlc_value_msat, + msg: Some(msgs::UpdateFulfillHTLC { + channel_id: self.context.channel_id(), + htlc_id: htlc_id_arg, + payment_preimage: payment_preimage_arg, + }), + } + } - temporary_channel_id: Some(msg.temporary_channel_id), - channel_id: msg.temporary_channel_id, - channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32), - announcement_sigs_state: AnnouncementSigsState::NotSent, - secp_ctx, + pub fn get_update_fulfill_htlc_and_commit(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger { + let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked); + match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) { + UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => { + // Even if we aren't supposed to let new monitor updates with commitment state + // updates run, we still need to push the preimage ChannelMonitorUpdateStep no + // matter what. 
Sadly, to push a new monitor update which flies before others + // already queued, we have to insert it into the pending queue and update the + // update_ids of all the following monitors. + let unblocked_update_pos = if release_cs_monitor && msg.is_some() { + let mut additional_update = self.build_commitment_no_status_check(logger); + // build_commitment_no_status_check may bump latest_monitor_id but we want them + // to be strictly increasing by one, so decrement it here. + self.context.latest_monitor_update_id = monitor_update.update_id; + monitor_update.updates.append(&mut additional_update.updates); + self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate { + update: monitor_update, blocked: false, + }); + self.context.pending_monitor_updates.len() - 1 + } else { + let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked) + .unwrap_or(self.context.pending_monitor_updates.len()); + let new_mon_id = self.context.pending_monitor_updates.get(insert_pos) + .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id); + monitor_update.update_id = new_mon_id; + self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate { + update: monitor_update, blocked: false, + }); + for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) { + held_update.update.update_id += 1; + } + if msg.is_some() { + debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set"); + let update = self.build_commitment_no_status_check(logger); + self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate { + update, blocked: true, + }); + } + insert_pos + }; + self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new()); + UpdateFulfillCommitFetch::NewClaim { + monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos) + .expect("We just pushed the monitor update").update, + htlc_value_msat, + } + }, + UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {}, + } + } - latest_monitor_update_id: 0, + /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill + /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot, + /// however, fail more than once as we wait for an upstream failure to be irrevocably committed + /// before we fail backwards. + /// + /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always + /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be + /// [`ChannelError::Ignore`]. + pub fn queue_fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L) + -> Result<(), ChannelError> where L::Target: Logger { + self.fail_htlc(htlc_id_arg, err_packet, true, logger) + .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?")) + } - holder_signer, - shutdown_scriptpubkey, - destination_script, + /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill + /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot, + /// however, fail more than once as we wait for an upstream failure to be irrevocably committed + /// before we fail backwards. + /// + /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always + /// return `Ok(_)` if preconditions are met. 
In any case, `Err`s will only be + /// [`ChannelError::Ignore`]. + fn fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L) + -> Result, ChannelError> where L::Target: Logger { + if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + panic!("Was asked to fail an HTLC when channel was not in an operational state"); + } + assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); - cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - value_to_self_msat: msg.push_msat, + // ChannelManager may generate duplicate claims/fails due to HTLC update events from + // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop + // these, but for now we just have to treat them as normal. - pending_inbound_htlcs: Vec::new(), - pending_outbound_htlcs: Vec::new(), - holding_cell_htlc_updates: Vec::new(), - pending_update_fee: None, - holding_cell_update_fee: None, - next_holder_htlc_id: 0, - next_counterparty_htlc_id: 0, - update_time_counter: 1, + let mut pending_idx = core::usize::MAX; + for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { + if htlc.htlc_id == htlc_id_arg { + match htlc.state { + InboundHTLCState::Committed => {}, + InboundHTLCState::LocalRemoved(ref reason) => { + if let &InboundHTLCRemovalReason::Fulfill(_) = reason { + } else { + debug_assert!(false, "Tried to fail an HTLC that was already failed"); + } + return Ok(None); + }, + _ => { + debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); + return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id))); + } + } + pending_idx = idx; + } + } + if pending_idx == core::usize::MAX { + #[cfg(any(test, fuzzing))] + // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this + // is simply a duplicate fail, not previously failed and we failed-back too early. + debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); + return Ok(None); + } - resend_order: RAACommitmentOrder::CommitmentFirst, + if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { + debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!"); + force_holding_cell = true; + } - monitor_pending_channel_ready: false, - monitor_pending_revoke_and_ack: false, - monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), - monitor_pending_failures: Vec::new(), - monitor_pending_finalized_fulfills: Vec::new(), + // Now update local state: + if force_holding_cell { + for pending_update in self.context.holding_cell_htlc_updates.iter() { + match pending_update { + &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => { + if htlc_id_arg == htlc_id { + #[cfg(any(test, fuzzing))] + debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); + return Ok(None); + } + }, + &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. 
} => { + if htlc_id_arg == htlc_id { + debug_assert!(false, "Tried to fail an HTLC that was already failed"); + return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned())); + } + }, + _ => {} + } + } + log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id())); + self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC { + htlc_id: htlc_id_arg, + err_packet, + }); + return Ok(None); + } - #[cfg(debug_assertions)] - holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), - #[cfg(debug_assertions)] - counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), + log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id())); + { + let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone())); + } - last_sent_closing_fee: None, - pending_counterparty_closing_signed: None, - closing_fee_limits: None, - target_closing_feerate_sats_per_kw: None, + Ok(Some(msgs::UpdateFailHTLC { + channel_id: self.context.channel_id(), + htlc_id: htlc_id_arg, + reason: err_packet + })) + } - inbound_awaiting_accept: true, + // Message handlers: - funding_tx_confirmed_in: None, - funding_tx_confirmation_height: 0, - short_channel_id: None, - channel_creation_height: current_chain_height, + pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> { + let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits }; - feerate_per_kw: msg.feerate_per_kw, - channel_value_satoshis: msg.funding_satoshis, - counterparty_dust_limit_satoshis: msg.dust_limit_satoshis, - holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, - counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000), - holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config), - counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis), - holder_selected_channel_reserve_satoshis, - counterparty_htlc_minimum_msat: msg.htlc_minimum_msat, - holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, - counterparty_max_accepted_htlcs: msg.max_accepted_htlcs, - holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), - minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)), + // Check sanity of message fields: + if !self.context.is_outbound() { + return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned())); + } + if self.context.channel_state != ChannelState::OurInitSent as u32 { + return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned())); + } + if msg.dust_limit_satoshis > 21000000 * 100000000 { + return Err(ChannelError::Close(format!("Peer never wants payout outputs? 
dust_limit_satoshis was {}", msg.dust_limit_satoshis))); + } + if msg.channel_reserve_satoshis > self.context.channel_value_satoshis { + return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis))); + } + if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis { + return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis))); + } + if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis { + return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})", + msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis))); + } + let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000; + if msg.htlc_minimum_msat >= full_channel_value_msat { + return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat))); + } + let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); + if msg.to_self_delay > max_delay_acceptable { + return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay))); + } + if msg.max_accepted_htlcs < 1 { + return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned())); + } + if msg.max_accepted_htlcs > MAX_HTLCS { + return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS))); + } - counterparty_forwarding_info: None, + // Now check against optional parameters as set by config... 
+ if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat { + return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat))); + } + if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat { + return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat))); + } + if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis { + return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis))); + } + if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs { + return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs))); + } + if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); + } + if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); + } + if msg.minimum_depth > peer_limits.max_minimum_depth { + return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth))); + } - channel_transaction_parameters: ChannelTransactionParameters { - holder_pubkeys: pubkeys, - holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, - is_outbound_from_holder: false, - counterparty_parameters: Some(CounterpartyChannelTransactionParameters { - selected_contest_delay: msg.to_self_delay, - pubkeys: counterparty_pubkeys, - }), - funding_outpoint: None, - opt_anchors: if opt_anchors { Some(()) } else { None }, - opt_non_zero_fee_anchors: None - }, - funding_transaction: None, + if let Some(ty) = &msg.channel_type { + if *ty != self.context.channel_type { + return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned())); + } + } else if their_features.supports_channel_type() { + // Assume they've accepted the channel type as they said they understand it. + } else { + let channel_type = ChannelTypeFeatures::from_init(&their_features); + if channel_type != ChannelTypeFeatures::only_static_remote_key() { + return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); + } + self.context.channel_type = channel_type; + } - counterparty_cur_commitment_point: Some(msg.first_per_commitment_point), - counterparty_prev_commitment_point: None, - counterparty_node_id, + let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { + match &msg.shutdown_scriptpubkey { + &Some(ref script) => { + // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. 
We don't enforce anything
+ if script.len() == 0 {
+ None
+ } else {
+ if !script::is_bolt2_compliant(&script, their_features) {
+ return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
+ }
+ Some(script.clone())
+ }
+ },
+ // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). The peer looks buggy, so we fail the channel.
+ &None => {
+ return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+ }
+ }
+ } else { None };
- counterparty_shutdown_scriptpubkey,
+ self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
+ self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
+ self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
+ self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
+ self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
- commitment_secrets: CounterpartyCommitmentSecrets::new(),
+ if peer_limits.trust_own_funding_0conf {
+ self.context.minimum_depth = Some(msg.minimum_depth);
+ } else {
+ self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
+ }
- channel_update_status: ChannelUpdateStatus::Enabled,
- closing_signed_in_flight: false,
+ let counterparty_pubkeys = ChannelPublicKeys {
+ funding_pubkey: msg.funding_pubkey,
+ revocation_basepoint: msg.revocation_basepoint,
+ payment_point: msg.payment_point,
+ delayed_payment_basepoint: msg.delayed_payment_basepoint,
+ htlc_basepoint: msg.htlc_basepoint
+ };
- announcement_sigs: None,
+ self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
+ selected_contest_delay: msg.to_self_delay,
+ pubkeys: counterparty_pubkeys,
+ });
- #[cfg(any(test, fuzzing))]
- next_local_commitment_tx_fee_info_cached: Mutex::new(None),
- #[cfg(any(test, fuzzing))]
- next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
+ self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
+ self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
- workaround_lnd_bug_4006: None,
- sent_message_awaiting_response: None,
+ self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
+ self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
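The checks above compare the peer's accept_channel against our ChannelHandshakeLimits. As a rough, illustrative sketch of how a node operator might tighten those limits: the field names mirror the peer_limits accesses above, the UserConfig/ChannelHandshakeLimits path is assumed to be lightning::util::config, the values are hypothetical, and unlisted fields keep their defaults.

    use lightning::util::config::{ChannelHandshakeLimits, UserConfig};

    fn main() {
        let mut config = UserConfig::default();
        // Hypothetical values; every named field corresponds to a check in accept_channel above.
        config.channel_handshake_limits = ChannelHandshakeLimits {
            max_htlc_minimum_msat: 1_000,                  // reject peers demanding huge minimum HTLCs
            min_max_htlc_value_in_flight_msat: 10_000_000, // require a usable in-flight cap
            max_channel_reserve_satoshis: 100_000,         // cap the reserve they can impose on us
            min_max_accepted_htlcs: 5,                     // require room for at least a few HTLCs
            max_minimum_depth: 6,                          // never wait more than 6 confirmations
            trust_own_funding_0conf: false,                // do not skip confirmations on our own funding
            their_to_self_delay: 2016,                     // longest to_self_delay we will accept
            ..ChannelHandshakeLimits::default()
        };
        // This config would then be handed to the ChannelManager, which passes the limits in as
        // `default_limits` unless a per-channel `inbound_handshake_limits_override` was set.
        let _ = config;
    }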
- latest_inbound_scid_alias: None, - outbound_scid_alias, + Ok(()) + } - channel_pending_event_emitted: false, - channel_ready_event_emitted: false, + fn funding_created_signature(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger { + let funding_script = self.context.get_funding_redeemscript(); - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills: HashSet::new(), + let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx; + { + let trusted_tx = initial_commitment_tx.trust(); + let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); + let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); + // They sign the holder commitment transaction... + log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", + log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()), + encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), + encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); + secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); + } - channel_type, - channel_keys_id, + let counterparty_keys = self.context.build_remote_transaction_keys(); + let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; - pending_monitor_updates: Vec::new(), - } - }; + let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); + let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); + log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", + log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); - Ok(chan) - } + let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) + .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0; - #[inline] - fn get_closing_scriptpubkey(&self) -> Script { - // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script - // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method - // outside of those situations will fail. - self.context.shutdown_scriptpubkey.clone().unwrap().into_inner() + // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish. 
+ Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature)) } - #[inline] - fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 { - let mut ret = - (4 + // version - 1 + // input count - 36 + // prevout - 1 + // script length (0) - 4 + // sequence - 1 + // output count - 4 // lock time - )*4 + // * 4 for non-witness parts - 2 + // witness marker and flag - 1 + // witness element count - 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script) - self.context.get_funding_redeemscript().len() as u64 + // funding witness script - 2*(1 + 71); // two signatures + sighash type flags - if let Some(spk) = a_scriptpubkey { - ret += ((8+1) + // output values and script length - spk.len() as u64) * 4; // scriptpubkey and witness multiplier + pub fn funding_created( + &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L + ) -> Result<(msgs::FundingSigned, ChannelMonitor), ChannelError> + where + SP::Target: SignerProvider, + L::Target: Logger + { + if self.context.is_outbound() { + return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned())); } - if let Some(spk) = b_scriptpubkey { - ret += ((8+1) + // output values and script length - spk.len() as u64) * 4; // scriptpubkey and witness multiplier + if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { + // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT + // remember the channel, so it's safe to just send an error_message here and drop the + // channel. + return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned())); + } + if self.context.inbound_awaiting_accept { + return Err(ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned())); + } + if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || + self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || + self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); } - ret - } - #[inline] - fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) { - assert!(self.context.pending_inbound_htlcs.is_empty()); - assert!(self.context.pending_outbound_htlcs.is_empty()); - assert!(self.context.pending_update_fee.is_none()); + let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index }; + self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo); + // This is an externally observable change before we finish all our checks. In particular + // funding_created_signature may fail. 
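Both funding_created_signature above and the funding_created handler here lean on get_funding_redeemscript(). For reference, the BOLT 3 funding script it produces is the usual 2-of-2 multisig; the sketch below (rust-bitcoin types, illustrative only, the library's own helper lives in ln::chan_utils) shows its construction and how the P2WSH funding output follows from it.

    use bitcoin::blockdata::opcodes::all::{OP_CHECKMULTISIG, OP_PUSHNUM_2};
    use bitcoin::blockdata::script::{Builder, Script};
    use bitcoin::secp256k1::PublicKey;

    // Sketch of the BOLT 3 funding redeemscript: 2 <pubkey1> <pubkey2> 2 OP_CHECKMULTISIG,
    // with the two funding pubkeys sorted in ascending lexicographic order.
    fn funding_redeemscript_sketch(a: &PublicKey, b: &PublicKey) -> Script {
        let (first, second) = if a.serialize()[..] < b.serialize()[..] { (a, b) } else { (b, a) };
        Builder::new()
            .push_opcode(OP_PUSHNUM_2)
            .push_slice(&first.serialize())
            .push_slice(&second.serialize())
            .push_opcode(OP_PUSHNUM_2)
            .push_opcode(OP_CHECKMULTISIG)
            .into_script()
    }

    // The on-chain funding output is then the P2WSH of this script, matching the
    // `funding_redeemscript.to_v0_p2wsh()` calls in this hunk:
    // let funding_spk = funding_redeemscript_sketch(&holder_pk, &counterparty_pk).to_v0_p2wsh();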
+ self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); - let mut total_fee_satoshis = proposed_total_fee_satoshis; - let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 }; - let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 }; + let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) { + Ok(res) => res, + Err(ChannelError::Close(e)) => { + self.context.channel_transaction_parameters.funding_outpoint = None; + return Err(ChannelError::Close(e)); + }, + Err(e) => { + // The only error we know how to handle is ChannelError::Close, so we fall over here + // to make sure we don't continue with an inconsistent state. + panic!("unexpected error type from funding_created_signature {:?}", e); + } + }; - if value_to_holder < 0 { - assert!(self.context.is_outbound()); - total_fee_satoshis += (-value_to_holder) as u64; - } else if value_to_counterparty < 0 { - assert!(!self.context.is_outbound()); - total_fee_satoshis += (-value_to_counterparty) as u64; - } + let holder_commitment_tx = HolderCommitmentTransaction::new( + initial_commitment_tx, + msg.signature, + Vec::new(), + &self.context.get_holder_pubkeys().funding_pubkey, + self.context.counterparty_funding_pubkey() + ); - if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis { - value_to_counterparty = 0; - } + self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) + .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; - if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis { - value_to_holder = 0; - } + // Now that we're past error-generating stuff, update our local state: - assert!(self.context.shutdown_scriptpubkey.is_some()); - let holder_shutdown_script = self.get_closing_scriptpubkey(); - let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap(); - let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint(); + let funding_redeemscript = self.context.get_funding_redeemscript(); + let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); + let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); + let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); + let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); + monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); + let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, + shutdown_script, self.context.get_holder_selected_contest_delay(), + &self.context.destination_script, (funding_txo, funding_txo_script.clone()), + &self.context.channel_transaction_parameters, + funding_redeemscript.clone(), self.context.channel_value_satoshis, + obscure_factor, + holder_commitment_tx, best_block, self.context.counterparty_node_id); - let closing_transaction = ClosingTransaction::new(value_to_holder as u64, 
value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint); - (closing_transaction, total_fee_satoshis) - } + channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger); - fn funding_outpoint(&self) -> OutPoint { - self.context.channel_transaction_parameters.funding_outpoint.unwrap() - } + self.context.channel_state = ChannelState::FundingSent as u32; + self.context.channel_id = funding_txo.to_channel_id(); + self.context.cur_counterparty_commitment_transaction_number -= 1; + self.context.cur_holder_commitment_transaction_number -= 1; - /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`] - /// entirely. - /// - /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage - /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]). - /// - /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is - /// disconnected). - pub fn claim_htlc_while_disconnected_dropping_mon_update - (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) - where L::Target: Logger { - // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc` - // (see equivalent if condition there). - assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0); - let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update - let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger); - self.context.latest_monitor_update_id = mon_update_id; - if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp { - assert!(msg.is_none()); // The HTLC must have ended up in the holding cell. - } + log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id())); + + let need_channel_ready = self.check_get_channel_ready(0).is_some(); + self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); + + Ok((msgs::FundingSigned { + channel_id: self.context.channel_id, + signature, + #[cfg(taproot)] + partial_signature_with_nonce: None, + }, channel_monitor)) } - fn get_update_fulfill_htlc(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger { - // Either ChannelReady got set (which means it won't be unset) or there is no way any - // caller thought we could have something claimed (cause we wouldn't have accepted in an - // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us, - // either. - if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { - panic!("Was asked to fulfill an HTLC when channel was not in an operational state"); + /// Handles a funding_signed message from the remote end. + /// If this call is successful, broadcast the funding transaction (and not before!) 
+ pub fn funding_signed( + &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L + ) -> Result, ChannelError> + where + SP::Target: SignerProvider, + L::Target: Logger + { + if !self.context.is_outbound() { + return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())); + } + if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 { + return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned())); + } + if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || + self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || + self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); } - assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); - let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()); + let funding_script = self.context.get_funding_redeemscript(); - // ChannelManager may generate duplicate claims/fails due to HTLC update events from - // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop - // these, but for now we just have to treat them as normal. + let counterparty_keys = self.context.build_remote_transaction_keys(); + let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); + let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); - let mut pending_idx = core::usize::MAX; - let mut htlc_value_msat = 0; - for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { - if htlc.htlc_id == htlc_id_arg { - assert_eq!(htlc.payment_hash, payment_hash_calc); - match htlc.state { - InboundHTLCState::Committed => {}, - InboundHTLCState::LocalRemoved(ref reason) => { - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { - } else { - log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id())); - debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); - } - return UpdateFulfillFetch::DuplicateClaim {}; - }, - _ => { - debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); - // Don't return in release mode here so that we can update channel_monitor - } - } - pending_idx = idx; - htlc_value_msat = htlc.amount_msat; - break; + log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", + log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + + let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; + { + let trusted_tx = initial_commitment_tx.trust(); + let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); + let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, 
self.context.channel_value_satoshis); + // They sign our commitment transaction, allowing us to broadcast the tx if we wish. + if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) { + return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned())); } } - if pending_idx == core::usize::MAX { - #[cfg(any(test, fuzzing))] - // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and - // this is simply a duplicate claim, not previously failed and we lost funds. - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); - return UpdateFulfillFetch::DuplicateClaim {}; - } - // Now update local state: - // - // We have to put the payment_preimage in the channel_monitor right away here to ensure we - // can claim it even if the channel hits the chain before we see their next commitment. - self.context.latest_monitor_update_id += 1; - let monitor_update = ChannelMonitorUpdate { - update_id: self.context.latest_monitor_update_id, - updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { - payment_preimage: payment_preimage_arg.clone(), - }], - }; + let holder_commitment_tx = HolderCommitmentTransaction::new( + initial_commitment_tx, + msg.signature, + Vec::new(), + &self.context.get_holder_pubkeys().funding_pubkey, + self.context.counterparty_funding_pubkey() + ); - if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { - // Note that this condition is the same as the assertion in - // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly - - // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we - // do not not get into this branch. - for pending_update in self.context.holding_cell_htlc_updates.iter() { - match pending_update { - &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => { - if htlc_id_arg == htlc_id { - // Make sure we don't leave latest_monitor_update_id incremented here: - self.context.latest_monitor_update_id -= 1; - #[cfg(any(test, fuzzing))] - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); - return UpdateFulfillFetch::DuplicateClaim {}; - } - }, - &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => { - if htlc_id_arg == htlc_id { - log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id())); - // TODO: We may actually be able to switch to a fulfill here, though its - // rare enough it may not be worth the complexity burden. - debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; - } - }, - _ => {} - } - } - log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! 
Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state); - self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { - payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, - }); - #[cfg(any(test, fuzzing))] - self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; + self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) + .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; + + + let funding_redeemscript = self.context.get_funding_redeemscript(); + let funding_txo = self.context.get_funding_txo().unwrap(); + let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); + let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); + let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); + let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); + monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); + let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, + shutdown_script, self.context.get_holder_selected_contest_delay(), + &self.context.destination_script, (funding_txo, funding_txo_script), + &self.context.channel_transaction_parameters, + funding_redeemscript.clone(), self.context.channel_value_satoshis, + obscure_factor, + holder_commitment_tx, best_block, self.context.counterparty_node_id); + + channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger); + + assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update! + self.context.channel_state = ChannelState::FundingSent as u32; + self.context.cur_holder_commitment_transaction_number -= 1; + self.context.cur_counterparty_commitment_transaction_number -= 1; + + log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id())); + + let need_channel_ready = self.check_get_channel_ready(0).is_some(); + self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); + Ok(channel_monitor) + } + + /// Handles a channel_ready message from our peer. If we've already sent our channel_ready + /// and the channel is now usable (and public), this may generate an announcement_signatures to + /// reply with. + pub fn channel_ready( + &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash, + user_config: &UserConfig, best_block: &BestBlock, logger: &L + ) -> Result, ChannelError> + where + NS::Target: NodeSigner, + L::Target: Logger + { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + self.context.workaround_lnd_bug_4006 = Some(msg.clone()); + return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. 
The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned())); } - #[cfg(any(test, fuzzing))] - self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg); - { - let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; - if let InboundHTLCState::Committed = htlc.state { - } else { - debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; + if let Some(scid_alias) = msg.short_channel_id_alias { + if Some(scid_alias) != self.context.short_channel_id { + // The scid alias provided can be used to route payments *from* our counterparty, + // i.e. can be used for inbound payments and provided in invoices, but is not used + // when routing outbound payments. + self.context.latest_inbound_scid_alias = Some(scid_alias); } - log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id)); - htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone())); } - UpdateFulfillFetch::NewClaim { - monitor_update, - htlc_value_msat, - msg: Some(msgs::UpdateFulfillHTLC { - channel_id: self.context.channel_id(), - htlc_id: htlc_id_arg, - payment_preimage: payment_preimage_arg, - }), - } - } + let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); - pub fn get_update_fulfill_htlc_and_commit(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger { - let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked); - match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) { - UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => { - // Even if we aren't supposed to let new monitor updates with commitment state - // updates run, we still need to push the preimage ChannelMonitorUpdateStep no - // matter what. Sadly, to push a new monitor update which flies before others - // already queued, we have to insert it into the pending queue and update the - // update_ids of all the following monitors. - let unblocked_update_pos = if release_cs_monitor && msg.is_some() { - let mut additional_update = self.build_commitment_no_status_check(logger); - // build_commitment_no_status_check may bump latest_monitor_id but we want them - // to be strictly increasing by one, so decrement it here. 
- self.context.latest_monitor_update_id = monitor_update.update_id; - monitor_update.updates.append(&mut additional_update.updates); - self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate { - update: monitor_update, blocked: false, - }); - self.context.pending_monitor_updates.len() - 1 + if non_shutdown_state == ChannelState::FundingSent as u32 { + self.context.channel_state |= ChannelState::TheirChannelReady as u32; + } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) { + self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS); + self.context.update_time_counter += 1; + } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 || + // If we reconnected before sending our `channel_ready` they may still resend theirs: + (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) == + (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32)) + { + // They probably disconnected/reconnected and re-sent the channel_ready, which is + // required, or they're sending a fresh SCID alias. + let expected_point = + if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 { + // If they haven't ever sent an updated point, the point they send should match + // the current one. + self.context.counterparty_cur_commitment_point + } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 { + // If we've advanced the commitment number once, the second commitment point is + // at `counterparty_prev_commitment_point`, which is not yet revoked. + debug_assert!(self.context.counterparty_prev_commitment_point.is_some()); + self.context.counterparty_prev_commitment_point } else { - let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked) - .unwrap_or(self.context.pending_monitor_updates.len()); - let new_mon_id = self.context.pending_monitor_updates.get(insert_pos) - .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id); - monitor_update.update_id = new_mon_id; - self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate { - update: monitor_update, blocked: false, - }); - for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) { - held_update.update.update_id += 1; - } - if msg.is_some() { - debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set"); - let update = self.build_commitment_no_status_check(logger); - self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate { - update, blocked: true, - }); - } - insert_pos + // If they have sent updated points, channel_ready is always supposed to match + // their "first" point, which we re-derive here. 
+ Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice( + &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available") + ).expect("We already advanced, so previous secret keys should have been validated already"))) }; - self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new()); - UpdateFulfillCommitFetch::NewClaim { - monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos) - .expect("We just pushed the monitor update").update, - htlc_value_msat, - } - }, - UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {}, + if expected_point != Some(msg.next_per_commitment_point) { + return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned())); + } + return Ok(None); + } else { + return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())); } - } - /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill - /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot, - /// however, fail more than once as we wait for an upstream failure to be irrevocably committed - /// before we fail backwards. - /// - /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always - /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be - /// [`ChannelError::Ignore`]. - pub fn queue_fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L) - -> Result<(), ChannelError> where L::Target: Logger { - self.fail_htlc(htlc_id_arg, err_packet, true, logger) - .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?")) - } + self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point; + self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); - /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill - /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot, - /// however, fail more than once as we wait for an upstream failure to be irrevocably committed - /// before we fail backwards. - /// - /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always - /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be - /// [`ChannelError::Ignore`]. - fn fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L) - -> Result, ChannelError> where L::Target: Logger { - if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { - panic!("Was asked to fail an HTLC when channel was not in an operational state"); - } - assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); + log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id())); - // ChannelManager may generate duplicate claims/fails due to HTLC update events from - // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop - // these, but for now we just have to treat them as normal. 
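The expected_point re-derivation in channel_ready above works because a per-commitment point is simply the public key of the corresponding per-commitment secret. A minimal standalone sketch using the secp256k1 re-exports follows; the secret bytes are placeholders, whereas in the code above they come from commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).

    use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};

    fn main() {
        let secp_ctx = Secp256k1::new();
        // Placeholder 32-byte per-commitment secret; any value below the curve order works here.
        let per_commitment_secret = SecretKey::from_slice(&[0x42u8; 32]).expect("valid 32-byte secret");
        // The matching per-commitment point is just secret * G, exactly what the check above re-derives.
        let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
        println!("re-derived per-commitment point: {}", per_commitment_point);
    }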
+ Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger)) + } - let mut pending_idx = core::usize::MAX; - for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { - if htlc.htlc_id == htlc_id_arg { - match htlc.state { - InboundHTLCState::Committed => {}, - InboundHTLCState::LocalRemoved(ref reason) => { - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { - } else { - debug_assert!(false, "Tried to fail an HTLC that was already failed"); - } - return Ok(None); - }, - _ => { - debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); - return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id))); - } - } - pending_idx = idx; - } - } - if pending_idx == core::usize::MAX { - #[cfg(any(test, fuzzing))] - // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this - // is simply a duplicate fail, not previously failed and we failed-back too early. - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); - return Ok(None); + pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError> + where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger { + // We can't accept HTLCs sent after we've sent a shutdown. + let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32); + if local_sent_shutdown { + pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8); } - - if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { - debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!"); - force_holding_cell = true; + // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec. + let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32); + if remote_sent_shutdown { + return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned())); } - - // Now update local state: - if force_holding_cell { - for pending_update in self.context.holding_cell_htlc_updates.iter() { - match pending_update { - &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => { - if htlc_id_arg == htlc_id { - #[cfg(any(test, fuzzing))] - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); - return Ok(None); - } - }, - &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. 
} => { - if htlc_id_arg == htlc_id { - debug_assert!(false, "Tried to fail an HTLC that was already failed"); - return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned())); - } - }, - _ => {} - } - } - log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id())); - self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC { - htlc_id: htlc_id_arg, - err_packet, - }); - return Ok(None); - } - - log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id())); - { - let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; - htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone())); - } - - Ok(Some(msgs::UpdateFailHTLC { - channel_id: self.context.channel_id(), - htlc_id: htlc_id_arg, - reason: err_packet - })) - } - - // Message handlers: - - pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> { - let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits }; - - // Check sanity of message fields: - if !self.context.is_outbound() { - return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned())); - } - if self.context.channel_state != ChannelState::OurInitSent as u32 { - return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned())); - } - if msg.dust_limit_satoshis > 21000000 * 100000000 { - return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis))); - } - if msg.channel_reserve_satoshis > self.context.channel_value_satoshis { - return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis))); - } - if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis { - return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis))); - } - if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis { - return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). 
Must not be greater than channel value minus our reserve ({})", - msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis))); - } - let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000; - if msg.htlc_minimum_msat >= full_channel_value_msat { - return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat))); + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned())); } - let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); - if msg.to_self_delay > max_delay_acceptable { - return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay))); + if msg.amount_msat > self.context.channel_value_satoshis * 1000 { + return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned())); } - if msg.max_accepted_htlcs < 1 { - return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned())); + if msg.amount_msat == 0 { + return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned())); } - if msg.max_accepted_htlcs > MAX_HTLCS { - return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS))); + if msg.amount_msat < self.context.holder_htlc_minimum_msat { + return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat))); } - // Now check against optional parameters as set by config... 
- if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat { - return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat))); - } - if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat { - return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat))); - } - if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis { - return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis))); - } - if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs { - return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs))); - } - if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); + let inbound_stats = self.context.get_inbound_pending_htlc_stats(None); + let outbound_stats = self.context.get_outbound_pending_htlc_stats(None); + if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 { + return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs))); } - if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); + if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat { + return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat))); } - if msg.minimum_depth > peer_limits.max_minimum_depth { - return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth))); + // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet + // the reserve_satoshis we told them to always have as direct payment so that they lose + // something if we punish them for broadcasting an old state). + // Note that we don't really care about having a small/no to_remote output in our local + // commitment transactions, as the purpose of the channel reserve is to ensure we can + // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be + // present in the next commitment transaction we send them (at least for fulfilled ones, + // failed ones won't modify value_to_self). + // Note that we will send HTLCs which another instance of rust-lightning would think + // violate the reserve value if we do not do this (as we forget inbound HTLCs from the + // Channel state once they will not be present in the next received commitment + // transaction). 
+ let mut removed_outbound_total_msat = 0; + for ref htlc in self.context.pending_outbound_htlcs.iter() { + if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state { + removed_outbound_total_msat += htlc.amount_msat; + } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state { + removed_outbound_total_msat += htlc.amount_msat; + } } - if let Some(ty) = &msg.channel_type { - if *ty != self.context.channel_type { - return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned())); - } - } else if their_features.supports_channel_type() { - // Assume they've accepted the channel type as they said they understand it. + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() { + (0, 0) } else { - let channel_type = ChannelTypeFeatures::from_init(&their_features); - if channel_type != ChannelTypeFeatures::only_static_remote_key() { - return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); + let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64; + (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) + }; + let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; + if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { + let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; + if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() { + log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", + on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat()); + pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); } - self.context.channel_type = channel_type; } - let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { - match &msg.shutdown_scriptpubkey { - &Some(ref script) => { - // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything - if script.len() == 0 { - None - } else { - if !script::is_bolt2_compliant(&script, their_features) { - return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))); - } - Some(script.clone()) - } - }, - // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel - &None => { - return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out".to_owned())); - } + let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; + if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { + let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; + if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() { + log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", + on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat()); + pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); } - } else { None }; - - self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis; - self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000); - self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis); - self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat; - self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs; + } - if peer_limits.trust_own_funding_0conf { - self.context.minimum_depth = Some(msg.minimum_depth); - } else { - self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth)); + let pending_value_to_self_msat = + self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat; + let pending_remote_value_msat = + self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat; + if pending_remote_value_msat < msg.amount_msat { + return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned())); } - let counterparty_pubkeys = ChannelPublicKeys { - funding_pubkey: msg.funding_pubkey, - revocation_basepoint: msg.revocation_basepoint, - payment_point: msg.payment_point, - delayed_payment_basepoint: msg.delayed_payment_basepoint, - htlc_basepoint: msg.htlc_basepoint + // Check that the remote can afford to pay for this HTLC on-chain at the current + // feerate_per_kw, while maintaining their channel reserve (as required by the spec). + let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else { + let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); + self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations + }; + if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat { + return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned())); }; - self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters { - selected_contest_delay: msg.to_self_delay, - pubkeys: counterparty_pubkeys, - }); + if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 { + return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned())); + } - self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point); - self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey; + if !self.context.is_outbound() { + // `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. 
This deviates from + // the spec because in the spec, the fee spike buffer requirement doesn't exist on the + // receiver's side, only on the sender's. + // Note that when we eventually remove support for fee updates and switch to anchor output + // fees, we will drop the `2 *`, since we no longer be as sensitive to fee spikes. But, keep + // the extra htlc when calculating the next remote commitment transaction fee as we should + // still be able to afford adding this HTLC plus one more future HTLC, regardless of being + // sensitive to fee spikes. + let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); + let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); + if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat { + // Note that if the pending_forward_status is not updated here, then it's because we're already failing + // the HTLC, i.e. its status is already set to failing. + log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id())); + pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); + } + } else { + // Check that they won't violate our local required channel reserve by adding this HTLC. + let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); + let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None); + if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat { + return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned())); + } + } + if self.context.next_counterparty_htlc_id != msg.htlc_id { + return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id))); + } + if msg.cltv_expiry >= 500000000 { + return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned())); + } - self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32; - self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now. 
+ if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 { + if let PendingHTLCStatus::Forward(_) = pending_forward_status { + panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing"); + } + } - Ok(()) + // Now update local state: + self.context.next_counterparty_htlc_id += 1; + self.context.pending_inbound_htlcs.push(InboundHTLCOutput { + htlc_id: msg.htlc_id, + amount_msat: msg.amount_msat, + payment_hash: msg.payment_hash, + cltv_expiry: msg.cltv_expiry, + state: InboundHTLCState::RemoteAnnounced(pending_forward_status), + }); + Ok(()) } - fn funding_created_signature(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger { - let funding_script = self.context.get_funding_redeemscript(); + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed + #[inline] + fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option, fail_reason: Option) -> Result<&OutboundHTLCOutput, ChannelError> { + assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage"); + for htlc in self.context.pending_outbound_htlcs.iter_mut() { + if htlc.htlc_id == htlc_id { + let outcome = match check_preimage { + None => fail_reason.into(), + Some(payment_preimage) => { + let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()); + if payment_hash != htlc.payment_hash { + return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id))); + } + OutboundHTLCOutcome::Success(Some(payment_preimage)) + } + }; + match htlc.state { + OutboundHTLCState::LocalAnnounced(_) => + return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))), + OutboundHTLCState::Committed => { + htlc.state = OutboundHTLCState::RemoteRemoved(outcome); + }, + OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) => + return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))), + } + return Ok(htlc); + } + } + Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned())) + } - let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx; - { - let trusted_tx = initial_commitment_tx.trust(); - let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); - let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); - // They sign the holder commitment transaction... 
- log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", - log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()), - encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), - encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); - secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); + pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> { + if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned())); + } + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned())); } - let counterparty_keys = self.context.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat)) + } - let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); - let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); - log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", - log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> { + if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned())); + } + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned())); + } - let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) - .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0; + self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?; + Ok(()) + } - // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish. 
- Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature)) + pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> { + if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned())); + } + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned())); + } + + self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?; + Ok(()) } - pub fn funding_created( - &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(msgs::FundingSigned, ChannelMonitor), ChannelError> - where - SP::Target: SignerProvider, - L::Target: Logger + pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result, ChannelError> + where L::Target: Logger { - if self.context.is_outbound() { - return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned())); - } - if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { - // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT - // remember the channel, so it's safe to just send an error_message here and drop the - // channel. - return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned())); + if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned())); } - if self.context.inbound_awaiting_accept { - return Err(ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned())); + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned())); } - if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || - self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { - panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); + if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() { + return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned())); } - let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index }; - self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo); - // This is an externally observable change before we finish all our checks. In particular - // funding_created_signature may fail. 
- self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); + let funding_script = self.context.get_funding_redeemscript(); - let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) { - Ok(res) => res, - Err(ChannelError::Close(e)) => { - self.context.channel_transaction_parameters.funding_outpoint = None; - return Err(ChannelError::Close(e)); - }, - Err(e) => { - // The only error we know how to handle is ChannelError::Close, so we fall over here - // to make sure we don't continue with an inconsistent state. - panic!("unexpected error type from funding_created_signature {:?}", e); + let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + + let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger); + let commitment_txid = { + let trusted_tx = commitment_stats.tx.trust(); + let bitcoin_tx = trusted_tx.built_transaction(); + let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); + + log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}", + log_bytes!(msg.signature.serialize_compact()[..]), + log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), + log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); + if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) { + return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned())); } + bitcoin_tx.txid }; + let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect(); + + // If our counterparty updated the channel fee in this commitment transaction, check that + // they can actually afford the new fee now. 
+ let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee { + update_state == FeeUpdateState::RemoteAnnounced + } else { false }; + if update_fee { + debug_assert!(!self.context.is_outbound()); + let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000; + if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat { + return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned())); + } + } + #[cfg(any(test, fuzzing))] + { + if self.context.is_outbound() { + let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take(); + *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None; + if let Some(info) = projected_commit_tx_info { + let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len() + + self.context.holding_cell_htlc_updates.len(); + if info.total_pending_htlcs == total_pending_htlcs + && info.next_holder_htlc_id == self.context.next_holder_htlc_id + && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id + && info.feerate == self.context.feerate_per_kw { + assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000); + } + } + } + } + + if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs { + return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs))); + } + + // Up to LDK 0.0.115, HTLC information was required to be duplicated in the + // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed + // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of + // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for + // backwards compatibility, we never use it in production. To provide test coverage, here, + // we randomly decide (in test/fuzzing builds) to use the new vec sometimes. 
+ #[allow(unused_assignments, unused_mut)] + let mut separate_nondust_htlc_sources = false; + #[cfg(all(feature = "std", any(test, fuzzing)))] { + use core::hash::{BuildHasher, Hasher}; + // Get a random value using the only std API to do so - the DefaultHasher + let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish(); + separate_nondust_htlc_sources = rand_val % 2 == 0; + } + + let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len()); + let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len()); + for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() { + if let Some(_) = htlc.transaction_output_index { + let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw, + self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(), + false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key); + + let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys); + let htlc_sighashtype = if self.context.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All }; + let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]); + log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.", + log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()), + encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id())); + if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) { + return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())); + } + if !separate_nondust_htlc_sources { + htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take())); + } + } else { + htlcs_and_sigs.push((htlc, None, source_opt.take())); + } + if separate_nondust_htlc_sources { + if let Some(source) = source_opt.take() { + nondust_htlc_sources.push(source); + } + } + debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere"); + } let holder_commitment_tx = HolderCommitmentTransaction::new( - initial_commitment_tx, + commitment_stats.tx, msg.signature, - Vec::new(), + msg.htlc_signatures.clone(), &self.context.get_holder_pubkeys().funding_pubkey, self.context.counterparty_funding_pubkey() ); - self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) + self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages) .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; - // Now that we're past error-generating stuff, update our local state: + // Update state now that we've passed all the can-fail calls... 
+ let mut need_commitment = false; + if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee { + if *update_state == FeeUpdateState::RemoteAnnounced { + *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce; + need_commitment = true; + } + } - let funding_redeemscript = self.context.get_funding_redeemscript(); - let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); - let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); - let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); - let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); - monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); - let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, - shutdown_script, self.context.get_holder_selected_contest_delay(), - &self.context.destination_script, (funding_txo, funding_txo_script.clone()), - &self.context.channel_transaction_parameters, - funding_redeemscript.clone(), self.context.channel_value_satoshis, - obscure_factor, - holder_commitment_tx, best_block, self.context.counterparty_node_id); + for htlc in self.context.pending_inbound_htlcs.iter_mut() { + let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state { + Some(forward_info.clone()) + } else { None }; + if let Some(forward_info) = new_forward { + log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", + log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id)); + htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info); + need_commitment = true; + } + } + let mut claimed_htlcs = Vec::new(); + for htlc in self.context.pending_outbound_htlcs.iter_mut() { + if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state { + log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.", + log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id)); + // Grab the preimage, if it exists, instead of cloning + let mut reason = OutboundHTLCOutcome::Success(None); + mem::swap(outcome, &mut reason); + if let OutboundHTLCOutcome::Success(Some(preimage)) = reason { + // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b) + // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could + // have a `Success(None)` reason. In this case we could forget some HTLC + // claims, but such an upgrade is unlikely and including claimed HTLCs here + // fixes a bug which the user was exposed to on 0.0.104 when they started the + // claim anyway. 
+ claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage)); + } + htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason); + need_commitment = true; + } + } - channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger); + self.context.latest_monitor_update_id += 1; + let mut monitor_update = ChannelMonitorUpdate { + update_id: self.context.latest_monitor_update_id, + updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { + commitment_tx: holder_commitment_tx, + htlc_outputs: htlcs_and_sigs, + claimed_htlcs, + nondust_htlc_sources, + }] + }; - self.context.channel_state = ChannelState::FundingSent as u32; - self.context.channel_id = funding_txo.to_channel_id(); - self.context.cur_counterparty_commitment_transaction_number -= 1; self.context.cur_holder_commitment_transaction_number -= 1; + // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call + // build_commitment_no_status_check() next which will reset this to RAAFirst. + self.context.resend_order = RAACommitmentOrder::CommitmentFirst; - log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id())); + if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 { + // In case we initially failed monitor updating without requiring a response, we need + // to make sure the RAA gets sent first. + self.context.monitor_pending_revoke_and_ack = true; + if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { + // If we were going to send a commitment_signed after the RAA, go ahead and do all + // the corresponding HTLC status updates so that get_last_commitment_update + // includes the right HTLCs. + self.context.monitor_pending_commitment_signed = true; + let mut additional_update = self.build_commitment_no_status_check(logger); + // build_commitment_no_status_check may bump latest_monitor_id but we want them to be + // strictly increasing by one, so decrement it here. + self.context.latest_monitor_update_id = monitor_update.update_id; + monitor_update.updates.append(&mut additional_update.updates); + } + log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.", + log_bytes!(self.context.channel_id)); + return Ok(self.push_ret_blockable_mon_update(monitor_update)); + } - let need_channel_ready = self.check_get_channel_ready(0).is_some(); - self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); + let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { + // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok - + // we'll send one right away when we get the revoke_and_ack when we + // free_holding_cell_htlcs(). + let mut additional_update = self.build_commitment_no_status_check(logger); + // build_commitment_no_status_check may bump latest_monitor_id but we want them to be + // strictly increasing by one, so decrement it here. 
+ self.context.latest_monitor_update_id = monitor_update.update_id; + monitor_update.updates.append(&mut additional_update.updates); + true + } else { false }; - Ok((msgs::FundingSigned { - channel_id: self.context.channel_id, - signature, - #[cfg(taproot)] - partial_signature_with_nonce: None, - }, channel_monitor)) + log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.", + log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" }); + self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new()); + return Ok(self.push_ret_blockable_mon_update(monitor_update)); } - /// Handles a funding_signed message from the remote end. - /// If this call is successful, broadcast the funding transaction (and not before!) - pub fn funding_signed( - &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result, ChannelError> - where - SP::Target: SignerProvider, - L::Target: Logger - { - if !self.context.is_outbound() { - return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())); - } - if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 { - return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned())); - } - if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || - self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { - panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); - } - - let funding_script = self.context.get_funding_redeemscript(); + /// Public version of the below, checking relevant preconditions first. + /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and + /// returns `(None, Vec::new())`. + pub fn maybe_free_holding_cell_htlcs(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger { + if self.context.channel_state >= ChannelState::ChannelReady as u32 && + (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 { + self.free_holding_cell_htlcs(logger) + } else { (None, Vec::new()) } + } - let counterparty_keys = self.context.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; - let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); - let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); + /// Frees any pending commitment updates in the holding cell, generating the relevant messages + /// for our counterparty. 
+ fn free_holding_cell_htlcs(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger { + assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0); + if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() { + log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(), + if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id())); - log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", - log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + let mut monitor_update = ChannelMonitorUpdate { + update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet! + updates: Vec::new(), + }; - let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; - { - let trusted_tx = initial_commitment_tx.trust(); - let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); - let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); - // They sign our commitment transaction, allowing us to broadcast the tx if we wish. - if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) { - return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned())); + let mut htlc_updates = Vec::new(); + mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates); + let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len()); + let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len()); + let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len()); + let mut htlcs_to_fail = Vec::new(); + for htlc_update in htlc_updates.drain(..) { + // Note that this *can* fail, though it should be due to rather-rare conditions on + // fee races with adding too many outputs which push our total payments just over + // the limit. In case it's less rare than I anticipate, we may want to revisit + // handling this case better and maybe fulfilling some of the HTLCs while attempting + // to rebalance channels. + match &htlc_update { + &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => { + match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) { + Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()), + Err(e) => { + match e { + ChannelError::Ignore(ref msg) => { + log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", + log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id())); + // If we fail to send here, then this HTLC should + // be failed backwards. Failing to send here + // indicates that this HTLC may keep being put back + // into the holding cell without ever being + // successfully forwarded/failed/fulfilled, causing + // our counterparty to eventually close on us. 
+ htlcs_to_fail.push((source.clone(), *payment_hash)); + }, + _ => { + panic!("Got a non-IgnoreError action trying to send holding cell HTLC"); + }, + } + } + } + }, + &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => { + // If an HTLC claim was previously added to the holding cell (via + // `get_update_fulfill_htlc`, then generating the claim message itself must + // not fail - any in between attempts to claim the HTLC will have resulted + // in it hitting the holding cell again and we cannot change the state of a + // holding cell HTLC from fulfill to anything else. + let (update_fulfill_msg_option, mut additional_monitor_update) = + if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) { + (msg, monitor_update) + } else { unreachable!() }; + update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap()); + monitor_update.updates.append(&mut additional_monitor_update.updates); + }, + &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => { + match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) { + Ok(update_fail_msg_option) => { + // If an HTLC failure was previously added to the holding cell (via + // `queue_fail_htlc`) then generating the fail message itself must + // not fail - we should never end up in a state where we double-fail + // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait + // for a full revocation before failing. + update_fail_htlcs.push(update_fail_msg_option.unwrap()) + }, + Err(e) => { + if let ChannelError::Ignore(_) = e {} + else { + panic!("Got a non-IgnoreError action trying to fail holding cell HTLC"); + } + } + } + }, + } } - } + if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.context.holding_cell_update_fee.is_none() { + return (None, htlcs_to_fail); + } + let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() { + self.send_update_fee(feerate, false, logger) + } else { + None + }; - let holder_commitment_tx = HolderCommitmentTransaction::new( - initial_commitment_tx, - msg.signature, - Vec::new(), - &self.context.get_holder_pubkeys().funding_pubkey, - self.context.counterparty_funding_pubkey() - ); + let mut additional_update = self.build_commitment_no_status_check(logger); + // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id + // but we want them to be strictly increasing by one, so reset it here. 
+ self.context.latest_monitor_update_id = monitor_update.update_id; + monitor_update.updates.append(&mut additional_update.updates); - self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) - .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; + log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.", + log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" }, + update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len()); + self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) + } else { + (None, Vec::new()) + } + } - let funding_redeemscript = self.context.get_funding_redeemscript(); - let funding_txo = self.context.get_funding_txo().unwrap(); - let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); - let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); - let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); - let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); - monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); - let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, - shutdown_script, self.context.get_holder_selected_contest_delay(), - &self.context.destination_script, (funding_txo, funding_txo_script), - &self.context.channel_transaction_parameters, - funding_redeemscript.clone(), self.context.channel_value_satoshis, - obscure_factor, - holder_commitment_tx, best_block, self.context.counterparty_node_id); - - channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger); - - assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update! - self.context.channel_state = ChannelState::FundingSent as u32; - self.context.cur_holder_commitment_transaction_number -= 1; - self.context.cur_counterparty_commitment_transaction_number -= 1; - - log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id())); - - let need_channel_ready = self.check_get_channel_ready(0).is_some(); - self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); - Ok(channel_monitor) - } - - /// Handles a channel_ready message from our peer. If we've already sent our channel_ready - /// and the channel is now usable (and public), this may generate an announcement_signatures to - /// reply with. - pub fn channel_ready( - &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash, - user_config: &UserConfig, best_block: &BestBlock, logger: &L - ) -> Result, ChannelError> - where - NS::Target: NodeSigner, - L::Target: Logger + /// Handles receiving a remote's revoke_and_ack. 
Note that we may return a new + /// commitment_signed message here in case we had pending outbound HTLCs to add which were + /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail, + /// generating an appropriate error *after* the channel state has been updated based on the + /// revoke_and_ack message. + pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError> + where L::Target: Logger, { + if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { + return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned())); + } if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - self.context.workaround_lnd_bug_4006 = Some(msg.clone()); - return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned())); + return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned())); } - - if let Some(scid_alias) = msg.short_channel_id_alias { - if Some(scid_alias) != self.context.short_channel_id { - // The scid alias provided can be used to route payments *from* our counterparty, - // i.e. can be used for inbound payments and provided in invoices, but is not used - // when routing outbound payments. - self.context.latest_inbound_scid_alias = Some(scid_alias); - } + if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() { + return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned())); } - let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); + let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned()); - if non_shutdown_state == ChannelState::FundingSent as u32 { - self.context.channel_state |= ChannelState::TheirChannelReady as u32; - } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) { - self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS); - self.context.update_time_counter += 1; - } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 || - // If we reconnected before sending our `channel_ready` they may still resend theirs: - (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) == - (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32)) - { - // They probably disconnected/reconnected and re-sent the channel_ready, which is - // required, or they're sending a fresh SCID alias. - let expected_point = - if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 { - // If they haven't ever sent an updated point, the point they send should match - // the current one. 
- self.context.counterparty_cur_commitment_point - } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 { - // If we've advanced the commitment number once, the second commitment point is - // at `counterparty_prev_commitment_point`, which is not yet revoked. - debug_assert!(self.context.counterparty_prev_commitment_point.is_some()); - self.context.counterparty_prev_commitment_point - } else { - // If they have sent updated points, channel_ready is always supposed to match - // their "first" point, which we re-derive here. - Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice( - &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available") - ).expect("We already advanced, so previous secret keys should have been validated already"))) - }; - if expected_point != Some(msg.next_per_commitment_point) { - return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned())); + if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point { + if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point { + return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned())); } - return Ok(None); - } else { - return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())); } - self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point; - self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); - - log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id())); - - Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger)) - } - - pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError> - where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger { - // We can't accept HTLCs sent after we've sent a shutdown. - let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32); - if local_sent_shutdown { - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8); - } - // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec. 
- let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32); - if remote_sent_shutdown { - return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned())); - } - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned())); - } - if msg.amount_msat > self.context.channel_value_satoshis * 1000 { - return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned())); - } - if msg.amount_msat == 0 { - return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned())); - } - if msg.amount_msat < self.context.holder_htlc_minimum_msat { - return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat))); + if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 { + // Our counterparty seems to have burned their coins to us (by revoking a state when we + // haven't given them a new commitment transaction to broadcast). We should probably + // take advantage of this by updating our channel monitor, sending them an error, and + // waiting for them to broadcast their latest (now-revoked claim). But, that would be a + // lot of work, and there's some chance this is all a misunderstanding anyway. + // We have to do *something*, though, since our signer may get mad at us for otherwise + // jumping a remote commitment number, so best to just force-close and move on. + return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned())); } - let inbound_stats = self.context.get_inbound_pending_htlc_stats(None); - let outbound_stats = self.context.get_outbound_pending_htlc_stats(None); - if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 { - return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs))); - } - if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat { - return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat))); - } - // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet - // the reserve_satoshis we told them to always have as direct payment so that they lose - // something if we punish them for broadcasting an old state). - // Note that we don't really care about having a small/no to_remote output in our local - // commitment transactions, as the purpose of the channel reserve is to ensure we can - // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be - // present in the next commitment transaction we send them (at least for fulfilled ones, - // failed ones won't modify value_to_self). - // Note that we will send HTLCs which another instance of rust-lightning would think - // violate the reserve value if we do not do this (as we forget inbound HTLCs from the - // Channel state once they will not be present in the next received commitment - // transaction). 
- let mut removed_outbound_total_msat = 0; - for ref htlc in self.context.pending_outbound_htlcs.iter() { - if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state { - removed_outbound_total_msat += htlc.amount_msat; - } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state { - removed_outbound_total_msat += htlc.amount_msat; - } + #[cfg(any(test, fuzzing))] + { + *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None; + *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None; } - let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() { - (0, 0) - } else { - let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64; - (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, - dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) + self.context.holder_signer.validate_counterparty_revocation( + self.context.cur_counterparty_commitment_transaction_number + 1, + &secret + ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?; + + self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret) + .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?; + self.context.latest_monitor_update_id += 1; + let mut monitor_update = ChannelMonitorUpdate { + update_id: self.context.latest_monitor_update_id, + updates: vec![ChannelMonitorUpdateStep::CommitmentSecret { + idx: self.context.cur_counterparty_commitment_transaction_number + 1, + secret: msg.per_commitment_secret, + }], }; - let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; - if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { - let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; - if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() { - log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", - on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat()); - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); - } - } - let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; - if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { - let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; - if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() { - log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", - on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat()); - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); - } - } + // Update state now that we've passed all the can-fail calls... 
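(The new `revoke_and_ack` path validates the revealed per-commitment secret and stores it via `commitment_secrets.provide_secret`, which fails if previously revealed secrets cannot be re-derived from the new one. A minimal sketch of the BOLT-3 derivation that makes this compact storage possible, using the `sha2` crate rather than LDK's internal helper; `derive_per_commitment_secret` is a hypothetical name.)

use sha2::{Digest, Sha256};

// BOLT-3 per-commitment secret derivation: starting from a 32-byte seed, for each set bit
// of the 48-bit index (high to low), flip that bit of the running value and hash it. A
// secret revealed for a later index lets the receiver re-derive every earlier secret that
// shares its high bits, so only O(49) secrets need to be stored and newly revealed ones
// can be checked for consistency.
fn derive_per_commitment_secret(seed: [u8; 32], idx: u64) -> [u8; 32] {
    let mut p = seed;
    for b in (0..48u32).rev() {
        if (idx >> b) & 1 == 1 {
            p[(b / 8) as usize] ^= 1u8 << (b % 8);
            p = Sha256::digest(&p).into();
        }
    }
    p
}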
+ // (note that we may still fail to generate the new commitment_signed message, but that's + // OK, we step the channel here and *then* if the new generation fails we can fail the + // channel based on that, but stepping stuff here should be safe either way. + self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32); + self.context.sent_message_awaiting_response = None; + self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point; + self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); + self.context.cur_counterparty_commitment_transaction_number -= 1; - let pending_value_to_self_msat = - self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat; - let pending_remote_value_msat = - self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat; - if pending_remote_value_msat < msg.amount_msat { - return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned())); + if self.context.announcement_sigs_state == AnnouncementSigsState::Committed { + self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived; } - // Check that the remote can afford to pay for this HTLC on-chain at the current - // feerate_per_kw, while maintaining their channel reserve (as required by the spec). - let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else { - let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations - }; - if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat { - return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned())); - }; + log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id())); + let mut to_forward_infos = Vec::new(); + let mut revoked_htlcs = Vec::new(); + let mut finalized_claimed_htlcs = Vec::new(); + let mut update_fail_htlcs = Vec::new(); + let mut update_fail_malformed_htlcs = Vec::new(); + let mut require_commitment = false; + let mut value_to_self_msat_diff: i64 = 0; - if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 { - return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned())); - } + { + // Take references explicitly so that we can hold multiple references to self.context. + let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs; + let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs; - if !self.context.is_outbound() { - // `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from - // the spec because in the spec, the fee spike buffer requirement doesn't exist on the - // receiver's side, only on the sender's. - // Note that when we eventually remove support for fee updates and switch to anchor output - // fees, we will drop the `2 *`, since we no longer be as sensitive to fee spikes. But, keep - // the extra htlc when calculating the next remote commitment transaction fee as we should - // still be able to afford adding this HTLC plus one more future HTLC, regardless of being - // sensitive to fee spikes. 
- let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); - if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat { - // Note that if the pending_forward_status is not updated here, then it's because we're already failing - // the HTLC, i.e. its status is already set to failing. - log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id())); - pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); + // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug) + pending_inbound_htlcs.retain(|htlc| { + if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state { + log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0)); + if let &InboundHTLCRemovalReason::Fulfill(_) = reason { + value_to_self_msat_diff += htlc.amount_msat as i64; + } + false + } else { true } + }); + pending_outbound_htlcs.retain(|htlc| { + if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state { + log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0)); + if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :( + revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason)); + } else { + finalized_claimed_htlcs.push(htlc.source.clone()); + // They fulfilled, so we sent them money + value_to_self_msat_diff -= htlc.amount_msat as i64; + } + false + } else { true } + }); + for htlc in pending_inbound_htlcs.iter_mut() { + let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state { + true + } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state { + true + } else { false }; + if swap { + let mut state = InboundHTLCState::Committed; + mem::swap(&mut state, &mut htlc.state); + + if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state { + log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); + htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info); + require_commitment = true; + } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state { + match forward_info { + PendingHTLCStatus::Fail(fail_msg) => { + log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0)); + require_commitment = true; + match fail_msg { + HTLCFailureMsg::Relay(msg) => { + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone())); + update_fail_htlcs.push(msg) + }, + HTLCFailureMsg::Malformed(msg) => { + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code))); + update_fail_malformed_htlcs.push(msg) + }, + } + }, + PendingHTLCStatus::Forward(forward_info) => { + log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0)); + to_forward_infos.push((forward_info, htlc.htlc_id)); + 
htlc.state = InboundHTLCState::Committed; + } + } + } + } } - } else { - // Check that they won't violate our local required channel reserve by adding this HTLC. - let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None); - if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat { - return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned())); + for htlc in pending_outbound_htlcs.iter_mut() { + if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { + log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0)); + htlc.state = OutboundHTLCState::Committed; + } + if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state { + log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); + // Grab the preimage, if it exists, instead of cloning + let mut reason = OutboundHTLCOutcome::Success(None); + mem::swap(outcome, &mut reason); + htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason); + require_commitment = true; + } } } - if self.context.next_counterparty_htlc_id != msg.htlc_id { - return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id))); - } - if msg.cltv_expiry >= 500000000 { - return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned())); + self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64; + + if let Some((feerate, update_state)) = self.context.pending_update_fee { + match update_state { + FeeUpdateState::Outbound => { + debug_assert!(self.context.is_outbound()); + log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate); + self.context.feerate_per_kw = feerate; + self.context.pending_update_fee = None; + }, + FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); }, + FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { + debug_assert!(!self.context.is_outbound()); + log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); + require_commitment = true; + self.context.feerate_per_kw = feerate; + self.context.pending_update_fee = None; + }, + } } - if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 { - if let PendingHTLCStatus::Forward(_) = pending_forward_status { - panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing"); + if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 { + // We can't actually generate a new commitment transaction (incl by freeing holding + // cells) while we can't update the monitor, so we just return what we have. + if require_commitment { + self.context.monitor_pending_commitment_signed = true; + // When the monitor updating is restored we'll call get_last_commitment_update(), + // which does not update state, but we're definitely now awaiting a remote revoke + // before we can step forward any more, so set it here. 
+ let mut additional_update = self.build_commitment_no_status_check(logger); + // build_commitment_no_status_check may bump latest_monitor_id but we want them to be + // strictly increasing by one, so decrement it here. + self.context.latest_monitor_update_id = monitor_update.update_id; + monitor_update.updates.append(&mut additional_update.updates); } + self.context.monitor_pending_forwards.append(&mut to_forward_infos); + self.context.monitor_pending_failures.append(&mut revoked_htlcs); + self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs); + log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id())); + return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update))); } - // Now update local state: - self.context.next_counterparty_htlc_id += 1; - self.context.pending_inbound_htlcs.push(InboundHTLCOutput { - htlc_id: msg.htlc_id, - amount_msat: msg.amount_msat, - payment_hash: msg.payment_hash, - cltv_expiry: msg.cltv_expiry, - state: InboundHTLCState::RemoteAnnounced(pending_forward_status), - }); - Ok(()) - } + match self.free_holding_cell_htlcs(logger) { + (Some(_), htlcs_to_fail) => { + let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update; + // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be + // strictly increasing by one, so decrement it here. + self.context.latest_monitor_update_id = monitor_update.update_id; + monitor_update.updates.append(&mut additional_update.updates); - /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed - #[inline] - fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option, fail_reason: Option) -> Result<&OutboundHTLCOutput, ChannelError> { - assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage"); - for htlc in self.context.pending_outbound_htlcs.iter_mut() { - if htlc.htlc_id == htlc_id { - let outcome = match check_preimage { - None => fail_reason.into(), - Some(payment_preimage) => { - let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()); - if payment_hash != htlc.payment_hash { - return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id))); - } - OutboundHTLCOutcome::Success(Some(payment_preimage)) - } - }; - match htlc.state { - OutboundHTLCState::LocalAnnounced(_) => - return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))), - OutboundHTLCState::Committed => { - htlc.state = OutboundHTLCState::RemoteRemoved(outcome); - }, - OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) => - return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))), + self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); + Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) + }, + (None, htlcs_to_fail) => { + if require_commitment { + let mut additional_update = self.build_commitment_no_status_check(logger); + + // build_commitment_no_status_check may bump latest_monitor_id but we want them to be + // strictly increasing by one, so decrement it here. 
+ self.context.latest_monitor_update_id = monitor_update.update_id; + monitor_update.updates.append(&mut additional_update.updates); + + log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.", + log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len()); + self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); + Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) + } else { + log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id())); + self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); + Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) } - return Ok(htlc); } } - Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned())) } - pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> { - if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { - return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned())); - } - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned())); - } - - self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat)) + /// Queues up an outbound update fee by placing it in the holding cell. You should call + /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the + /// commitment update. + pub fn queue_update_fee(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger { + let msg_opt = self.send_update_fee(feerate_per_kw, true, logger); + assert!(msg_opt.is_none(), "We forced holding cell?"); } - pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> { - if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { - return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned())); + /// Adds a pending update to this channel. See the doc for send_htlc for + /// further details on the optionness of the return value. + /// If our balance is too low to cover the cost of the next commitment transaction at the + /// new feerate, the update is cancelled. + /// + /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this + /// [`Channel`] if `force_holding_cell` is false. 
+ fn send_update_fee(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option where L::Target: Logger { + if !self.context.is_outbound() { + panic!("Cannot send fee from inbound channel"); } - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned())); + if !self.context.is_usable() { + panic!("Cannot update fee until channel is fully established and we haven't started shutting down"); } - - self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?; - Ok(()) - } - - pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> { - if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { - return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned())); + if !self.context.is_live() { + panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)"); } - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned())); + + // Before proposing a feerate update, check that we can actually afford the new fee. + let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); + let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); + let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); + let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000; + let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; + if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { + //TODO: auto-close after a number of failures? + log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw); + return None; } - self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?; - Ok(()) - } - - pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result, ChannelError> - where L::Target: Logger - { - if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { - return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned())); - } - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned())); + // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`. 
+ let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; + let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; + if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() { + log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); + return None; } - if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() { - return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned())); + if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() { + log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); + return None; } - let funding_script = self.context.get_funding_redeemscript(); + if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { + force_holding_cell = true; + } - let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + if force_holding_cell { + self.context.holding_cell_update_fee = Some(feerate_per_kw); + return None; + } - let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger); - let commitment_txid = { - let trusted_tx = commitment_stats.tx.trust(); - let bitcoin_tx = trusted_tx.built_transaction(); - let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); + debug_assert!(self.context.pending_update_fee.is_none()); + self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound)); - log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}", - log_bytes!(msg.signature.serialize_compact()[..]), - log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), - log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); - if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) { - return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned())); - } - bitcoin_tx.txid - }; - let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect(); + Some(msgs::UpdateFee { + channel_id: self.context.channel_id, + feerate_per_kw, + }) + } - // If our counterparty updated the channel fee in this commitment transaction, check that - // they can actually afford the new fee now. 
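(`send_update_fee` above refuses to propose a feerate the holder cannot afford once a buffer of additional HTLCs is accounted for, and the `commitment_signed` check quoted here applies the same idea to the counterparty when they raise the fee. A standalone sketch of that affordability test using the BOLT-3 commitment-transaction weights; the constants come from the spec and the function names are illustrative, not LDK's own `commit_tx_fee_sat`.)

// BOLT-3 commitment transaction weights: 724 weight units of base transaction data
// without anchor outputs, 1124 with anchors, plus 172 per non-dust HTLC output.
const COMMIT_WEIGHT: u64 = 724;
const COMMIT_WEIGHT_ANCHORS: u64 = 1124;
const COMMIT_WEIGHT_PER_HTLC: u64 = 172;

fn commit_tx_fee_sat(feerate_per_kw: u32, num_nondust_htlcs: usize, opt_anchors: bool) -> u64 {
    let base = if opt_anchors { COMMIT_WEIGHT_ANCHORS } else { COMMIT_WEIGHT };
    feerate_per_kw as u64 * (base + num_nondust_htlcs as u64 * COMMIT_WEIGHT_PER_HTLC) / 1000
}

// Roughly the shape of the check: the fee payer must cover the fee at the new feerate,
// with headroom for a few more HTLCs, without dipping into the channel reserve it owes
// the other side.
fn can_afford_feerate(
    feerate_per_kw: u32, num_nondust_htlcs: usize, extra_htlc_buffer: usize,
    opt_anchors: bool, payer_balance_msat: u64, reserve_sat: u64,
) -> bool {
    let buffer_fee_msat =
        commit_tx_fee_sat(feerate_per_kw, num_nondust_htlcs + extra_htlc_buffer, opt_anchors) * 1000;
    payer_balance_msat >= buffer_fee_msat + reserve_sat * 1000
}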
- let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee { - update_state == FeeUpdateState::RemoteAnnounced - } else { false }; - if update_fee { - debug_assert!(!self.context.is_outbound()); - let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000; - if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat { - return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned())); - } - } - #[cfg(any(test, fuzzing))] - { - if self.context.is_outbound() { - let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take(); - *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None; - if let Some(info) = projected_commit_tx_info { - let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len() - + self.context.holding_cell_htlc_updates.len(); - if info.total_pending_htlcs == total_pending_htlcs - && info.next_holder_htlc_id == self.context.next_holder_htlc_id - && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id - && info.feerate == self.context.feerate_per_kw { - assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000); - } - } - } + /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC + /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be + /// resent. + /// No further message handling calls may be made until a channel_reestablish dance has + /// completed. + pub fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) where L::Target: Logger { + assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); + if self.context.channel_state < ChannelState::FundingSent as u32 { + self.context.channel_state = ChannelState::ShutdownComplete as u32; + return; } - if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs { - return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs))); + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) { + // While the below code should be idempotent, it's simpler to just return early, as + // redundant disconnect events can fire, though they should be rare. + return; } - // Up to LDK 0.0.115, HTLC information was required to be duplicated in the - // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed - // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of - // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for - // backwards compatibility, we never use it in production. To provide test coverage, here, - // we randomly decide (in test/fuzzing builds) to use the new vec sometimes. 
- #[allow(unused_assignments, unused_mut)] - let mut separate_nondust_htlc_sources = false; - #[cfg(all(feature = "std", any(test, fuzzing)))] { - use core::hash::{BuildHasher, Hasher}; - // Get a random value using the only std API to do so - the DefaultHasher - let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish(); - separate_nondust_htlc_sources = rand_val % 2 == 0; + if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed { + self.context.announcement_sigs_state = AnnouncementSigsState::NotSent; } - let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len()); - let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len()); - for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() { - if let Some(_) = htlc.transaction_output_index { - let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw, - self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(), - false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key); + // Upon reconnect we have to start the closing_signed dance over, but shutdown messages + // will be retransmitted. + self.context.last_sent_closing_fee = None; + self.context.pending_counterparty_closing_signed = None; + self.context.closing_fee_limits = None; - let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys); - let htlc_sighashtype = if self.context.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All }; - let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]); - log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.", - log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()), - encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id())); - if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) { - return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())); - } - if !separate_nondust_htlc_sources { - htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take())); - } - } else { - htlcs_and_sigs.push((htlc, None, source_opt.take())); - } - if separate_nondust_htlc_sources { - if let Some(source) = source_opt.take() { - nondust_htlc_sources.push(source); - } + let mut inbound_drop_count = 0; + self.context.pending_inbound_htlcs.retain(|htlc| { + match htlc.state { + InboundHTLCState::RemoteAnnounced(_) => { + // They sent us an update_add_htlc but we never got the commitment_signed. + // We'll tell them what commitment_signed we're expecting next and they'll drop + // this HTLC accordingly + inbound_drop_count += 1; + false + }, + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => { + // We received a commitment_signed updating this HTLC and (at least hopefully) + // sent a revoke_and_ack (which we can re-transmit) and have heard nothing + // in response to it yet, so don't touch it. 
+ true + }, + InboundHTLCState::Committed => true, + InboundHTLCState::LocalRemoved(_) => { + // We (hopefully) sent a commitment_signed updating this HTLC (which we can + // re-transmit if needed) and they may have even sent a revoke_and_ack back + // (that we missed). Keep this around for now and if they tell us they missed + // the commitment_signed we can re-transmit the update then. + true + }, } - debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere"); - } - - let holder_commitment_tx = HolderCommitmentTransaction::new( - commitment_stats.tx, - msg.signature, - msg.htlc_signatures.clone(), - &self.context.get_holder_pubkeys().funding_pubkey, - self.context.counterparty_funding_pubkey() - ); - - self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages) - .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; + }); + self.context.next_counterparty_htlc_id -= inbound_drop_count; - // Update state now that we've passed all the can-fail calls... - let mut need_commitment = false; - if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee { - if *update_state == FeeUpdateState::RemoteAnnounced { - *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce; - need_commitment = true; + if let Some((_, update_state)) = self.context.pending_update_fee { + if update_state == FeeUpdateState::RemoteAnnounced { + debug_assert!(!self.context.is_outbound()); + self.context.pending_update_fee = None; } } - for htlc in self.context.pending_inbound_htlcs.iter_mut() { - let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state { - Some(forward_info.clone()) - } else { None }; - if let Some(forward_info) = new_forward { - log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", - log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id)); - htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info); - need_commitment = true; - } - } - let mut claimed_htlcs = Vec::new(); for htlc in self.context.pending_outbound_htlcs.iter_mut() { - if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state { - log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.", - log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id)); - // Grab the preimage, if it exists, instead of cloning - let mut reason = OutboundHTLCOutcome::Success(None); - mem::swap(outcome, &mut reason); - if let OutboundHTLCOutcome::Success(Some(preimage)) = reason { - // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b) - // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could - // have a `Success(None)` reason. In this case we could forget some HTLC - // claims, but such an upgrade is unlikely and including claimed HTLCs here - // fixes a bug which the user was exposed to on 0.0.104 when they started the - // claim anyway. - claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage)); - } - htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason); - need_commitment = true; + if let OutboundHTLCState::RemoteRemoved(_) = htlc.state { + // They sent us an update to remove this but haven't yet sent the corresponding + // commitment_signed, we need to move it back to Committed and they can re-send + // the update upon reconnection. 
+ htlc.state = OutboundHTLCState::Committed; } } - self.context.latest_monitor_update_id += 1; - let mut monitor_update = ChannelMonitorUpdate { - update_id: self.context.latest_monitor_update_id, - updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { - commitment_tx: holder_commitment_tx, - htlc_outputs: htlcs_and_sigs, - claimed_htlcs, - nondust_htlc_sources, - }] - }; + self.context.sent_message_awaiting_response = None; - self.context.cur_holder_commitment_transaction_number -= 1; - // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call - // build_commitment_no_status_check() next which will reset this to RAAFirst. - self.context.resend_order = RAACommitmentOrder::CommitmentFirst; + self.context.channel_state |= ChannelState::PeerDisconnected as u32; + log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id())); + } - if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 { - // In case we initially failed monitor updating without requiring a response, we need - // to make sure the RAA gets sent first. - self.context.monitor_pending_revoke_and_ack = true; - if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { - // If we were going to send a commitment_signed after the RAA, go ahead and do all - // the corresponding HTLC status updates so that get_last_commitment_update - // includes the right HTLCs. - self.context.monitor_pending_commitment_signed = true; - let mut additional_update = self.build_commitment_no_status_check(logger); - // build_commitment_no_status_check may bump latest_monitor_id but we want them to be - // strictly increasing by one, so decrement it here. - self.context.latest_monitor_update_id = monitor_update.update_id; - monitor_update.updates.append(&mut additional_update.updates); - } - log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.", - log_bytes!(self.context.channel_id)); - return Ok(self.push_ret_blockable_mon_update(monitor_update)); - } + /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted. + /// This must be called before we return the [`ChannelMonitorUpdate`] back to the + /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor + /// update completes (potentially immediately). + /// The messages which were generated with the monitor update must *not* have been sent to the + /// remote end, and must instead have been dropped. They will be regenerated when + /// [`Self::monitor_updating_restored`] is called. 
+ /// + /// [`ChannelManager`]: super::channelmanager::ChannelManager + /// [`chain::Watch`]: crate::chain::Watch + /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress + fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool, + resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>, + mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + mut pending_finalized_claimed_htlcs: Vec + ) { + self.context.monitor_pending_revoke_and_ack |= resend_raa; + self.context.monitor_pending_commitment_signed |= resend_commitment; + self.context.monitor_pending_channel_ready |= resend_channel_ready; + self.context.monitor_pending_forwards.append(&mut pending_forwards); + self.context.monitor_pending_failures.append(&mut pending_fails); + self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs); + self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32; + } - let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { - // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok - - // we'll send one right away when we get the revoke_and_ack when we - // free_holding_cell_htlcs(). - let mut additional_update = self.build_commitment_no_status_check(logger); - // build_commitment_no_status_check may bump latest_monitor_id but we want them to be - // strictly increasing by one, so decrement it here. - self.context.latest_monitor_update_id = monitor_update.update_id; - monitor_update.updates.append(&mut additional_update.updates); - true - } else { false }; + /// Indicates that the latest ChannelMonitor update has been committed by the client + /// successfully and we should restore normal operation. Returns messages which should be sent + /// to the remote side. + pub fn monitor_updating_restored( + &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash, + user_config: &UserConfig, best_block_height: u32 + ) -> MonitorRestoreUpdates + where + L::Target: Logger, + NS::Target: NodeSigner + { + assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32); + self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32); + let mut found_blocked = false; + self.context.pending_monitor_updates.retain(|upd| { + if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); } + if upd.blocked { found_blocked = true; } + upd.blocked + }); - log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.", - log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" }); - self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new()); - return Ok(self.push_ret_blockable_mon_update(monitor_update)); - } + // If we're past (or at) the FundingSent stage on an outbound channel, try to + // (re-)broadcast the funding transaction as we may have declined to broadcast it when we + // first received the funding_signed. 
+ let mut funding_broadcastable = + if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 { + self.context.funding_transaction.take() + } else { None }; + // That said, if the funding transaction is already confirmed (ie we're active with a + // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx. + if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) { + funding_broadcastable = None; + } - /// Public version of the below, checking relevant preconditions first. - /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and - /// returns `(None, Vec::new())`. - pub fn maybe_free_holding_cell_htlcs(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger { - if self.context.channel_state >= ChannelState::ChannelReady as u32 && - (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 { - self.free_holding_cell_htlcs(logger) - } else { (None, Vec::new()) } - } + // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress + // (and we assume the user never directly broadcasts the funding transaction and waits for + // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're + // * an inbound channel that failed to persist the monitor on funding_created and we got + // the funding transaction confirmed before the monitor was persisted, or + // * a 0-conf channel and intended to send the channel_ready before any broadcast at all. + let channel_ready = if self.context.monitor_pending_channel_ready { + assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0), + "Funding transaction broadcast by the local client before it should have - LDK didn't do it!"); + self.context.monitor_pending_channel_ready = false; + let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + Some(msgs::ChannelReady { + channel_id: self.context.channel_id(), + next_per_commitment_point, + short_channel_id_alias: Some(self.context.outbound_scid_alias), + }) + } else { None }; - /// Frees any pending commitment updates in the holding cell, generating the relevant messages - /// for our counterparty. - fn free_holding_cell_htlcs(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger { - assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0); - if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() { - log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(), - if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id())); + let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger); - let mut monitor_update = ChannelMonitorUpdate { - update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet! 
- updates: Vec::new(), - }; + let mut accepted_htlcs = Vec::new(); + mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards); + let mut failed_htlcs = Vec::new(); + mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures); + let mut finalized_claimed_htlcs = Vec::new(); + mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills); - let mut htlc_updates = Vec::new(); - mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates); - let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len()); - let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len()); - let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len()); - let mut htlcs_to_fail = Vec::new(); - for htlc_update in htlc_updates.drain(..) { - // Note that this *can* fail, though it should be due to rather-rare conditions on - // fee races with adding too many outputs which push our total payments just over - // the limit. In case it's less rare than I anticipate, we may want to revisit - // handling this case better and maybe fulfilling some of the HTLCs while attempting - // to rebalance channels. - match &htlc_update { - &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => { - match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) { - Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()), - Err(e) => { - match e { - ChannelError::Ignore(ref msg) => { - log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", - log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id())); - // If we fail to send here, then this HTLC should - // be failed backwards. Failing to send here - // indicates that this HTLC may keep being put back - // into the holding cell without ever being - // successfully forwarded/failed/fulfilled, causing - // our counterparty to eventually close on us. - htlcs_to_fail.push((source.clone(), *payment_hash)); - }, - _ => { - panic!("Got a non-IgnoreError action trying to send holding cell HTLC"); - }, - } - } - } - }, - &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => { - // If an HTLC claim was previously added to the holding cell (via - // `get_update_fulfill_htlc`, then generating the claim message itself must - // not fail - any in between attempts to claim the HTLC will have resulted - // in it hitting the holding cell again and we cannot change the state of a - // holding cell HTLC from fulfill to anything else. - let (update_fulfill_msg_option, mut additional_monitor_update) = - if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. 
} = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) { - (msg, monitor_update) - } else { unreachable!() }; - update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap()); - monitor_update.updates.append(&mut additional_monitor_update.updates); - }, - &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => { - match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) { - Ok(update_fail_msg_option) => { - // If an HTLC failure was previously added to the holding cell (via - // `queue_fail_htlc`) then generating the fail message itself must - // not fail - we should never end up in a state where we double-fail - // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait - // for a full revocation before failing. - update_fail_htlcs.push(update_fail_msg_option.unwrap()) - }, - Err(e) => { - if let ChannelError::Ignore(_) = e {} - else { - panic!("Got a non-IgnoreError action trying to fail holding cell HTLC"); - } - } - } - }, - } - } - if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.context.holding_cell_update_fee.is_none() { - return (None, htlcs_to_fail); - } - let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() { - self.send_update_fee(feerate, false, logger) - } else { - None + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 { + self.context.monitor_pending_revoke_and_ack = false; + self.context.monitor_pending_commitment_signed = false; + return MonitorRestoreUpdates { + raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst, + accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs }; + } - let mut additional_update = self.build_commitment_no_status_check(logger); - // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id - // but we want them to be strictly increasing by one, so reset it here. 
- self.context.latest_monitor_update_id = monitor_update.update_id; - monitor_update.updates.append(&mut additional_update.updates); - - log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.", - log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" }, - update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len()); + let raa = if self.context.monitor_pending_revoke_and_ack { + Some(self.get_last_revoke_and_ack()) + } else { None }; + let commitment_update = if self.context.monitor_pending_commitment_signed { + self.mark_awaiting_response(); + Some(self.get_last_commitment_update(logger)) + } else { None }; - self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); - (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) - } else { - (None, Vec::new()) + self.context.monitor_pending_revoke_and_ack = false; + self.context.monitor_pending_commitment_signed = false; + let order = self.context.resend_order.clone(); + log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first", + log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" }, + if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, + match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); + MonitorRestoreUpdates { + raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs } } - /// Handles receiving a remote's revoke_and_ack. Note that we may return a new - /// commitment_signed message here in case we had pending outbound HTLCs to add which were - /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail, - /// generating an appropriate error *after* the channel state has been updated based on the - /// revoke_and_ack message. 
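(`monitor_updating_restored` returns the queued messages together with `resend_order` because, after a paused monitor update or a reconnection, the revoke_and_ack and commitment_signed must go back out in the same order they were originally generated. A hedged sketch of how a caller might honor that ordering; the field layout mirrors the patch, but the message types and `send_*` hooks are placeholders, not LDK API.)

// Placeholder message types standing in for msgs::RevokeAndACK / msgs::CommitmentUpdate.
struct RevokeAndACK;
struct CommitmentUpdate;

enum RAACommitmentOrder { CommitmentFirst, RevokeAndACKFirst }

struct MonitorRestoreUpdates {
    raa: Option<RevokeAndACK>,
    commitment_update: Option<CommitmentUpdate>,
    order: RAACommitmentOrder,
}

// Whatever actually transmits the messages must emit them in `order`, so the counterparty
// processes them in the same sequence as the original exchange.
fn enqueue_restored_messages(
    updates: MonitorRestoreUpdates,
    mut send_raa: impl FnMut(RevokeAndACK),
    mut send_commitment: impl FnMut(CommitmentUpdate),
) {
    match updates.order {
        RAACommitmentOrder::CommitmentFirst => {
            if let Some(cu) = updates.commitment_update { send_commitment(cu); }
            if let Some(raa) = updates.raa { send_raa(raa); }
        }
        RAACommitmentOrder::RevokeAndACKFirst => {
            if let Some(raa) = updates.raa { send_raa(raa); }
            if let Some(cu) = updates.commitment_update { send_commitment(cu); }
        }
    }
}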
- pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError> - where L::Target: Logger, + pub fn update_fee(&mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError> + where F::Target: FeeEstimator, L::Target: Logger { - if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { - return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned())); + if self.context.is_outbound() { + return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned())); } if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned())); - } - if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() { - return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned())); + return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned())); } + Channel::::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?; + let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None); - let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned()); - - if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point { - if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point { - return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned())); + self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced)); + self.context.update_time_counter += 1; + // If the feerate has increased over the previous dust buffer (note that + // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we + // won't be pushed over our dust exposure limit by the feerate increase. 
+ if feerate_over_dust_buffer { + let inbound_stats = self.context.get_inbound_pending_htlc_stats(None); + let outbound_stats = self.context.get_outbound_pending_htlc_stats(None); + let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; + let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; + if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() { + return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)", + msg.feerate_per_kw, holder_tx_dust_exposure))); + } + if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() { + return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)", + msg.feerate_per_kw, counterparty_tx_dust_exposure))); } } + Ok(()) + } - if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 { - // Our counterparty seems to have burned their coins to us (by revoking a state when we - // haven't given them a new commitment transaction to broadcast). We should probably - // take advantage of this by updating our channel monitor, sending them an error, and - // waiting for them to broadcast their latest (now-revoked claim). But, that would be a - // lot of work, and there's some chance this is all a misunderstanding anyway. - // We have to do *something*, though, since our signer may get mad at us for otherwise - // jumping a remote commitment number, so best to just force-close and move on. - return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned())); - } - - #[cfg(any(test, fuzzing))] - { - *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None; - *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None; - } - - self.context.holder_signer.validate_counterparty_revocation( - self.context.cur_counterparty_commitment_transaction_number + 1, - &secret - ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?; - - self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret) - .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?; - self.context.latest_monitor_update_id += 1; - let mut monitor_update = ChannelMonitorUpdate { - update_id: self.context.latest_monitor_update_id, - updates: vec![ChannelMonitorUpdateStep::CommitmentSecret { - idx: self.context.cur_counterparty_commitment_transaction_number + 1, - secret: msg.per_commitment_secret, - }], - }; - - // Update state now that we've passed all the can-fail calls... - // (note that we may still fail to generate the new commitment_signed message, but that's - // OK, we step the channel here and *then* if the new generation fails we can fail the - // channel based on that, but stepping stuff here should be safe either way. 
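(`update_fee` recomputes both sides' dust exposure at the proposed feerate because a higher feerate raises the threshold below which HTLCs are trimmed from the commitment transaction and become unrecoverable on-chain. A standalone sketch of the trimming threshold and the resulting exposure, using the BOLT-3 second-stage transaction weights; the helper names are hypothetical.)

// BOLT-3 weights of the second-stage HTLC transactions without anchor outputs.
const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663;
const HTLC_SUCCESS_TX_WEIGHT: u64 = 703;

// An offered HTLC is trimmed (gets no output on the commitment transaction) when its value
// cannot cover the dust limit plus the fee of its HTLC-timeout transaction at the given
// feerate; received HTLCs use the HTLC-success weight instead.
fn offered_htlc_trimmed(amount_msat: u64, feerate_per_kw: u32, dust_limit_sat: u64) -> bool {
    let htlc_tx_fee_sat = feerate_per_kw as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000;
    amount_msat / 1000 < dust_limit_sat + htlc_tx_fee_sat
}

// The quantity the checks above bound: the total msat value of HTLCs that would be burned
// to fees if the commitment transaction hit the chain at this feerate.
fn dust_exposure_msat(offered_htlcs_msat: &[u64], feerate_per_kw: u32, dust_limit_sat: u64) -> u64 {
    offered_htlcs_msat.iter().copied()
        .filter(|&amt| offered_htlc_trimmed(amt, feerate_per_kw, dust_limit_sat))
        .sum()
}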
- self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32); - self.context.sent_message_awaiting_response = None; - self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point; - self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); - self.context.cur_counterparty_commitment_transaction_number -= 1; - - if self.context.announcement_sigs_state == AnnouncementSigsState::Committed { - self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived; + fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK { + let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let per_commitment_secret = self.context.holder_signer.release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2); + msgs::RevokeAndACK { + channel_id: self.context.channel_id, + per_commitment_secret, + next_per_commitment_point, + #[cfg(taproot)] + next_local_nonce: None, } + } - log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id())); - let mut to_forward_infos = Vec::new(); - let mut revoked_htlcs = Vec::new(); - let mut finalized_claimed_htlcs = Vec::new(); + fn get_last_commitment_update(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger { + let mut update_add_htlcs = Vec::new(); + let mut update_fulfill_htlcs = Vec::new(); let mut update_fail_htlcs = Vec::new(); let mut update_fail_malformed_htlcs = Vec::new(); - let mut require_commitment = false; - let mut value_to_self_msat_diff: i64 = 0; - - { - // Take references explicitly so that we can hold multiple references to self.context. 
- let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs; - let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs; - - // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug) - pending_inbound_htlcs.retain(|htlc| { - if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state { - log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0)); - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { - value_to_self_msat_diff += htlc.amount_msat as i64; - } - false - } else { true } - }); - pending_outbound_htlcs.retain(|htlc| { - if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state { - log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0)); - if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :( - revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason)); - } else { - finalized_claimed_htlcs.push(htlc.source.clone()); - // They fulfilled, so we sent them money - value_to_self_msat_diff -= htlc.amount_msat as i64; - } - false - } else { true } - }); - for htlc in pending_inbound_htlcs.iter_mut() { - let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state { - true - } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state { - true - } else { false }; - if swap { - let mut state = InboundHTLCState::Committed; - mem::swap(&mut state, &mut htlc.state); - if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state { - log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); - htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info); - require_commitment = true; - } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state { - match forward_info { - PendingHTLCStatus::Fail(fail_msg) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0)); - require_commitment = true; - match fail_msg { - HTLCFailureMsg::Relay(msg) => { - htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone())); - update_fail_htlcs.push(msg) - }, - HTLCFailureMsg::Malformed(msg) => { - htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code))); - update_fail_malformed_htlcs.push(msg) - }, - } - }, - PendingHTLCStatus::Forward(forward_info) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0)); - to_forward_infos.push((forward_info, htlc.htlc_id)); - htlc.state = InboundHTLCState::Committed; - } - } - } - } - } - for htlc in pending_outbound_htlcs.iter_mut() { - if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { - log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0)); - htlc.state = OutboundHTLCState::Committed; - } - if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state { - log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); - // Grab the preimage, if it exists, 
instead of cloning - let mut reason = OutboundHTLCOutcome::Success(None); - mem::swap(outcome, &mut reason); - htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason); - require_commitment = true; - } + for htlc in self.context.pending_outbound_htlcs.iter() { + if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state { + update_add_htlcs.push(msgs::UpdateAddHTLC { + channel_id: self.context.channel_id(), + htlc_id: htlc.htlc_id, + amount_msat: htlc.amount_msat, + payment_hash: htlc.payment_hash, + cltv_expiry: htlc.cltv_expiry, + onion_routing_packet: (**onion_packet).clone(), + }); } } - self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64; - if let Some((feerate, update_state)) = self.context.pending_update_fee { - match update_state { - FeeUpdateState::Outbound => { - debug_assert!(self.context.is_outbound()); - log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate); - self.context.feerate_per_kw = feerate; - self.context.pending_update_fee = None; - }, - FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); }, - FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { - debug_assert!(!self.context.is_outbound()); - log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); - require_commitment = true; - self.context.feerate_per_kw = feerate; - self.context.pending_update_fee = None; - }, + for htlc in self.context.pending_inbound_htlcs.iter() { + if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state { + match reason { + &InboundHTLCRemovalReason::FailRelay(ref err_packet) => { + update_fail_htlcs.push(msgs::UpdateFailHTLC { + channel_id: self.context.channel_id(), + htlc_id: htlc.htlc_id, + reason: err_packet.clone() + }); + }, + &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => { + update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC { + channel_id: self.context.channel_id(), + htlc_id: htlc.htlc_id, + sha256_of_onion: sha256_of_onion.clone(), + failure_code: failure_code.clone(), + }); + }, + &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => { + update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC { + channel_id: self.context.channel_id(), + htlc_id: htlc.htlc_id, + payment_preimage: payment_preimage.clone(), + }); + }, + } } } - if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 { - // We can't actually generate a new commitment transaction (incl by freeing holding - // cells) while we can't update the monitor, so we just return what we have. - if require_commitment { - self.context.monitor_pending_commitment_signed = true; - // When the monitor updating is restored we'll call get_last_commitment_update(), - // which does not update state, but we're definitely now awaiting a remote revoke - // before we can step forward any more, so set it here. - let mut additional_update = self.build_commitment_no_status_check(logger); - // build_commitment_no_status_check may bump latest_monitor_id but we want them to be - // strictly increasing by one, so decrement it here. 
- self.context.latest_monitor_update_id = monitor_update.update_id; - monitor_update.updates.append(&mut additional_update.updates); - } - self.context.monitor_pending_forwards.append(&mut to_forward_infos); - self.context.monitor_pending_failures.append(&mut revoked_htlcs); - self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs); - log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id())); - return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update))); + let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() { + Some(msgs::UpdateFee { + channel_id: self.context.channel_id(), + feerate_per_kw: self.context.pending_update_fee.unwrap().0, + }) + } else { None }; + + log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", + log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" }, + update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); + msgs::CommitmentUpdate { + update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, + commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0, } + } - match self.free_holding_cell_htlcs(logger) { - (Some(_), htlcs_to_fail) => { - let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update; - // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be - // strictly increasing by one, so decrement it here. - self.context.latest_monitor_update_id = monitor_update.update_id; - monitor_update.updates.append(&mut additional_update.updates); - - self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); - Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) - }, - (None, htlcs_to_fail) => { - if require_commitment { - let mut additional_update = self.build_commitment_no_status_check(logger); - - // build_commitment_no_status_check may bump latest_monitor_id but we want them to be - // strictly increasing by one, so decrement it here. - self.context.latest_monitor_update_id = monitor_update.update_id; - monitor_update.updates.append(&mut additional_update.updates); - - log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.", - log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len()); - self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); - Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) - } else { - log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id())); - self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); - Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) - } - } - } - } - - /// Queues up an outbound update fee by placing it in the holding cell. You should call - /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the - /// commitment update. 
- pub fn queue_update_fee(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger { - let msg_opt = self.send_update_fee(feerate_per_kw, true, logger); - assert!(msg_opt.is_none(), "We forced holding cell?"); - } - - /// Adds a pending update to this channel. See the doc for send_htlc for - /// further details on the optionness of the return value. - /// If our balance is too low to cover the cost of the next commitment transaction at the - /// new feerate, the update is cancelled. + /// May panic if some calls other than message-handling calls (which will all Err immediately) + /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call. /// - /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this - /// [`Channel`] if `force_holding_cell` is false. - fn send_update_fee(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option where L::Target: Logger { - if !self.context.is_outbound() { - panic!("Cannot send fee from inbound channel"); - } - if !self.context.is_usable() { - panic!("Cannot update fee until channel is fully established and we haven't started shutting down"); - } - if !self.context.is_live() { - panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)"); - } - - // Before proposing a feerate update, check that we can actually afford the new fee. - let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); - let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); - let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); - let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000; - let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; - if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { - //TODO: auto-close after a number of failures? - log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw); - return None; + /// Some links printed in log lines are included here to check them during build (when run with + /// `cargo doc --document-private-items`): + /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and + /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`]. + pub fn channel_reestablish( + &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, + genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock + ) -> Result + where + L::Target: Logger, + NS::Target: NodeSigner + { + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 { + // While BOLT 2 doesn't indicate explicitly we should error this channel here, it + // almost certainly indicates we are going to end up out-of-sync in some way, so we + // just close here instead of trying to recover. 
+ return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned())); } - // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`. - let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; - let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() { - log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); - return None; - } - if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() { - log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw); - return None; + if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER || + msg.next_local_commitment_number == 0 { + return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned())); } - if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { - force_holding_cell = true; + if msg.next_remote_commitment_number > 0 { + let expected_point = self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx); + let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret) + .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?; + if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) { + return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned())); + } + if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number { + macro_rules! 
log_and_panic { + ($err_msg: expr) => { + log_error!(logger, $err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id)); + panic!($err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id)); + } + } + log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\ + This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\ + More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\ + If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\ + ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\ + ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\ + Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\ + See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info."); + } } - if force_holding_cell { - self.context.holding_cell_update_fee = Some(feerate_per_kw); - return None; + // Before we change the state of the channel, we check if the peer is sending a very old + // commitment transaction number, if yes we send a warning message. + let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1; + if msg.next_remote_commitment_number + 1 < our_commitment_transaction { + return Err( + ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction)) + ); } - debug_assert!(self.context.pending_update_fee.is_none()); - self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound)); + // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all + // remaining cases either succeed or ErrorMessage-fail). + self.context.channel_state &= !(ChannelState::PeerDisconnected as u32); + self.context.sent_message_awaiting_response = None; - Some(msgs::UpdateFee { - channel_id: self.context.channel_id, - feerate_per_kw, - }) - } + let shutdown_msg = if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 { + assert!(self.context.shutdown_scriptpubkey.is_some()); + Some(msgs::Shutdown { + channel_id: self.context.channel_id, + scriptpubkey: self.get_closing_scriptpubkey(), + }) + } else { None }; - /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC - /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be - /// resent. - /// No further message handling calls may be made until a channel_reestablish dance has - /// completed. 
- pub fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) where L::Target: Logger { - assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); - if self.context.channel_state < ChannelState::FundingSent as u32 { - self.context.channel_state = ChannelState::ShutdownComplete as u32; - return; - } + let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger); - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) { - // While the below code should be idempotent, it's simpler to just return early, as - // redundant disconnect events can fire, though they should be rare. - return; - } + if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 { + // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's. + if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 || + self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { + if msg.next_remote_commitment_number != 0 { + return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned())); + } + // Short circuit the whole handler as there is nothing we can resend them + return Ok(ReestablishResponses { + channel_ready: None, + raa: None, commitment_update: None, + order: RAACommitmentOrder::CommitmentFirst, + shutdown_msg, announcement_sigs, + }); + } - if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed { - self.context.announcement_sigs_state = AnnouncementSigsState::NotSent; + // We have OurChannelReady set! + let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + return Ok(ReestablishResponses { + channel_ready: Some(msgs::ChannelReady { + channel_id: self.context.channel_id(), + next_per_commitment_point, + short_channel_id_alias: Some(self.context.outbound_scid_alias), + }), + raa: None, commitment_update: None, + order: RAACommitmentOrder::CommitmentFirst, + shutdown_msg, announcement_sigs, + }); } - // Upon reconnect we have to start the closing_signed dance over, but shutdown messages - // will be retransmitted. - self.context.last_sent_closing_fee = None; - self.context.pending_counterparty_closing_signed = None; - self.context.closing_fee_limits = None; - - let mut inbound_drop_count = 0; - self.context.pending_inbound_htlcs.retain(|htlc| { - match htlc.state { - InboundHTLCState::RemoteAnnounced(_) => { - // They sent us an update_add_htlc but we never got the commitment_signed. - // We'll tell them what commitment_signed we're expecting next and they'll drop - // this HTLC accordingly - inbound_drop_count += 1; - false - }, - InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => { - // We received a commitment_signed updating this HTLC and (at least hopefully) - // sent a revoke_and_ack (which we can re-transmit) and have heard nothing - // in response to it yet, so don't touch it. 
- true - }, - InboundHTLCState::Committed => true, - InboundHTLCState::LocalRemoved(_) => { - // We (hopefully) sent a commitment_signed updating this HTLC (which we can - // re-transmit if needed) and they may have even sent a revoke_and_ack back - // (that we missed). Keep this around for now and if they tell us they missed - // the commitment_signed we can re-transmit the update then. - true - }, + let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number { + // Remote isn't waiting on any RevokeAndACK from us! + // Note that if we need to repeat our ChannelReady we'll do that in the next if block. + None + } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number { + if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { + self.context.monitor_pending_revoke_and_ack = true; + None + } else { + Some(self.get_last_revoke_and_ack()) } - }); - self.context.next_counterparty_htlc_id -= inbound_drop_count; + } else { + return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned())); + }; - if let Some((_, update_state)) = self.context.pending_update_fee { - if update_state == FeeUpdateState::RemoteAnnounced { - debug_assert!(!self.context.is_outbound()); - self.context.pending_update_fee = None; - } + // We increment cur_counterparty_commitment_transaction_number only upon receipt of + // revoke_and_ack, not on sending commitment_signed, so we add one if have + // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten + // the corresponding revoke_and_ack back yet. + let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0; + if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() { + self.mark_awaiting_response(); } + let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 }; - for htlc in self.context.pending_outbound_htlcs.iter_mut() { - if let OutboundHTLCState::RemoteRemoved(_) = htlc.state { - // They sent us an update to remove this but haven't yet sent the corresponding - // commitment_signed, we need to move it back to Committed and they can re-send - // the update upon reconnection. - htlc.state = OutboundHTLCState::Committed; - } - } - - self.context.sent_message_awaiting_response = None; - - self.context.channel_state |= ChannelState::PeerDisconnected as u32; - log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id())); - } - - /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted. - /// This must be called before we return the [`ChannelMonitorUpdate`] back to the - /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor - /// update completes (potentially immediately). - /// The messages which were generated with the monitor update must *not* have been sent to the - /// remote end, and must instead have been dropped. They will be regenerated when - /// [`Self::monitor_updating_restored`] is called. 
- /// - /// [`ChannelManager`]: super::channelmanager::ChannelManager - /// [`chain::Watch`]: crate::chain::Watch - /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress - fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool, - resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>, - mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, - mut pending_finalized_claimed_htlcs: Vec - ) { - self.context.monitor_pending_revoke_and_ack |= resend_raa; - self.context.monitor_pending_commitment_signed |= resend_commitment; - self.context.monitor_pending_channel_ready |= resend_channel_ready; - self.context.monitor_pending_forwards.append(&mut pending_forwards); - self.context.monitor_pending_failures.append(&mut pending_fails); - self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs); - self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32; - } - - /// Indicates that the latest ChannelMonitor update has been committed by the client - /// successfully and we should restore normal operation. Returns messages which should be sent - /// to the remote side. - pub fn monitor_updating_restored( - &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash, - user_config: &UserConfig, best_block_height: u32 - ) -> MonitorRestoreUpdates - where - L::Target: Logger, - NS::Target: NodeSigner - { - assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32); - self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32); - let mut found_blocked = false; - self.context.pending_monitor_updates.retain(|upd| { - if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); } - if upd.blocked { found_blocked = true; } - upd.blocked - }); - - // If we're past (or at) the FundingSent stage on an outbound channel, try to - // (re-)broadcast the funding transaction as we may have declined to broadcast it when we - // first received the funding_signed. - let mut funding_broadcastable = - if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 { - self.context.funding_transaction.take() - } else { None }; - // That said, if the funding transaction is already confirmed (ie we're active with a - // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx. - if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) { - funding_broadcastable = None; - } - - // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress - // (and we assume the user never directly broadcasts the funding transaction and waits for - // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're - // * an inbound channel that failed to persist the monitor on funding_created and we got - // the funding transaction confirmed before the monitor was persisted, or - // * a 0-conf channel and intended to send the channel_ready before any broadcast at all. 
- let channel_ready = if self.context.monitor_pending_channel_ready { - assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0), - "Funding transaction broadcast by the local client before it should have - LDK didn't do it!"); - self.context.monitor_pending_channel_ready = false; + let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 { + // We should never have to worry about MonitorUpdateInProgress resending ChannelReady let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); Some(msgs::ChannelReady { channel_id: self.context.channel_id(), @@ -4156,671 +4113,382 @@ impl Channel { }) } else { None }; - let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger); + if msg.next_local_commitment_number == next_counterparty_commitment_number { + if required_revoke.is_some() { + log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id())); + } else { + log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id())); + } - let mut accepted_htlcs = Vec::new(); - mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards); - let mut failed_htlcs = Vec::new(); - mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures); - let mut finalized_claimed_htlcs = Vec::new(); - mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills); + Ok(ReestablishResponses { + channel_ready, shutdown_msg, announcement_sigs, + raa: required_revoke, + commitment_update: None, + order: self.context.resend_order.clone(), + }) + } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { + if required_revoke.is_some() { + log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id())); + } else { + log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id())); + } - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 { - self.context.monitor_pending_revoke_and_ack = false; - self.context.monitor_pending_commitment_signed = false; - return MonitorRestoreUpdates { - raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst, - accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs - }; + if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { + self.context.monitor_pending_commitment_signed = true; + Ok(ReestablishResponses { + channel_ready, shutdown_msg, announcement_sigs, + commitment_update: None, raa: None, + order: self.context.resend_order.clone(), + }) + } else { + Ok(ReestablishResponses { + channel_ready, shutdown_msg, announcement_sigs, + raa: required_revoke, + commitment_update: Some(self.get_last_commitment_update(logger)), + order: self.context.resend_order.clone(), + }) + } + } else { + Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned())) } + } - let raa = if self.context.monitor_pending_revoke_and_ack { - Some(self.get_last_revoke_and_ack()) - } else { None }; - let commitment_update = if 
self.context.monitor_pending_commitment_signed { - self.mark_awaiting_response(); - Some(self.get_last_commitment_update(logger)) - } else { None }; + /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole + /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart, + /// at which point they will be recalculated. + fn calculate_closing_fee_limits(&mut self, fee_estimator: &LowerBoundedFeeEstimator) + -> (u64, u64) + where F::Target: FeeEstimator + { + if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); } - self.context.monitor_pending_revoke_and_ack = false; - self.context.monitor_pending_commitment_signed = false; - let order = self.context.resend_order.clone(); - log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first", - log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" }, - if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, - match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); - MonitorRestoreUpdates { - raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs + // Propose a range from our current Background feerate to our Normal feerate plus our + // force_close_avoidance_max_fee_satoshis. + // If we fail to come to consensus, we'll have to force-close. + let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background); + let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() }; + + // The spec requires that (when the channel does not have anchors) we only send absolute + // channel fees no greater than the absolute channel fee on the current commitment + // transaction. It's unclear *which* commitment transaction this refers to, and there isn't + // very good reason to apply such a limit in any case. We don't bother doing so, risking + // some force-closure by old nodes, but we wanted to close the channel anyway. + + if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw { + let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) }; + proposed_feerate = cmp::max(proposed_feerate, min_feerate); + proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate); } + + // Note that technically we could end up with a lower minimum fee if one sides' balance is + // below our dust limit, causing the output to disappear. We don't bother handling this + // case, however, as this should only happen if a channel is closed before any (material) + // payments have been made on it. This may cause slight fee overpayment and/or failure to + // come to consensus with our counterparty on appropriate fees, however it should be a + // relatively rare case. We can revisit this later, though note that in order to determine + // if the funders' output is dust we have to know the absolute fee we're going to use. 
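To make the proposed closing-fee range concrete: for the funder, the minimum is what the low-priority (Background) estimate says the closing transaction costs, and the maximum is the Normal estimate plus the configured force-close-avoidance buffer (the non-funder instead caps its maximum at everything on the funder's side of the channel). A minimal sketch of the funder-side computation with hypothetical inputs, not the LDK function itself:

    /// Closing-fee limits, in satoshis, for the funder of a channel: the minimum is the
    /// low-priority estimate applied to the closing transaction, the maximum is the normal
    /// estimate plus the force-close-avoidance buffer. All inputs are hypothetical stand-ins.
    fn funder_closing_fee_limits(
        background_feerate_per_kw: u64, normal_feerate_per_kw: u64, tx_weight: u64,
        force_close_avoidance_max_fee_sat: u64,
    ) -> (u64, u64) {
        let min_fee_sat = background_feerate_per_kw * tx_weight / 1000;
        let max_fee_sat = normal_feerate_per_kw * tx_weight / 1000 + force_close_avoidance_max_fee_sat;
        (min_fee_sat, max_fee_sat)
    }

    fn main() {
        // e.g. a ~700 weight-unit closing tx, 253 vs 1000 sat-per-kW estimates, 1000 sat buffer.
        let (min_fee, max_fee) = funder_closing_fee_limits(253, 1000, 700, 1_000);
        assert_eq!((min_fee, max_fee), (177, 1700));
        assert!(min_fee <= max_fee);
    }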
+ let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap())); + let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000; + let proposed_max_total_fee_satoshis = if self.context.is_outbound() { + // We always add force_close_avoidance_max_fee_satoshis to our normal + // feerate-calculated fee, but allow the max to be overridden if we're using a + // target feerate-calculated fee. + cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis, + proposed_max_feerate as u64 * tx_weight / 1000) + } else { + self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000 + }; + + self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis)); + self.context.closing_fee_limits.clone().unwrap() } - pub fn update_fee(&mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError> - where F::Target: FeeEstimator, L::Target: Logger - { - if self.context.is_outbound() { - return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned())); - } - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned())); - } - Channel::::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?; - let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None); + /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true + /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At + /// this point if we're the funder we should send the initial closing_signed, and in any case + /// shutdown should complete within a reasonable timeframe. + fn closing_negotiation_ready(&self) -> bool { + self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() && + self.context.channel_state & + (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 | + ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) + == BOTH_SIDES_SHUTDOWN_MASK && + self.context.pending_update_fee.is_none() + } - self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced)); - self.context.update_time_counter += 1; - // If the feerate has increased over the previous dust buffer (note that - // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we - // won't be pushed over our dust exposure limit by the feerate increase. 
- if feerate_over_dust_buffer { - let inbound_stats = self.context.get_inbound_pending_htlc_stats(None); - let outbound_stats = self.context.get_outbound_pending_htlc_stats(None); - let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; - let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; - if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() { - return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)", - msg.feerate_per_kw, holder_tx_dust_exposure))); - } - if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() { - return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)", - msg.feerate_per_kw, counterparty_tx_dust_exposure))); + /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning + /// an Err if no progress is being made and the channel should be force-closed instead. + /// Should be called on a one-minute timer. + pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> { + if self.closing_negotiation_ready() { + if self.context.closing_signed_in_flight { + return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned())); + } else { + self.context.closing_signed_in_flight = true; } } Ok(()) } - fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK { - let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - let per_commitment_secret = self.context.holder_signer.release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2); - msgs::RevokeAndACK { - channel_id: self.context.channel_id, - per_commitment_secret, - next_per_commitment_point, - #[cfg(taproot)] - next_local_nonce: None, - } - } - - fn get_last_commitment_update(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger { - let mut update_add_htlcs = Vec::new(); - let mut update_fulfill_htlcs = Vec::new(); - let mut update_fail_htlcs = Vec::new(); - let mut update_fail_malformed_htlcs = Vec::new(); - - for htlc in self.context.pending_outbound_htlcs.iter() { - if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state { - update_add_htlcs.push(msgs::UpdateAddHTLC { - channel_id: self.context.channel_id(), - htlc_id: htlc.htlc_id, - amount_msat: htlc.amount_msat, - payment_hash: htlc.payment_hash, - cltv_expiry: htlc.cltv_expiry, - onion_routing_packet: (**onion_packet).clone(), - }); - } + pub fn maybe_propose_closing_signed( + &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L) + -> Result<(Option, Option), ChannelError> + where F::Target: FeeEstimator, L::Target: Logger + { + if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() { + return Ok((None, None)); } - for htlc in self.context.pending_inbound_htlcs.iter() { - if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state { - match reason { - &InboundHTLCRemovalReason::FailRelay(ref err_packet) => { - update_fail_htlcs.push(msgs::UpdateFailHTLC { - channel_id: self.context.channel_id(), - htlc_id: 
htlc.htlc_id, - reason: err_packet.clone() - }); - }, - &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => { - update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC { - channel_id: self.context.channel_id(), - htlc_id: htlc.htlc_id, - sha256_of_onion: sha256_of_onion.clone(), - failure_code: failure_code.clone(), - }); - }, - &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => { - update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC { - channel_id: self.context.channel_id(), - htlc_id: htlc.htlc_id, - payment_preimage: payment_preimage.clone(), - }); - }, - } + if !self.context.is_outbound() { + if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() { + return self.closing_signed(fee_estimator, &msg); } + return Ok((None, None)); } - let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() { - Some(msgs::UpdateFee { - channel_id: self.context.channel_id(), - feerate_per_kw: self.context.pending_update_fee.unwrap().0, - }) - } else { None }; + let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator); - log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", - log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" }, - update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); - msgs::CommitmentUpdate { - update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, - commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0, - } + assert!(self.context.shutdown_scriptpubkey.is_some()); + let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false); + log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)", + our_min_fee, our_max_fee, total_fee_satoshis); + + let sig = self.context.holder_signer + .sign_closing_transaction(&closing_tx, &self.context.secp_ctx) + .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?; + + self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone())); + Ok((Some(msgs::ClosingSigned { + channel_id: self.context.channel_id, + fee_satoshis: total_fee_satoshis, + signature: sig, + fee_range: Some(msgs::ClosingSignedFeeRange { + min_fee_satoshis: our_min_fee, + max_fee_satoshis: our_max_fee, + }), + }), None)) } - /// May panic if some calls other than message-handling calls (which will all Err immediately) - /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call. + // Marks a channel as waiting for a response from the counterparty. If it's not received + // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt + // a reconnection. + fn mark_awaiting_response(&mut self) { + self.context.sent_message_awaiting_response = Some(0); + } + + /// Determines whether we should disconnect the counterparty due to not receiving a response + /// within our expected timeframe. 
/// - /// Some links printed in log lines are included here to check them during build (when run with - /// `cargo doc --document-private-items`): - /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and - /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`]. - pub fn channel_reestablish( - &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, - genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock - ) -> Result - where - L::Target: Logger, - NS::Target: NodeSigner + /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`]. + pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool { + let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() { + ticks_elapsed + } else { + // Don't disconnect when we're not waiting on a response. + return false; + }; + *ticks_elapsed += 1; + *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS + } + + pub fn shutdown( + &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown + ) -> Result<(Option, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError> + where SP::Target: SignerProvider { - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 { - // While BOLT 2 doesn't indicate explicitly we should error this channel here, it - // almost certainly indicates we are going to end up out-of-sync in some way, so we - // just close here instead of trying to recover. - return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned())); + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned())); + } + if self.context.channel_state < ChannelState::FundingSent as u32 { + // Spec says we should fail the connection, not the channel, but that's nonsense, there + // are plenty of reasons you may want to fail a channel pre-funding, and spec says you + // can do that via error message without getting a connection fail anyway... 
+ return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned())); + } + for htlc in self.context.pending_inbound_htlcs.iter() { + if let InboundHTLCState::RemoteAnnounced(_) = htlc.state { + return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned())); + } } + assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); - if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER || - msg.next_local_commitment_number == 0 { - return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned())); + if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) { + return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex()))); } - if msg.next_remote_commitment_number > 0 { - let expected_point = self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx); - let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret) - .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?; - if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) { - return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned())); - } - if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number { - macro_rules! log_and_panic { - ($err_msg: expr) => { - log_error!(logger, $err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id)); - panic!($err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id)); - } - } - log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\ - This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\ - More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\ - If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\ - ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\ - ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\ - Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\ - See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info."); + if self.context.counterparty_shutdown_scriptpubkey.is_some() { + if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() { + return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex()))); } + } else { + self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone()); } - // Before we change the state of the channel, we check if the peer is sending a very old 
- // commitment transaction number, if yes we send a warning message. - let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1; - if msg.next_remote_commitment_number + 1 < our_commitment_transaction { - return Err( - ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction)) - ); - } + // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc + // immediately after the commitment dance, but we can send a Shutdown because we won't send + // any further commitment updates after we set LocalShutdownSent. + let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32; - // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all - // remaining cases either succeed or ErrorMessage-fail). - self.context.channel_state &= !(ChannelState::PeerDisconnected as u32); - self.context.sent_message_awaiting_response = None; + let update_shutdown_script = match self.context.shutdown_scriptpubkey { + Some(_) => false, + None => { + assert!(send_shutdown); + let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => scriptpubkey, + Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())), + }; + if !shutdown_scriptpubkey.is_compatible(their_features) { + return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey))); + } + self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey); + true + }, + }; - let shutdown_msg = if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 { - assert!(self.context.shutdown_scriptpubkey.is_some()); + // From here on out, we may not fail! + + self.context.channel_state |= ChannelState::RemoteShutdownSent as u32; + self.context.update_time_counter += 1; + + let monitor_update = if update_shutdown_script { + self.context.latest_monitor_update_id += 1; + let monitor_update = ChannelMonitorUpdate { + update_id: self.context.latest_monitor_update_id, + updates: vec![ChannelMonitorUpdateStep::ShutdownScript { + scriptpubkey: self.get_closing_scriptpubkey(), + }], + }; + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + if self.push_blockable_mon_update(monitor_update) { + self.context.pending_monitor_updates.last().map(|upd| &upd.update) + } else { None } + } else { None }; + let shutdown = if send_shutdown { Some(msgs::Shutdown { channel_id: self.context.channel_id, scriptpubkey: self.get_closing_scriptpubkey(), }) } else { None }; - let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger); - - if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 { - // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's. 
- if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 || - self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { - if msg.next_remote_commitment_number != 0 { - return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned())); - } - // Short circuit the whole handler as there is nothing we can resend them - return Ok(ReestablishResponses { - channel_ready: None, - raa: None, commitment_update: None, - order: RAACommitmentOrder::CommitmentFirst, - shutdown_msg, announcement_sigs, - }); + // We can't send our shutdown until we've committed all of our pending HTLCs, but the + // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding + // cell HTLCs and return them to fail the payment. + self.context.holding_cell_update_fee = None; + let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len()); + self.context.holding_cell_htlc_updates.retain(|htlc_update| { + match htlc_update { + &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => { + dropped_outbound_htlcs.push((source.clone(), payment_hash.clone())); + false + }, + _ => true } + }); - // We have OurChannelReady set! - let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - return Ok(ReestablishResponses { - channel_ready: Some(msgs::ChannelReady { - channel_id: self.context.channel_id(), - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }), - raa: None, commitment_update: None, - order: RAACommitmentOrder::CommitmentFirst, - shutdown_msg, announcement_sigs, - }); - } - - let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number { - // Remote isn't waiting on any RevokeAndACK from us! - // Note that if we need to repeat our ChannelReady we'll do that in the next if block. - None - } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number { - if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { - self.context.monitor_pending_revoke_and_ack = true; - None - } else { - Some(self.get_last_revoke_and_ack()) - } - } else { - return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned())); - }; - - // We increment cur_counterparty_commitment_transaction_number only upon receipt of - // revoke_and_ack, not on sending commitment_signed, so we add one if have - // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten - // the corresponding revoke_and_ack back yet. 
- let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0; - if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() { - self.mark_awaiting_response(); - } - let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 }; + self.context.channel_state |= ChannelState::LocalShutdownSent as u32; + self.context.update_time_counter += 1; - let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 { - // We should never have to worry about MonitorUpdateInProgress resending ChannelReady - let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - Some(msgs::ChannelReady { - channel_id: self.context.channel_id(), - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }) - } else { None }; + Ok((shutdown, monitor_update, dropped_outbound_htlcs)) + } - if msg.next_local_commitment_number == next_counterparty_commitment_number { - if required_revoke.is_some() { - log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id())); - } else { - log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id())); - } + fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction { + let mut tx = closing_tx.trust().built_transaction().clone(); - Ok(ReestablishResponses { - channel_ready, shutdown_msg, announcement_sigs, - raa: required_revoke, - commitment_update: None, - order: self.context.resend_order.clone(), - }) - } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { - if required_revoke.is_some() { - log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id())); - } else { - log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id())); - } + tx.input[0].witness.push(Vec::new()); // First is the multisig dummy - if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { - self.context.monitor_pending_commitment_signed = true; - Ok(ReestablishResponses { - channel_ready, shutdown_msg, announcement_sigs, - commitment_update: None, raa: None, - order: self.context.resend_order.clone(), - }) - } else { - Ok(ReestablishResponses { - channel_ready, shutdown_msg, announcement_sigs, - raa: required_revoke, - commitment_update: Some(self.get_last_commitment_update(logger)), - order: self.context.resend_order.clone(), - }) - } + let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize(); + let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize(); + let mut holder_sig = sig.serialize_der().to_vec(); + holder_sig.push(EcdsaSighashType::All as u8); + let mut cp_sig = counterparty_sig.serialize_der().to_vec(); + cp_sig.push(EcdsaSighashType::All as u8); + if funding_key[..] < counterparty_funding_key[..] 
{ + tx.input[0].witness.push(holder_sig); + tx.input[0].witness.push(cp_sig); } else { - Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned())) + tx.input[0].witness.push(cp_sig); + tx.input[0].witness.push(holder_sig); } + + tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes()); + tx } - /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole - /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart, - /// at which point they will be recalculated. - fn calculate_closing_fee_limits(&mut self, fee_estimator: &LowerBoundedFeeEstimator) - -> (u64, u64) + pub fn closing_signed( + &mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::ClosingSigned) + -> Result<(Option, Option), ChannelError> where F::Target: FeeEstimator { - if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); } - - // Propose a range from our current Background feerate to our Normal feerate plus our - // force_close_avoidance_max_fee_satoshis. - // If we fail to come to consensus, we'll have to force-close. - let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background); - let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); - let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() }; - - // The spec requires that (when the channel does not have anchors) we only send absolute - // channel fees no greater than the absolute channel fee on the current commitment - // transaction. It's unclear *which* commitment transaction this refers to, and there isn't - // very good reason to apply such a limit in any case. We don't bother doing so, risking - // some force-closure by old nodes, but we wanted to close the channel anyway. 
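A rough standalone sketch of the fee arithmetic in the `calculate_closing_fee_limits` lines around this point: each closing fee proposal is simply a feerate (in sat per 1000 weight units) applied to the closing transaction's weight:

// Sketch only: closing fee proposals are feerate * weight / 1000, mirroring the
// proposed_total_fee_satoshis / proposed_max_total_fee_satoshis computations nearby.
fn closing_fee_sats(feerate_per_kw: u32, closing_tx_weight: u64) -> u64 {
    feerate_per_kw as u64 * closing_tx_weight / 1000
}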
+ if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK { + return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned())); + } + if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { + return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned())); + } + if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() { + return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned())); + } + if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction + return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned())); + } - if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw { - let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) }; - proposed_feerate = cmp::max(proposed_feerate, min_feerate); - proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate); + if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() { + return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned())); } - // Note that technically we could end up with a lower minimum fee if one sides' balance is - // below our dust limit, causing the output to disappear. We don't bother handling this - // case, however, as this should only happen if a channel is closed before any (material) - // payments have been made on it. This may cause slight fee overpayment and/or failure to - // come to consensus with our counterparty on appropriate fees, however it should be a - // relatively rare case. We can revisit this later, though note that in order to determine - // if the funders' output is dust we have to know the absolute fee we're going to use. - let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap())); - let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000; - let proposed_max_total_fee_satoshis = if self.context.is_outbound() { - // We always add force_close_avoidance_max_fee_satoshis to our normal - // feerate-calculated fee, but allow the max to be overridden if we're using a - // target feerate-calculated fee. 
- cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis, - proposed_max_feerate as u64 * tx_weight / 1000) - } else { - self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000 - }; + if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 { + self.context.pending_counterparty_closing_signed = Some(msg.clone()); + return Ok((None, None)); + } - self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis)); - self.context.closing_fee_limits.clone().unwrap() - } + let funding_redeemscript = self.context.get_funding_redeemscript(); + let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false); + if used_total_fee != msg.fee_satoshis { + return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee))); + } + let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis); - /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true - /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At - /// this point if we're the funder we should send the initial closing_signed, and in any case - /// shutdown should complete within a reasonable timeframe. - fn closing_negotiation_ready(&self) -> bool { - self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() && - self.context.channel_state & - (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 | - ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) - == BOTH_SIDES_SHUTDOWN_MASK && - self.context.pending_update_fee.is_none() - } + match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) { + Ok(_) => {}, + Err(_e) => { + // The remote end may have decided to revoke their output due to inconsistent dust + // limits, so check for that case by re-checking the signature here. + closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0; + let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis); + secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned()); + }, + }; - /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning - /// an Err if no progress is being made and the channel should be force-closed instead. - /// Should be called on a one-minute timer. - pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> { - if self.closing_negotiation_ready() { - if self.context.closing_signed_in_flight { - return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned())); - } else { - self.context.closing_signed_in_flight = true; + for outp in closing_tx.trust().built_transaction().output.iter() { + if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. 
Always use segwit closing scripts!".to_owned())); } } - Ok(()) - } - - pub fn maybe_propose_closing_signed( - &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L) - -> Result<(Option, Option), ChannelError> - where F::Target: FeeEstimator, L::Target: Logger - { - if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() { - return Ok((None, None)); - } - if !self.context.is_outbound() { - if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() { - return self.closing_signed(fee_estimator, &msg); + assert!(self.context.shutdown_scriptpubkey.is_some()); + if let Some((last_fee, sig)) = self.context.last_sent_closing_fee { + if last_fee == msg.fee_satoshis { + let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig); + self.context.channel_state = ChannelState::ShutdownComplete as u32; + self.context.update_time_counter += 1; + return Ok((None, Some(tx))); } - return Ok((None, None)); } let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator); - assert!(self.context.shutdown_scriptpubkey.is_some()); - let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false); - log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)", - our_min_fee, our_max_fee, total_fee_satoshis); + macro_rules! propose_fee { + ($new_fee: expr) => { + let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis { + (closing_tx, $new_fee) + } else { + self.build_closing_transaction($new_fee, false) + }; - let sig = self.context.holder_signer - .sign_closing_transaction(&closing_tx, &self.context.secp_ctx) - .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?; - - self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone())); - Ok((Some(msgs::ClosingSigned { - channel_id: self.context.channel_id, - fee_satoshis: total_fee_satoshis, - signature: sig, - fee_range: Some(msgs::ClosingSignedFeeRange { - min_fee_satoshis: our_min_fee, - max_fee_satoshis: our_max_fee, - }), - }), None)) - } - - // Marks a channel as waiting for a response from the counterparty. If it's not received - // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt - // a reconnection. - fn mark_awaiting_response(&mut self) { - self.context.sent_message_awaiting_response = Some(0); - } - - /// Determines whether we should disconnect the counterparty due to not receiving a response - /// within our expected timeframe. - /// - /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`]. - pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool { - let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() { - ticks_elapsed - } else { - // Don't disconnect when we're not waiting on a response. 
- return false; - }; - *ticks_elapsed += 1; - *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - } - - pub fn shutdown( - &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown - ) -> Result<(Option, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError> - where SP::Target: SignerProvider - { - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned())); - } - if self.context.channel_state < ChannelState::FundingSent as u32 { - // Spec says we should fail the connection, not the channel, but that's nonsense, there - // are plenty of reasons you may want to fail a channel pre-funding, and spec says you - // can do that via error message without getting a connection fail anyway... - return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned())); - } - for htlc in self.context.pending_inbound_htlcs.iter() { - if let InboundHTLCState::RemoteAnnounced(_) = htlc.state { - return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned())); - } - } - assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); - - if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) { - return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex()))); - } - - if self.context.counterparty_shutdown_scriptpubkey.is_some() { - if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() { - return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex()))); - } - } else { - self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone()); - } - - // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc - // immediately after the commitment dance, but we can send a Shutdown because we won't send - // any further commitment updates after we set LocalShutdownSent. - let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32; - - let update_shutdown_script = match self.context.shutdown_scriptpubkey { - Some(_) => false, - None => { - assert!(send_shutdown); - let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() { - Ok(scriptpubkey) => scriptpubkey, - Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())), - }; - if !shutdown_scriptpubkey.is_compatible(their_features) { - return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey))); - } - self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey); - true - }, - }; - - // From here on out, we may not fail! 
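A rough standalone sketch of the awaiting-response pattern behind `mark_awaiting_response` / `should_disconnect_peer_awaiting_response` above, with the tick limit passed in as a parameter (the real code compares against `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`):

// Sketch only: a counter is armed when we send a message that needs a reply and bumped on
// every timer tick; once it reaches the limit we ask to disconnect the peer.
fn should_disconnect(sent_message_awaiting_response: &mut Option<usize>, limit_ticks: usize) -> bool {
    match sent_message_awaiting_response.as_mut() {
        None => false, // Not waiting on anything, never disconnect.
        Some(ticks_elapsed) => {
            *ticks_elapsed += 1;
            *ticks_elapsed >= limit_ticks
        }
    }
}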
- - self.context.channel_state |= ChannelState::RemoteShutdownSent as u32; - self.context.update_time_counter += 1; - - let monitor_update = if update_shutdown_script { - self.context.latest_monitor_update_id += 1; - let monitor_update = ChannelMonitorUpdate { - update_id: self.context.latest_monitor_update_id, - updates: vec![ChannelMonitorUpdateStep::ShutdownScript { - scriptpubkey: self.get_closing_scriptpubkey(), - }], - }; - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); - if self.push_blockable_mon_update(monitor_update) { - self.context.pending_monitor_updates.last().map(|upd| &upd.update) - } else { None } - } else { None }; - let shutdown = if send_shutdown { - Some(msgs::Shutdown { - channel_id: self.context.channel_id, - scriptpubkey: self.get_closing_scriptpubkey(), - }) - } else { None }; - - // We can't send our shutdown until we've committed all of our pending HTLCs, but the - // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding - // cell HTLCs and return them to fail the payment. - self.context.holding_cell_update_fee = None; - let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len()); - self.context.holding_cell_htlc_updates.retain(|htlc_update| { - match htlc_update { - &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => { - dropped_outbound_htlcs.push((source.clone(), payment_hash.clone())); - false - }, - _ => true - } - }); - - self.context.channel_state |= ChannelState::LocalShutdownSent as u32; - self.context.update_time_counter += 1; - - Ok((shutdown, monitor_update, dropped_outbound_htlcs)) - } - - fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction { - let mut tx = closing_tx.trust().built_transaction().clone(); - - tx.input[0].witness.push(Vec::new()); // First is the multisig dummy - - let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize(); - let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize(); - let mut holder_sig = sig.serialize_der().to_vec(); - holder_sig.push(EcdsaSighashType::All as u8); - let mut cp_sig = counterparty_sig.serialize_der().to_vec(); - cp_sig.push(EcdsaSighashType::All as u8); - if funding_key[..] < counterparty_funding_key[..] 
{ - tx.input[0].witness.push(holder_sig); - tx.input[0].witness.push(cp_sig); - } else { - tx.input[0].witness.push(cp_sig); - tx.input[0].witness.push(holder_sig); - } - - tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes()); - tx - } - - pub fn closing_signed( - &mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::ClosingSigned) - -> Result<(Option, Option), ChannelError> - where F::Target: FeeEstimator - { - if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK { - return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned())); - } - if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { - return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned())); - } - if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() { - return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned())); - } - if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction - return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned())); - } - - if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() { - return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned())); - } - - if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 { - self.context.pending_counterparty_closing_signed = Some(msg.clone()); - return Ok((None, None)); - } - - let funding_redeemscript = self.context.get_funding_redeemscript(); - let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false); - if used_total_fee != msg.fee_satoshis { - return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee))); - } - let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis); - - match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) { - Ok(_) => {}, - Err(_e) => { - // The remote end may have decided to revoke their output due to inconsistent dust - // limits, so check for that case by re-checking the signature here. - closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0; - let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis); - secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned()); - }, - }; - - for outp in closing_tx.trust().built_transaction().output.iter() { - if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. 
Always use segwit closing scripts!".to_owned())); - } - } - - assert!(self.context.shutdown_scriptpubkey.is_some()); - if let Some((last_fee, sig)) = self.context.last_sent_closing_fee { - if last_fee == msg.fee_satoshis { - let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig); - self.context.channel_state = ChannelState::ShutdownComplete as u32; - self.context.update_time_counter += 1; - return Ok((None, Some(tx))); - } - } - - let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator); - - macro_rules! propose_fee { - ($new_fee: expr) => { - let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis { - (closing_tx, $new_fee) - } else { - self.build_closing_transaction($new_fee, false) - }; - - let sig = self.context.holder_signer - .sign_closing_transaction(&closing_tx, &self.context.secp_ctx) - .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?; + let sig = self.context.holder_signer + .sign_closing_transaction(&closing_tx, &self.context.secp_ctx) + .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?; let signed_tx = if $new_fee == msg.fee_satoshis { self.context.channel_state = ChannelState::ShutdownComplete as u32; @@ -5368,972 +5036,1305 @@ impl Channel { // We never learned about the funding confirmation anyway, just ignore Ok(()) } - } + } + + // Methods to get unprompted messages to send to the remote end (or where we already returned + // something in the handler for the message that prompted this message): + + pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel { + if !self.context.is_outbound() { + panic!("Tried to open a channel for an inbound channel?"); + } + if self.context.channel_state != ChannelState::OurInitSent as u32 { + panic!("Cannot generate an open_channel after we've moved forward"); + } + + if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + panic!("Tried to send an open_channel for a channel that has already advanced"); + } + + let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let keys = self.context.get_holder_pubkeys(); + + msgs::OpenChannel { + chain_hash, + temporary_channel_id: self.context.channel_id, + funding_satoshis: self.context.channel_value_satoshis, + push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat, + dust_limit_satoshis: self.context.holder_dust_limit_satoshis, + max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, + channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, + htlc_minimum_msat: self.context.holder_htlc_minimum_msat, + feerate_per_kw: self.context.feerate_per_kw as u32, + to_self_delay: self.context.get_holder_selected_contest_delay(), + max_accepted_htlcs: self.context.holder_max_accepted_htlcs, + funding_pubkey: keys.funding_pubkey, + revocation_basepoint: keys.revocation_basepoint, + payment_point: keys.payment_point, + delayed_payment_basepoint: keys.delayed_payment_basepoint, + htlc_basepoint: keys.htlc_basepoint, + first_per_commitment_point, + channel_flags: if self.context.config.announced_channel {1} else {0}, + shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { + Some(script) => script.clone().into_inner(), + None => Builder::new().into_script(), + }), + channel_type: 
Some(self.context.channel_type.clone()), + } + } + + pub fn inbound_is_awaiting_accept(&self) -> bool { + self.context.inbound_awaiting_accept + } + + /// Sets this channel to accepting 0conf, must be done before `get_accept_channel` + pub fn set_0conf(&mut self) { + assert!(self.context.inbound_awaiting_accept); + self.context.minimum_depth = Some(0); + } + + /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which + /// should be sent back to the counterparty node. + /// + /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel + pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel { + if self.context.is_outbound() { + panic!("Tried to send accept_channel for an outbound channel?"); + } + if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) { + panic!("Tried to send accept_channel after channel had moved forward"); + } + if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + panic!("Tried to send an accept_channel for a channel that has already advanced"); + } + if !self.context.inbound_awaiting_accept { + panic!("The inbound channel has already been accepted"); + } + + self.context.user_id = user_id; + self.context.inbound_awaiting_accept = false; + + self.generate_accept_channel_message() + } + + /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an + /// inbound channel. If the intention is to accept an inbound channel, use + /// [`Channel::accept_inbound_channel`] instead. + /// + /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel + fn generate_accept_channel_message(&self) -> msgs::AcceptChannel { + let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let keys = self.context.get_holder_pubkeys(); + + msgs::AcceptChannel { + temporary_channel_id: self.context.channel_id, + dust_limit_satoshis: self.context.holder_dust_limit_satoshis, + max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, + channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, + htlc_minimum_msat: self.context.holder_htlc_minimum_msat, + minimum_depth: self.context.minimum_depth.unwrap(), + to_self_delay: self.context.get_holder_selected_contest_delay(), + max_accepted_htlcs: self.context.holder_max_accepted_htlcs, + funding_pubkey: keys.funding_pubkey, + revocation_basepoint: keys.revocation_basepoint, + payment_point: keys.payment_point, + delayed_payment_basepoint: keys.delayed_payment_basepoint, + htlc_basepoint: keys.htlc_basepoint, + first_per_commitment_point, + shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { + Some(script) => script.clone().into_inner(), + None => Builder::new().into_script(), + }), + channel_type: Some(self.context.channel_type.clone()), + #[cfg(taproot)] + next_local_nonce: None, + } + } + + /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an + /// inbound channel without accepting it. 
+ ///
+ /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
+ #[cfg(test)]
+ pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
+ self.generate_accept_channel_message()
+ }
+
+ /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
+ fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+ Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
+ .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
+ }
+
+ /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
+ /// a funding_created message for the remote peer.
+ /// Panics if called at some time other than immediately after initial handshake, if called twice,
+ /// or if called on an inbound channel.
+ /// Note that channel_id changes during this call!
+ /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
+ /// If an Err is returned, it is a ChannelError::Close.
+ pub fn get_outbound_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result<msgs::FundingCreated, ChannelError> where L::Target: Logger {
+ if !self.context.is_outbound() {
+ panic!("Tried to create outbound funding_created message on an inbound channel!");
+ }
+ if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+ panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
+ }
+ if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+ self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+ self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+ }
+
+ self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
+ self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+
+ let signature = match self.get_outbound_funding_created_signature(logger) {
+ Ok(res) => res,
+ Err(e) => {
+ log_error!(logger, "Got bad signatures: {:?}!", e);
+ self.context.channel_transaction_parameters.funding_outpoint = None;
+ return Err(e);
+ }
+ };
+
+ let temporary_channel_id = self.context.channel_id;
+
+ // Now that we're past error-generating stuff, update our local state:
+
+ self.context.channel_state = ChannelState::FundingCreated as u32;
+ self.context.channel_id = funding_txo.to_channel_id();
+ self.context.funding_transaction = Some(funding_transaction);
+
+ Ok(msgs::FundingCreated {
+ temporary_channel_id,
+ funding_txid: funding_txo.txid,
+ funding_output_index: funding_txo.index,
+ signature,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
+ #[cfg(taproot)]
+ next_local_nonce: None,
+ })
+ }
+
+ /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
+ /// announceable and available for use (have exchanged ChannelReady messages in both
+ /// directions).
Should be used for both broadcasted announcements and in response to an + /// AnnouncementSignatures message from the remote peer. + /// + /// Will only fail if we're not in a state where channel_announcement may be sent (including + /// closing). + /// + /// This will only return ChannelError::Ignore upon failure. + fn get_channel_announcement( + &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig, + ) -> Result where NS::Target: NodeSigner { + if !self.context.config.announced_channel { + return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned())); + } + if !self.context.is_usable() { + return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned())); + } + + let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node) + .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?); + let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id()); + let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice(); + + let msg = msgs::UnsignedChannelAnnouncement { + features: channelmanager::provided_channel_features(&user_config), + chain_hash, + short_channel_id: self.context.get_short_channel_id().unwrap(), + node_id_1: if were_node_one { node_id } else { counterparty_node_id }, + node_id_2: if were_node_one { counterparty_node_id } else { node_id }, + bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }), + bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }), + excess_data: Vec::new(), + }; + + Ok(msg) + } + + fn get_announcement_sigs( + &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig, + best_block_height: u32, logger: &L + ) -> Option + where + NS::Target: NodeSigner, + L::Target: Logger + { + if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height { + return None; + } + + if !self.context.is_usable() { + return None; + } + + if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 { + log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected"); + return None; + } + + if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent { + return None; + } + + log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id())); + let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) { + Ok(a) => a, + Err(e) => { + log_trace!(logger, "{:?}", e); + return None; + } + }; + let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) { + Err(_) => { + log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!"); + return None; + }, + Ok(v) => v + }; + let our_bitcoin_sig = match self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) { + Err(_) => { + log_error!(logger, "Signer rejected channel_announcement signing. 
Channel will not be announced!"); + return None; + }, + Ok(v) => v + }; + self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent; + + Some(msgs::AnnouncementSignatures { + channel_id: self.context.channel_id(), + short_channel_id: self.context.get_short_channel_id().unwrap(), + node_signature: our_node_sig, + bitcoin_signature: our_bitcoin_sig, + }) + } + + /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are + /// available. + fn sign_channel_announcement( + &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement + ) -> Result where NS::Target: NodeSigner { + if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs { + let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node) + .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?); + let were_node_one = announcement.node_id_1 == our_node_key; + + let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) + .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?; + let our_bitcoin_sig = self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) + .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?; + Ok(msgs::ChannelAnnouncement { + node_signature_1: if were_node_one { our_node_sig } else { their_node_sig }, + node_signature_2: if were_node_one { their_node_sig } else { our_node_sig }, + bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig }, + bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig }, + contents: announcement, + }) + } else { + Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string())) + } + } + + /// Processes an incoming announcement_signatures message, providing a fully-signed + /// channel_announcement message which we can broadcast and storing our counterparty's + /// signatures for later reconstruction/rebroadcast of the channel_announcement. + pub fn announcement_signatures( + &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, + msg: &msgs::AnnouncementSignatures, user_config: &UserConfig + ) -> Result where NS::Target: NodeSigner { + let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?; + + let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]); + + if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() { + return Err(ChannelError::Close(format!( + "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}", + &announcement, self.context.get_counterparty_node_id()))); + } + if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() { + return Err(ChannelError::Close(format!( + "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. 
their_bitcoin_key is ({:?})", + &announcement, self.context.counterparty_funding_pubkey()))); + } + + self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature)); + if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height { + return Err(ChannelError::Ignore( + "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned())); + } + + self.sign_channel_announcement(node_signer, announcement) + } + + /// Gets a signed channel_announcement for this channel, if we previously received an + /// announcement_signatures from our counterparty. + pub fn get_signed_channel_announcement( + &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig + ) -> Option where NS::Target: NodeSigner { + if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height { + return None; + } + let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) { + Ok(res) => res, + Err(_) => return None, + }; + match self.sign_channel_announcement(node_signer, announcement) { + Ok(res) => Some(res), + Err(_) => None, + } + } + + /// May panic if called on a channel that wasn't immediately-previously + /// self.remove_uncommitted_htlcs_and_mark_paused()'d + pub fn get_channel_reestablish(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger { + assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32); + assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER); + // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming + // current to_remote balances. However, it no longer has any use, and thus is now simply + // set to a dummy (but valid, as required by the spec) public key. + // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey" + // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both + // valid, and valid in fuzzing mode's arbitrary validity criteria: + let mut pk = [2; 33]; pk[1] = 0xff; + let dummy_pubkey = PublicKey::from_slice(&pk).unwrap(); + let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { + let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap(); + log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id())); + remote_last_secret + } else { + log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id())); + [0;32] + }; + self.mark_awaiting_response(); + msgs::ChannelReestablish { + channel_id: self.context.channel_id(), + // The protocol has two different commitment number concepts - the "commitment + // transaction number", which starts from 0 and counts up, and the "revocation key + // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. 
We track + // commitment transaction numbers by the index which will be used to reveal the + // revocation key for that commitment transaction, which means we have to convert them + // to protocol-level commitment numbers here... + + // next_local_commitment_number is the next commitment_signed number we expect to + // receive (indicating if they need to resend one that we missed). + next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number, + // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to + // receive, however we track it by the next commitment number for a remote transaction + // (which is one further, as they always revoke previous commitment transaction, not + // the one we send) so we have to decrement by 1. Note that if + // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have + // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't + // overflow here. + next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1, + your_last_per_commitment_secret: remote_last_secret, + my_current_per_commitment_point: dummy_pubkey, + // TODO(dual_funding): If we've sent `commtiment_signed` for an interactive transaction + // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the + // txid of that interactive transaction, else we MUST NOT set it. + next_funding_txid: None, + } + } + + + // Send stuff to our remote peers: + + /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call + /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the + /// commitment update. + /// + /// `Err`s will only be [`ChannelError::Ignore`]. + pub fn queue_add_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, + onion_routing_packet: msgs::OnionPacket, logger: &L) + -> Result<(), ChannelError> where L::Target: Logger { + self + .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, logger) + .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?")) + .map_err(|err| { + if let ChannelError::Ignore(_) = err { /* fine */ } + else { debug_assert!(false, "Queueing cannot trigger channel failure"); } + err + }) + } + + /// Adds a pending outbound HTLC to this channel, note that you probably want + /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once. + /// + /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on + /// the wire: + /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we + /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates + /// awaiting ACK. + /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as + /// we may not yet have sent the previous commitment update messages and will need to + /// regenerate them. + /// + /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods + /// on this [`Channel`] if `force_holding_cell` is false. + /// + /// `Err`s will only be [`ChannelError::Ignore`]. 
+ fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, + onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L) + -> Result, ChannelError> where L::Target: Logger { + if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) { + return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned())); + } + let channel_total_msat = self.context.channel_value_satoshis * 1000; + if amount_msat > channel_total_msat { + return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat))); + } - // Methods to get unprompted messages to send to the remote end (or where we already returned - // something in the handler for the message that prompted this message): + if amount_msat == 0 { + return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned())); + } - pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel { - if !self.context.is_outbound() { - panic!("Tried to open a channel for an inbound channel?"); + let available_balances = self.context.get_available_balances(); + if amount_msat < available_balances.next_outbound_htlc_minimum_msat { + return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat", + available_balances.next_outbound_htlc_minimum_msat))); } - if self.context.channel_state != ChannelState::OurInitSent as u32 { - panic!("Cannot generate an open_channel after we've moved forward"); + + if amount_msat > available_balances.next_outbound_htlc_limit_msat { + return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat", + available_balances.next_outbound_htlc_limit_msat))); } - if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { - panic!("Tried to send an open_channel for a channel that has already advanced"); + if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 { + // Note that this should never really happen, if we're !is_live() on receipt of an + // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow + // the user to send directly into a !is_live() channel. However, if we + // disconnected during the time the previous hop was doing the commitment dance we may + // end up getting here after the forwarding delay. In any case, returning an + // IgnoreError will get ChannelManager to do the right thing and fail backwards now. 
+ return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned())); } - let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - let keys = self.context.get_holder_pubkeys(); + let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0; + log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat, + if force_holding_cell { "into holding cell" } + else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" } + else { "to peer" }); - msgs::OpenChannel { - chain_hash, - temporary_channel_id: self.context.channel_id, - funding_satoshis: self.context.channel_value_satoshis, - push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat, - dust_limit_satoshis: self.context.holder_dust_limit_satoshis, - max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, - channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, - htlc_minimum_msat: self.context.holder_htlc_minimum_msat, - feerate_per_kw: self.context.feerate_per_kw as u32, - to_self_delay: self.context.get_holder_selected_contest_delay(), - max_accepted_htlcs: self.context.holder_max_accepted_htlcs, - funding_pubkey: keys.funding_pubkey, - revocation_basepoint: keys.revocation_basepoint, - payment_point: keys.payment_point, - delayed_payment_basepoint: keys.delayed_payment_basepoint, - htlc_basepoint: keys.htlc_basepoint, - first_per_commitment_point, - channel_flags: if self.context.config.announced_channel {1} else {0}, - shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { - Some(script) => script.clone().into_inner(), - None => Builder::new().into_script(), - }), - channel_type: Some(self.context.channel_type.clone()), + if need_holding_cell { + force_holding_cell = true; } - } - pub fn inbound_is_awaiting_accept(&self) -> bool { - self.context.inbound_awaiting_accept - } + // Now update local state: + if force_holding_cell { + self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC { + amount_msat, + payment_hash, + cltv_expiry, + source, + onion_routing_packet, + }); + return Ok(None); + } - /// Sets this channel to accepting 0conf, must be done before `get_accept_channel` - pub fn set_0conf(&mut self) { - assert!(self.context.inbound_awaiting_accept); - self.context.minimum_depth = Some(0); + self.context.pending_outbound_htlcs.push(OutboundHTLCOutput { + htlc_id: self.context.next_holder_htlc_id, + amount_msat, + payment_hash: payment_hash.clone(), + cltv_expiry, + state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())), + source, + }); + + let res = msgs::UpdateAddHTLC { + channel_id: self.context.channel_id, + htlc_id: self.context.next_holder_htlc_id, + amount_msat, + payment_hash, + cltv_expiry, + onion_routing_packet, + }; + self.context.next_holder_htlc_id += 1; + + Ok(Some(res)) } - /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which - /// should be sent back to the counterparty node. 
- /// - /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel - pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel { - if self.context.is_outbound() { - panic!("Tried to send accept_channel for an outbound channel?"); - } - if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) { - panic!("Tried to send accept_channel after channel had moved forward"); + fn build_commitment_no_status_check(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger { + log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed..."); + // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we + // fail to generate this, we still are at least at a position where upgrading their status + // is acceptable. + for htlc in self.context.pending_inbound_htlcs.iter_mut() { + let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state { + Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone())) + } else { None }; + if let Some(state) = new_state { + log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); + htlc.state = state; + } } - if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { - panic!("Tried to send an accept_channel for a channel that has already advanced"); + for htlc in self.context.pending_outbound_htlcs.iter_mut() { + if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state { + log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); + // Grab the preimage, if it exists, instead of cloning + let mut reason = OutboundHTLCOutcome::Success(None); + mem::swap(outcome, &mut reason); + htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason); + } } - if !self.context.inbound_awaiting_accept { - panic!("The inbound channel has already been accepted"); + if let Some((feerate, update_state)) = self.context.pending_update_fee { + if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce { + debug_assert!(!self.context.is_outbound()); + log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); + self.context.feerate_per_kw = feerate; + self.context.pending_update_fee = None; + } } + self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst; - self.context.user_id = user_id; - self.context.inbound_awaiting_accept = false; - - self.generate_accept_channel_message() - } - - /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an - /// inbound channel. If the intention is to accept an inbound channel, use - /// [`Channel::accept_inbound_channel`] instead. 
- /// - /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel - fn generate_accept_channel_message(&self) -> msgs::AcceptChannel { - let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - let keys = self.context.get_holder_pubkeys(); + let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger); + let htlcs: Vec<(HTLCOutputInCommitment, Option>)> = + htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect(); - msgs::AcceptChannel { - temporary_channel_id: self.context.channel_id, - dust_limit_satoshis: self.context.holder_dust_limit_satoshis, - max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, - channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, - htlc_minimum_msat: self.context.holder_htlc_minimum_msat, - minimum_depth: self.context.minimum_depth.unwrap(), - to_self_delay: self.context.get_holder_selected_contest_delay(), - max_accepted_htlcs: self.context.holder_max_accepted_htlcs, - funding_pubkey: keys.funding_pubkey, - revocation_basepoint: keys.revocation_basepoint, - payment_point: keys.payment_point, - delayed_payment_basepoint: keys.delayed_payment_basepoint, - htlc_basepoint: keys.htlc_basepoint, - first_per_commitment_point, - shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { - Some(script) => script.clone().into_inner(), - None => Builder::new().into_script(), - }), - channel_type: Some(self.context.channel_type.clone()), - #[cfg(taproot)] - next_local_nonce: None, + if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent { + self.context.announcement_sigs_state = AnnouncementSigsState::Committed; } - } - /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an - /// inbound channel without accepting it. 
- /// - /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel - #[cfg(test)] - pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel { - self.generate_accept_channel_message() + self.context.latest_monitor_update_id += 1; + let monitor_update = ChannelMonitorUpdate { + update_id: self.context.latest_monitor_update_id, + updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { + commitment_txid: counterparty_commitment_txid, + htlc_outputs: htlcs.clone(), + commitment_number: self.context.cur_counterparty_commitment_transaction_number, + their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap() + }] + }; + self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32; + monitor_update } - /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created) - fn get_outbound_funding_created_signature(&mut self, logger: &L) -> Result where L::Target: Logger { + fn build_commitment_no_state_update(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger { let counterparty_keys = self.context.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; - Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) - .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0) - } + let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); + let counterparty_commitment_txid = commitment_stats.tx.trust().txid(); - /// Updates channel state with knowledge of the funding transaction's txid/index, and generates - /// a funding_created message for the remote peer. - /// Panics if called at some time other than immediately after initial handshake, if called twice, - /// or if called on an inbound channel. - /// Note that channel_id changes during this call! - /// Do NOT broadcast the funding transaction until after a successful funding_signed call! - /// If an Err is returned, it is a ChannelError::Close. 
- pub fn get_outbound_funding_created(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result where L::Target: Logger { - if !self.context.is_outbound() { - panic!("Tried to create outbound funding_created message on an inbound channel!"); - } - if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { - panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)"); - } - if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || - self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { - panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); + #[cfg(any(test, fuzzing))] + { + if !self.context.is_outbound() { + let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take(); + *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None; + if let Some(info) = projected_commit_tx_info { + let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len(); + if info.total_pending_htlcs == total_pending_htlcs + && info.next_holder_htlc_id == self.context.next_holder_htlc_id + && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id + && info.feerate == self.context.feerate_per_kw { + let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.opt_anchors()); + assert_eq!(actual_fee, info.fee); + } + } + } } - self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo); - self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); + (counterparty_commitment_txid, commitment_stats.htlcs_included) + } + + /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed + /// generation when we shouldn't change HTLC/channel state. 
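The fuzz/test-only block above cross-checks a previously projected commitment fee against `commit_tx_fee_msat(feerate, num_nondust_htlcs, opt_anchors)`. As a rough illustration of what such a projection involves, here is a standalone sketch using the BOLT #3 commitment weights; the constant names mirror LDK's, but the helper itself is a simplified assumption, not the crate's actual function:

    // Assumed BOLT #3 weights; names mirror LDK's constants but this helper is a sketch,
    // not the crate's actual commit_tx_fee_msat.
    const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
    const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
    const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

    fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
        let base_weight = if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT };
        // fee_sat = weight * feerate_per_kw / 1000, then scaled up to msat.
        (base_weight + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
    }

    fn main() {
        // 253 sat/kW with 2 non-dust HTLCs and no anchors:
        // (724 + 2 * 172) * 253 / 1000 = 270 sat, i.e. 270_000 msat.
        assert_eq!(commit_tx_fee_msat(253, 2, false), 270_000);
    }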
+ fn send_commitment_no_state_update(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger { + // Get the fee tests from `build_commitment_no_state_update` + #[cfg(any(test, fuzzing))] + self.build_commitment_no_state_update(logger); + + let counterparty_keys = self.context.build_remote_transaction_keys(); + let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); + let counterparty_commitment_txid = commitment_stats.tx.trust().txid(); + let (signature, htlc_signatures); - let signature = match self.get_outbound_funding_created_signature(logger) { - Ok(res) => res, - Err(e) => { - log_error!(logger, "Got bad signatures: {:?}!", e); - self.context.channel_transaction_parameters.funding_outpoint = None; - return Err(e); + { + let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len()); + for &(ref htlc, _) in commitment_stats.htlcs_included.iter() { + htlcs.push(htlc); } - }; - let temporary_channel_id = self.context.channel_id; + let res = self.context.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx) + .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?; + signature = res.0; + htlc_signatures = res.1; - // Now that we're past error-generating stuff, update our local state: + log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", + encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction), + &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()), + log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id())); - self.context.channel_state = ChannelState::FundingCreated as u32; - self.context.channel_id = funding_txo.to_channel_id(); - self.context.funding_transaction = Some(funding_transaction); + for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) { + log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", + encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), + encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)), + log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()), + log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id())); + } + } - Ok(msgs::FundingCreated { - temporary_channel_id, - funding_txid: funding_txo.txid, - funding_output_index: funding_txo.index, + Ok((msgs::CommitmentSigned { + channel_id: self.context.channel_id, signature, + htlc_signatures, #[cfg(taproot)] partial_signature_with_nonce: None, - #[cfg(taproot)] - next_local_nonce: None, - }) + }, (counterparty_commitment_txid, commitment_stats.htlcs_included))) } - /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly - /// announceable and available for use (have exchanged ChannelReady messages in both - /// directions). 
Should be used for both broadcasted announcements and in response to an - /// AnnouncementSignatures message from the remote peer. - /// - /// Will only fail if we're not in a state where channel_announcement may be sent (including - /// closing). + /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment + /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go. /// - /// This will only return ChannelError::Ignore upon failure. - fn get_channel_announcement( - &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig, - ) -> Result where NS::Target: NodeSigner { - if !self.context.config.announced_channel { - return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned())); - } - if !self.context.is_usable() { - return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned())); + /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on + /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info. + pub fn send_htlc_and_commit(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result, ChannelError> where L::Target: Logger { + let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger); + if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } } + match send_res? { + Some(_) => { + let monitor_update = self.build_commitment_no_status_check(logger); + self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + Ok(self.push_ret_blockable_mon_update(monitor_update)) + }, + None => Ok(None) } + } - let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node) - .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?); - let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id()); - let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice(); - - let msg = msgs::UnsignedChannelAnnouncement { - features: channelmanager::provided_channel_features(&user_config), - chain_hash, - short_channel_id: self.context.get_short_channel_id().unwrap(), - node_id_1: if were_node_one { node_id } else { counterparty_node_id }, - node_id_2: if were_node_one { counterparty_node_id } else { node_id }, - bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }), - bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }), - excess_data: Vec::new(), - }; + pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> { + if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 { + return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string())); + } + self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo { + fee_base_msat: msg.contents.fee_base_msat, + fee_proportional_millionths: msg.contents.fee_proportional_millionths, + cltv_expiry_delta: msg.contents.cltv_expiry_delta + }); - Ok(msg) + Ok(()) } - fn get_announcement_sigs( - &mut self, node_signer: &NS, 
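`channel_update` above rejects an `htlc_minimum_msat` at or above the channel value and stores the peer's forwarding parameters. For context only, a small sketch (with a stand-in `ForwardingInfo` type, not LDK's `CounterpartyForwardingInfo`) of the usual BOLT #7 fee computation such fields feed into when forwarding:

    // A stand-in for the stored forwarding parameters; not LDK's CounterpartyForwardingInfo.
    struct ForwardingInfo { fee_base_msat: u32, fee_proportional_millionths: u32 }

    fn forwarding_fee_msat(info: &ForwardingInfo, amount_to_forward_msat: u64) -> u64 {
        // BOLT #7: fee = fee_base_msat + amount_to_forward * fee_proportional_millionths / 1_000_000
        info.fee_base_msat as u64
            + amount_to_forward_msat * info.fee_proportional_millionths as u64 / 1_000_000
    }

    fn main() {
        let info = ForwardingInfo { fee_base_msat: 1_000, fee_proportional_millionths: 100 };
        // Forwarding 1_000_000 msat costs 1_000 + 1_000_000 * 100 / 1_000_000 = 1_100 msat.
        assert_eq!(forwarding_fee_msat(&info, 1_000_000), 1_100);
    }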
genesis_block_hash: BlockHash, user_config: &UserConfig, - best_block_height: u32, logger: &L - ) -> Option - where - NS::Target: NodeSigner, - L::Target: Logger - { - if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height { - return None; + /// Begins the shutdown process, getting a message for the remote peer and returning all + /// holding cell HTLCs for payment failure. + /// + /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no + /// [`ChannelMonitorUpdate`] will be returned). + pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures, + target_feerate_sats_per_kw: Option, override_shutdown_script: Option) + -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError> + where SP::Target: SignerProvider { + for htlc in self.context.pending_outbound_htlcs.iter() { + if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { + return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()}); + } } - - if !self.context.is_usable() { - return None; + if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 { + if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 { + return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()}); + } + else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 { + return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()}); + } } - - if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 { - log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected"); - return None; + if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() { + return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()}); + } + assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); + if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 { + return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()}); } - if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent { - return None; + // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown + // script is set, we just force-close and call it a day. + let mut chan_closed = false; + if self.context.channel_state < ChannelState::FundingSent as u32 { + chan_closed = true; } - log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id())); - let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) { - Ok(a) => a, - Err(e) => { - log_trace!(logger, "{:?}", e); - return None; - } - }; - let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) { - Err(_) => { - log_error!(logger, "Failed to generate node signature for channel_announcement. 
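`get_shutdown`'s preconditions above are expressed as bitflag tests against `channel_state`. A toy sketch of that style of check, with made-up flag values standing in for the real `ChannelState` discriminants:

    // Illustrative flag values; the real ChannelState bit assignments live in channel.rs.
    const LOCAL_SHUTDOWN_SENT: u32 = 1 << 0;
    const REMOTE_SHUTDOWN_SENT: u32 = 1 << 1;
    const BOTH_SIDES_SHUTDOWN_MASK: u32 = LOCAL_SHUTDOWN_SENT | REMOTE_SHUTDOWN_SENT;

    fn shutdown_already_in_progress(channel_state: u32) -> Option<&'static str> {
        if channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
            if channel_state & LOCAL_SHUTDOWN_SENT != 0 {
                return Some("Shutdown already in progress");
            }
            if channel_state & REMOTE_SHUTDOWN_SENT != 0 {
                return Some("Shutdown already in progress by remote");
            }
        }
        None
    }

    fn main() {
        assert!(shutdown_already_in_progress(0).is_none());
        assert_eq!(
            shutdown_already_in_progress(REMOTE_SHUTDOWN_SENT),
            Some("Shutdown already in progress by remote")
        );
    }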
Channel will not be announced!"); - return None; - }, - Ok(v) => v - }; - let our_bitcoin_sig = match self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) { - Err(_) => { - log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!"); - return None; + let update_shutdown_script = match self.context.shutdown_scriptpubkey { + Some(_) => false, + None if !chan_closed => { + // use override shutdown script if provided + let shutdown_scriptpubkey = match override_shutdown_script { + Some(script) => script, + None => { + // otherwise, use the shutdown scriptpubkey provided by the signer + match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => scriptpubkey, + Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}), + } + }, + }; + if !shutdown_scriptpubkey.is_compatible(their_features) { + return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() }); + } + self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey); + true }, - Ok(v) => v + None => false, }; - self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent; - - Some(msgs::AnnouncementSignatures { - channel_id: self.context.channel_id(), - short_channel_id: self.context.get_short_channel_id().unwrap(), - node_signature: our_node_sig, - bitcoin_signature: our_bitcoin_sig, - }) - } - - /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are - /// available. - fn sign_channel_announcement( - &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement - ) -> Result where NS::Target: NodeSigner { - if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs { - let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node) - .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?); - let were_node_one = announcement.node_id_1 == our_node_key; - let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) - .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?; - let our_bitcoin_sig = self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) - .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?; - Ok(msgs::ChannelAnnouncement { - node_signature_1: if were_node_one { our_node_sig } else { their_node_sig }, - node_signature_2: if were_node_one { their_node_sig } else { our_node_sig }, - bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig }, - bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig }, - contents: announcement, - }) + // From here on out, we may not fail! 
+ self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw; + if self.context.channel_state < ChannelState::FundingSent as u32 { + self.context.channel_state = ChannelState::ShutdownComplete as u32; } else { - Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string())) + self.context.channel_state |= ChannelState::LocalShutdownSent as u32; } - } + self.context.update_time_counter += 1; + + let monitor_update = if update_shutdown_script { + self.context.latest_monitor_update_id += 1; + let monitor_update = ChannelMonitorUpdate { + update_id: self.context.latest_monitor_update_id, + updates: vec![ChannelMonitorUpdateStep::ShutdownScript { + scriptpubkey: self.get_closing_scriptpubkey(), + }], + }; + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + if self.push_blockable_mon_update(monitor_update) { + self.context.pending_monitor_updates.last().map(|upd| &upd.update) + } else { None } + } else { None }; + let shutdown = msgs::Shutdown { + channel_id: self.context.channel_id, + scriptpubkey: self.get_closing_scriptpubkey(), + }; - /// Processes an incoming announcement_signatures message, providing a fully-signed - /// channel_announcement message which we can broadcast and storing our counterparty's - /// signatures for later reconstruction/rebroadcast of the channel_announcement. - pub fn announcement_signatures( - &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, - msg: &msgs::AnnouncementSignatures, user_config: &UserConfig - ) -> Result where NS::Target: NodeSigner { - let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?; + // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send + // our shutdown until we've committed all of the pending changes. + self.context.holding_cell_update_fee = None; + let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len()); + self.context.holding_cell_htlc_updates.retain(|htlc_update| { + match htlc_update { + &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => { + dropped_outbound_htlcs.push((source.clone(), payment_hash.clone())); + false + }, + _ => true + } + }); - let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]); + debug_assert!(!self.is_shutdown() || monitor_update.is_none(), + "we can't both complete shutdown and return a monitor update"); - if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() { - return Err(ChannelError::Close(format!( - "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}", - &announcement, self.context.get_counterparty_node_id()))); - } - if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() { - return Err(ChannelError::Close(format!( - "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. 
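Dropping the holding cell's queued HTLC adds above uses `Vec::retain` both to filter the queue and to collect what was removed. A self-contained sketch of that collect-while-retaining pattern, with a simplified `QueuedUpdate` type standing in for `HTLCUpdateAwaitingACK`:

    // Simplified stand-in for HTLCUpdateAwaitingACK, used only to show the pattern.
    enum QueuedUpdate {
        AddHtlc { payment_hash: [u8; 32] },
        ClaimHtlc { htlc_id: u64 },
    }

    fn drop_queued_adds(holding_cell: &mut Vec<QueuedUpdate>) -> Vec<[u8; 32]> {
        let mut dropped = Vec::with_capacity(holding_cell.len());
        holding_cell.retain(|upd| match upd {
            QueuedUpdate::AddHtlc { payment_hash } => {
                // Record the payment so it can be failed back, and drop it from the queue.
                dropped.push(*payment_hash);
                false
            },
            // Keep everything else (claims, fails, ...) queued.
            _ => true,
        });
        dropped
    }

    fn main() {
        let mut holding_cell = vec![
            QueuedUpdate::AddHtlc { payment_hash: [1; 32] },
            QueuedUpdate::ClaimHtlc { htlc_id: 7 },
        ];
        let dropped = drop_queued_adds(&mut holding_cell);
        assert_eq!(dropped.len(), 1);
        assert_eq!(holding_cell.len(), 1);
    }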
their_bitcoin_key is ({:?})", - &announcement, self.context.counterparty_funding_pubkey()))); - } + Ok((shutdown, monitor_update, dropped_outbound_htlcs)) + } - self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature)); - if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height { - return Err(ChannelError::Ignore( - "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned())); + /// Gets the latest commitment transaction and any dependent transactions for relay (forcing + /// shutdown of this channel - no more calls into this Channel may be made afterwards except + /// those explicitly stated to be allowed after shutdown completes, eg some simple getters). + /// Also returns the list of payment_hashes for channels which we can safely fail backwards + /// immediately (others we will have to allow to time out). + pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult { + // Note that we MUST only generate a monitor update that indicates force-closure - we're + // called during initialization prior to the chain_monitor in the encompassing ChannelManager + // being fully configured in some cases. Thus, its likely any monitor events we generate will + // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more. + assert!(self.context.channel_state != ChannelState::ShutdownComplete as u32); + + // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and + // return them to fail the payment. + let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len()); + let counterparty_node_id = self.context.get_counterparty_node_id(); + for htlc_update in self.context.holding_cell_htlc_updates.drain(..) { + match htlc_update { + HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => { + dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.context.channel_id)); + }, + _ => {} + } } + let monitor_update = if let Some(funding_txo) = self.context.get_funding_txo() { + // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent), + // returning a channel monitor update here would imply a channel monitor update before + // we even registered the channel monitor to begin with, which is invalid. + // Thus, if we aren't actually at a point where we could conceivably broadcast the + // funding transaction, don't return a funding txo (which prevents providing the + // monitor update to the user, even if we return one). + // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more. 
+ if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 { + self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID; + Some((self.context.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate { + update_id: self.context.latest_monitor_update_id, + updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }], + })) + } else { None } + } else { None }; - self.sign_channel_announcement(node_signer, announcement) + self.context.channel_state = ChannelState::ShutdownComplete as u32; + self.context.update_time_counter += 1; + (monitor_update, dropped_outbound_htlcs) } - /// Gets a signed channel_announcement for this channel, if we previously received an - /// announcement_signatures from our counterparty. - pub fn get_signed_channel_announcement( - &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig - ) -> Option where NS::Target: NodeSigner { - if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height { - return None; - } - let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) { - Ok(res) => res, - Err(_) => return None, - }; - match self.sign_channel_announcement(node_signer, announcement) { - Ok(res) => Some(res), - Err(_) => None, - } + pub fn inflight_htlc_sources(&self) -> impl Iterator { + self.context.holding_cell_htlc_updates.iter() + .flat_map(|htlc_update| { + match htlc_update { + HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } + => Some((source, payment_hash)), + _ => None, + } + }) + .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash))) } +} - /// May panic if called on a channel that wasn't immediately-previously - /// self.remove_uncommitted_htlcs_and_mark_paused()'d - pub fn get_channel_reestablish(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger { - assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32); - assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER); - // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming - // current to_remote balances. However, it no longer has any use, and thus is now simply - // set to a dummy (but valid, as required by the spec) public key. 
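`inflight_htlc_sources` above composes an iterator over the holding cell's queued adds with the already-pending outbound HTLCs. A simplified sketch of the same composition (using `filter_map`, which is equivalent to the `flat_map`-over-`Option` used above) with placeholder types:

    // Placeholder types standing in for the holding-cell updates and pending HTLCs.
    struct PendingHtlc { source: u64, payment_hash: [u8; 32] }
    enum Queued { Add { source: u64, payment_hash: [u8; 32] }, Other }

    fn inflight<'a>(holding_cell: &'a [Queued], pending: &'a [PendingHtlc])
        -> impl Iterator<Item = (&'a u64, &'a [u8; 32])>
    {
        holding_cell.iter()
            .filter_map(|upd| match upd {
                Queued::Add { source, payment_hash } => Some((source, payment_hash)),
                _ => None,
            })
            .chain(pending.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
    }

    fn main() {
        let holding_cell = [Queued::Add { source: 1, payment_hash: [0; 32] }, Queued::Other];
        let pending = [PendingHtlc { source: 2, payment_hash: [1; 32] }];
        assert_eq!(inflight(&holding_cell, &pending).count(), 2);
    }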
- // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey" - // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both - // valid, and valid in fuzzing mode's arbitrary validity criteria: - let mut pk = [2; 33]; pk[1] = 0xff; - let dummy_pubkey = PublicKey::from_slice(&pk).unwrap(); - let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { - let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap(); - log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id())); - remote_last_secret - } else { - log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id())); - [0;32] - }; - self.mark_awaiting_response(); - msgs::ChannelReestablish { - channel_id: self.context.channel_id(), - // The protocol has two different commitment number concepts - the "commitment - // transaction number", which starts from 0 and counts up, and the "revocation key - // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track - // commitment transaction numbers by the index which will be used to reveal the - // revocation key for that commitment transaction, which means we have to convert them - // to protocol-level commitment numbers here... +/// A not-yet-funded outbound (from holder) channel using V1 channel establishment. +pub(super) struct OutboundV1Channel { + pub context: ChannelContext, +} - // next_local_commitment_number is the next commitment_signed number we expect to - // receive (indicating if they need to resend one that we missed). - next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number, - // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to - // receive, however we track it by the next commitment number for a remote transaction - // (which is one further, as they always revoke previous commitment transaction, not - // the one we send) so we have to decrement by 1. Note that if - // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have - // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't - // overflow here. - next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1, - your_last_per_commitment_secret: remote_last_secret, - my_current_per_commitment_point: dummy_pubkey, - // TODO(dual_funding): If we've sent `commtiment_signed` for an interactive transaction - // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the - // txid of that interactive transaction, else we MUST NOT set it. - next_funding_txid: None, +impl OutboundV1Channel { + fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures { + // The default channel type (ie the first one we try) depends on whether the channel is + // public - if it is, we just go with `only_static_remotekey` as it's the only option + // available. If it's private, we first try `scid_privacy` as it provides better privacy + // with no other changes, and fall back to `only_static_remotekey`. 
+ let mut ret = ChannelTypeFeatures::only_static_remote_key(); + if !config.channel_handshake_config.announced_channel && + config.channel_handshake_config.negotiate_scid_privacy && + their_features.supports_scid_privacy() { + ret.set_scid_privacy_required(); } - } - - // Send stuff to our remote peers: + // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we + // set it now. If they don't understand it, we'll fall back to our default of + // `only_static_remotekey`. + #[cfg(anchors)] + { // Attributes are not allowed on if expressions on our current MSRV of 1.41. + if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx && + their_features.supports_anchors_zero_fee_htlc_tx() { + ret.set_anchors_zero_fee_htlc_tx_required(); + } + } - /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call - /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the - /// commitment update. - /// - /// `Err`s will only be [`ChannelError::Ignore`]. - pub fn queue_add_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, - onion_routing_packet: msgs::OnionPacket, logger: &L) - -> Result<(), ChannelError> where L::Target: Logger { - self - .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, logger) - .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?")) - .map_err(|err| { - if let ChannelError::Ignore(_) = err { /* fine */ } - else { debug_assert!(false, "Queueing cannot trigger channel failure"); } - err - }) + ret } - /// Adds a pending outbound HTLC to this channel, note that you probably want - /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once. - /// - /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on - /// the wire: - /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we - /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates - /// awaiting ACK. - /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as - /// we may not yet have sent the previous commitment update messages and will need to - /// regenerate them. - /// - /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods - /// on this [`Channel`] if `force_holding_cell` is false. - /// - /// `Err`s will only be [`ChannelError::Ignore`]. 
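`get_initial_channel_type` above builds the preferred channel type by starting from `only_static_remote_key` and layering on `scid_privacy` and, when configured, `anchors_zero_fee_htlc_tx` if the peer advertises support. A toy sketch of that preference logic with a plain bitflag feature set; the flags and names are illustrative, not LDK's `ChannelTypeFeatures`:

    // Toy bitflag feature set; names and bit positions are stand-ins for ChannelTypeFeatures.
    #[derive(Clone, Copy)]
    struct Features(u8);
    const STATIC_REMOTE_KEY: u8 = 1 << 0;
    const SCID_PRIVACY: u8 = 1 << 1;
    const ANCHORS_ZERO_FEE_HTLC_TX: u8 = 1 << 2;

    fn initial_channel_type(announced: bool, negotiate_scid_privacy: bool,
                            negotiate_anchors: bool, their_features: Features) -> Features {
        // Always start from the mandatory baseline.
        let mut ret = STATIC_REMOTE_KEY;
        // scid_privacy only applies to unannounced channels, and only if the peer supports it.
        if !announced && negotiate_scid_privacy && (their_features.0 & SCID_PRIVACY) != 0 {
            ret |= SCID_PRIVACY;
        }
        // Prefer zero-fee anchors when configured and the peer advertises support.
        if negotiate_anchors && (their_features.0 & ANCHORS_ZERO_FEE_HTLC_TX) != 0 {
            ret |= ANCHORS_ZERO_FEE_HTLC_TX;
        }
        Features(ret)
    }

    fn main() {
        let peer = Features(STATIC_REMOTE_KEY | SCID_PRIVACY);
        let chosen = initial_channel_type(false, true, true, peer);
        assert_eq!(chosen.0, STATIC_REMOTE_KEY | SCID_PRIVACY);
    }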
- fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, - onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L) - -> Result, ChannelError> where L::Target: Logger { - if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) { - return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned())); + pub fn new_outbound( + fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, + channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, + outbound_scid_alias: u64 + ) -> Result, APIError> + where ES::Target: EntropySource, + SP::Target: SignerProvider, + F::Target: FeeEstimator, + { + let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay; + let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id); + let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id); + let pubkeys = holder_signer.pubkeys().clone(); + + if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO { + return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)}); } - let channel_total_msat = self.context.channel_value_satoshis * 1000; - if amount_msat > channel_total_msat { - return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat))); + if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { + return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)}); + } + let channel_value_msat = channel_value_satoshis * 1000; + if push_msat > channel_value_msat { + return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) }); + } + if holder_selected_contest_delay < BREAKDOWN_TIMEOUT { + return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)}); + } + let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); + if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + // Protocol level safety check in place, although it should never happen because + // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` + return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) }); } - if amount_msat == 0 { - return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned())); - } + let channel_type = Self::get_initial_channel_type(&config, their_features); + debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config))); - let available_balances = self.context.get_available_balances(); - if amount_msat < available_balances.next_outbound_htlc_minimum_msat { - return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} 
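`new_outbound` begins with a series of funding-parameter sanity checks before any state is built. The following condensed sketch shows a few of them as a pure function; the limit constants here are assumed values in the style of the BOLT #2 non-wumbo cap, and the crate's own constants remain authoritative:

    // Assumed limit values; the crate's constants in channel.rs / channelmanager.rs are authoritative.
    const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
    const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 100_000_000;

    fn check_funding_params(channel_value_satoshis: u64, push_msat: u64,
                            peer_supports_wumbo: bool) -> Result<(), String> {
        if !peer_supports_wumbo && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
            return Err(format!("funding_value must not exceed {}, it was {}",
                MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis));
        }
        if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
            return Err(format!("funding_value must be smaller than the total bitcoin supply, it was {}",
                channel_value_satoshis));
        }
        if push_msat > channel_value_satoshis * 1000 {
            return Err(format!("Push value ({}) was larger than channel_value ({})",
                push_msat, channel_value_satoshis * 1000));
        }
        Ok(())
    }

    fn main() {
        assert!(check_funding_params(1_000_000, 0, false).is_ok());
        assert!(check_funding_params(20_000_000, 0, false).is_err()); // would require wumbo
    }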
msat", - available_balances.next_outbound_htlc_minimum_msat))); - } + let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); - if amount_msat > available_balances.next_outbound_htlc_limit_msat { - return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat", - available_balances.next_outbound_htlc_limit_msat))); + let value_to_self_msat = channel_value_satoshis * 1000 - push_msat; + let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx()); + if value_to_self_msat < commitment_tx_fee { + return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) }); } - if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 { - // Note that this should never really happen, if we're !is_live() on receipt of an - // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow - // the user to send directly into a !is_live() channel. However, if we - // disconnected during the time the previous hop was doing the commitment dance we may - // end up getting here after the forwarding delay. In any case, returning an - // IgnoreError will get ChannelManager to do the right thing and fail backwards now. - return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned())); - } + let mut secp_ctx = Secp256k1::new(); + secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); - let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0; - log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat, - if force_holding_cell { "into holding cell" } - else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" } - else { "to peer" }); + let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { + match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => Some(scriptpubkey), + Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}), + } + } else { None }; - if need_holding_cell { - force_holding_cell = true; + if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { + if !shutdown_scriptpubkey.is_compatible(&their_features) { + return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() }); + } } - // Now update local state: - if force_holding_cell { - self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC { - amount_msat, - payment_hash, - cltv_expiry, - source, - onion_routing_packet, - }); - return Ok(None); - } + let destination_script = match signer_provider.get_destination_script() { + Ok(script) => script, + Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}), + }; - self.context.pending_outbound_htlcs.push(OutboundHTLCOutput { - htlc_id: self.context.next_holder_htlc_id, - amount_msat, - payment_hash: payment_hash.clone(), - cltv_expiry, - state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())), - source, - }); + let temporary_channel_id = entropy_source.get_secure_random_bytes(); - let res = msgs::UpdateAddHTLC { - channel_id: self.context.channel_id, - htlc_id: 
self.context.next_holder_htlc_id, - amount_msat, - payment_hash, - cltv_expiry, - onion_routing_packet, - }; - self.context.next_holder_htlc_id += 1; + Ok(Channel { + context: ChannelContext { + user_id, - Ok(Some(res)) - } + config: LegacyChannelConfig { + options: config.channel_config.clone(), + announced_channel: config.channel_handshake_config.announced_channel, + commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, + }, - fn build_commitment_no_status_check(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger { - log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed..."); - // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we - // fail to generate this, we still are at least at a position where upgrading their status - // is acceptable. - for htlc in self.context.pending_inbound_htlcs.iter_mut() { - let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state { - Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone())) - } else { None }; - if let Some(state) = new_state { - log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); - htlc.state = state; - } - } - for htlc in self.context.pending_outbound_htlcs.iter_mut() { - if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state { - log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); - // Grab the preimage, if it exists, instead of cloning - let mut reason = OutboundHTLCOutcome::Success(None); - mem::swap(outcome, &mut reason); - htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason); - } - } - if let Some((feerate, update_state)) = self.context.pending_update_fee { - if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce { - debug_assert!(!self.context.is_outbound()); - log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); - self.context.feerate_per_kw = feerate; - self.context.pending_update_fee = None; - } - } - self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst; + prev_config: None, - let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger); - let htlcs: Vec<(HTLCOutputInCommitment, Option>)> = - htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect(); + inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()), - if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent { - self.context.announcement_sigs_state = AnnouncementSigsState::Committed; - } + channel_id: temporary_channel_id, + temporary_channel_id: Some(temporary_channel_id), + channel_state: ChannelState::OurInitSent as u32, + announcement_sigs_state: AnnouncementSigsState::NotSent, + secp_ctx, + channel_value_satoshis, - self.context.latest_monitor_update_id += 1; - let monitor_update = ChannelMonitorUpdate { - update_id: self.context.latest_monitor_update_id, - updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { - commitment_txid: counterparty_commitment_txid, - htlc_outputs: htlcs.clone(), - commitment_number: self.context.cur_counterparty_commitment_transaction_number, - their_per_commitment_point: 
self.context.counterparty_cur_commitment_point.unwrap() - }] - }; - self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32; - monitor_update - } + latest_monitor_update_id: 0, - fn build_commitment_no_state_update(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger { - let counterparty_keys = self.context.build_remote_transaction_keys(); - let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); - let counterparty_commitment_txid = commitment_stats.tx.trust().txid(); + holder_signer, + shutdown_scriptpubkey, + destination_script, - #[cfg(any(test, fuzzing))] - { - if !self.context.is_outbound() { - let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take(); - *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None; - if let Some(info) = projected_commit_tx_info { - let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len(); - if info.total_pending_htlcs == total_pending_htlcs - && info.next_holder_htlc_id == self.context.next_holder_htlc_id - && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id - && info.feerate == self.context.feerate_per_kw { - let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.opt_anchors()); - assert_eq!(actual_fee, info.fee); - } - } - } - } + cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + value_to_self_msat, - (counterparty_commitment_txid, commitment_stats.htlcs_included) - } + pending_inbound_htlcs: Vec::new(), + pending_outbound_htlcs: Vec::new(), + holding_cell_htlc_updates: Vec::new(), + pending_update_fee: None, + holding_cell_update_fee: None, + next_holder_htlc_id: 0, + next_counterparty_htlc_id: 0, + update_time_counter: 1, - /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed - /// generation when we shouldn't change HTLC/channel state. 
- fn send_commitment_no_state_update(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger { - // Get the fee tests from `build_commitment_no_state_update` - #[cfg(any(test, fuzzing))] - self.build_commitment_no_state_update(logger); + resend_order: RAACommitmentOrder::CommitmentFirst, - let counterparty_keys = self.context.build_remote_transaction_keys(); - let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); - let counterparty_commitment_txid = commitment_stats.tx.trust().txid(); - let (signature, htlc_signatures); + monitor_pending_channel_ready: false, + monitor_pending_revoke_and_ack: false, + monitor_pending_commitment_signed: false, + monitor_pending_forwards: Vec::new(), + monitor_pending_failures: Vec::new(), + monitor_pending_finalized_fulfills: Vec::new(), - { - let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len()); - for &(ref htlc, _) in commitment_stats.htlcs_included.iter() { - htlcs.push(htlc); - } + #[cfg(debug_assertions)] + holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), + #[cfg(debug_assertions)] + counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), - let res = self.context.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx) - .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?; - signature = res.0; - htlc_signatures = res.1; + last_sent_closing_fee: None, + pending_counterparty_closing_signed: None, + closing_fee_limits: None, + target_closing_feerate_sats_per_kw: None, - log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", - encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction), - &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()), - log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id())); + inbound_awaiting_accept: false, - for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) { - log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", - encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), - encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)), - log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()), - log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id())); - } - } + funding_tx_confirmed_in: None, + funding_tx_confirmation_height: 0, + short_channel_id: None, + channel_creation_height: current_chain_height, - Ok((msgs::CommitmentSigned { - channel_id: self.context.channel_id, - signature, - htlc_signatures, - #[cfg(taproot)] - partial_signature_with_nonce: None, - }, (counterparty_commitment_txid, commitment_stats.htlcs_included))) - } + feerate_per_kw: feerate, + counterparty_dust_limit_satoshis: 0, + holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, + 
counterparty_max_htlc_value_in_flight_msat: 0, + holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), + counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel + holder_selected_channel_reserve_satoshis, + counterparty_htlc_minimum_msat: 0, + holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, + counterparty_max_accepted_htlcs: 0, + holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), + minimum_depth: None, // Filled in in accept_channel - /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment - /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go. - /// - /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on - /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info. - pub fn send_htlc_and_commit(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result, ChannelError> where L::Target: Logger { - let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger); - if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } } - match send_res? { - Some(_) => { - let monitor_update = self.build_commitment_no_status_check(logger); - self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); - Ok(self.push_ret_blockable_mon_update(monitor_update)) - }, - None => Ok(None) - } - } + counterparty_forwarding_info: None, - pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> { - if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 { - return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string())); - } - self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo { - fee_base_msat: msg.contents.fee_base_msat, - fee_proportional_millionths: msg.contents.fee_proportional_millionths, - cltv_expiry_delta: msg.contents.cltv_expiry_delta - }); + channel_transaction_parameters: ChannelTransactionParameters { + holder_pubkeys: pubkeys, + holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, + is_outbound_from_holder: true, + counterparty_parameters: None, + funding_outpoint: None, + opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None }, + opt_non_zero_fee_anchors: None + }, + funding_transaction: None, - Ok(()) - } + counterparty_cur_commitment_point: None, + counterparty_prev_commitment_point: None, + counterparty_node_id, - /// Begins the shutdown process, getting a message for the remote peer and returning all - /// holding cell HTLCs for payment failure. - /// - /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no - /// [`ChannelMonitorUpdate`] will be returned). 
- pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures, - target_feerate_sats_per_kw: Option, override_shutdown_script: Option) - -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError> - where SP::Target: SignerProvider { - for htlc in self.context.pending_outbound_htlcs.iter() { - if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { - return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()}); - } - } - if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 { - if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 { - return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()}); - } - else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 { - return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()}); - } - } - if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() { - return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()}); - } - assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); - if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 { - return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()}); - } + counterparty_shutdown_scriptpubkey: None, - // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown - // script is set, we just force-close and call it a day. - let mut chan_closed = false; - if self.context.channel_state < ChannelState::FundingSent as u32 { - chan_closed = true; - } + commitment_secrets: CounterpartyCommitmentSecrets::new(), - let update_shutdown_script = match self.context.shutdown_scriptpubkey { - Some(_) => false, - None if !chan_closed => { - // use override shutdown script if provided - let shutdown_scriptpubkey = match override_shutdown_script { - Some(script) => script, - None => { - // otherwise, use the shutdown scriptpubkey provided by the signer - match signer_provider.get_shutdown_scriptpubkey() { - Ok(scriptpubkey) => scriptpubkey, - Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}), - } - }, - }; - if !shutdown_scriptpubkey.is_compatible(their_features) { - return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() }); - } - self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey); - true - }, - None => false, - }; + channel_update_status: ChannelUpdateStatus::Enabled, + closing_signed_in_flight: false, - // From here on out, we may not fail! 
- self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw; - if self.context.channel_state < ChannelState::FundingSent as u32 { - self.context.channel_state = ChannelState::ShutdownComplete as u32; - } else { - self.context.channel_state |= ChannelState::LocalShutdownSent as u32; - } - self.context.update_time_counter += 1; + announcement_sigs: None, - let monitor_update = if update_shutdown_script { - self.context.latest_monitor_update_id += 1; - let monitor_update = ChannelMonitorUpdate { - update_id: self.context.latest_monitor_update_id, - updates: vec![ChannelMonitorUpdateStep::ShutdownScript { - scriptpubkey: self.get_closing_scriptpubkey(), - }], - }; - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); - if self.push_blockable_mon_update(monitor_update) { - self.context.pending_monitor_updates.last().map(|upd| &upd.update) - } else { None } - } else { None }; - let shutdown = msgs::Shutdown { - channel_id: self.context.channel_id, - scriptpubkey: self.get_closing_scriptpubkey(), - }; + #[cfg(any(test, fuzzing))] + next_local_commitment_tx_fee_info_cached: Mutex::new(None), + #[cfg(any(test, fuzzing))] + next_remote_commitment_tx_fee_info_cached: Mutex::new(None), - // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send - // our shutdown until we've committed all of the pending changes. - self.context.holding_cell_update_fee = None; - let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len()); - self.context.holding_cell_htlc_updates.retain(|htlc_update| { - match htlc_update { - &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => { - dropped_outbound_htlcs.push((source.clone(), payment_hash.clone())); - false - }, - _ => true - } - }); + workaround_lnd_bug_4006: None, + sent_message_awaiting_response: None, - debug_assert!(!self.is_shutdown() || monitor_update.is_none(), - "we can't both complete shutdown and return a monitor update"); + latest_inbound_scid_alias: None, + outbound_scid_alias, - Ok((shutdown, monitor_update, dropped_outbound_htlcs)) - } + channel_pending_event_emitted: false, + channel_ready_event_emitted: false, - /// Gets the latest commitment transaction and any dependent transactions for relay (forcing - /// shutdown of this channel - no more calls into this Channel may be made afterwards except - /// those explicitly stated to be allowed after shutdown completes, eg some simple getters). - /// Also returns the list of payment_hashes for channels which we can safely fail backwards - /// immediately (others we will have to allow to time out). - pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult { - // Note that we MUST only generate a monitor update that indicates force-closure - we're - // called during initialization prior to the chain_monitor in the encompassing ChannelManager - // being fully configured in some cases. Thus, its likely any monitor events we generate will - // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more. - assert!(self.context.channel_state != ChannelState::ShutdownComplete as u32); + #[cfg(any(test, fuzzing))] + historical_inbound_htlc_fulfills: HashSet::new(), - // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and - // return them to fail the payment. 
- let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len()); - let counterparty_node_id = self.context.get_counterparty_node_id(); - for htlc_update in self.context.holding_cell_htlc_updates.drain(..) { - match htlc_update { - HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => { - dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.context.channel_id)); - }, - _ => {} - } - } - let monitor_update = if let Some(funding_txo) = self.context.get_funding_txo() { - // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent), - // returning a channel monitor update here would imply a channel monitor update before - // we even registered the channel monitor to begin with, which is invalid. - // Thus, if we aren't actually at a point where we could conceivably broadcast the - // funding transaction, don't return a funding txo (which prevents providing the - // monitor update to the user, even if we return one). - // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more. - if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 { - self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID; - Some((self.context.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate { - update_id: self.context.latest_monitor_update_id, - updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }], - })) - } else { None } - } else { None }; + channel_type, + channel_keys_id, - self.context.channel_state = ChannelState::ShutdownComplete as u32; - self.context.update_time_counter += 1; - (monitor_update, dropped_outbound_htlcs) + pending_monitor_updates: Vec::new(), + } + }) } +} - pub fn inflight_htlc_sources(&self) -> impl Iterator { - self.context.holding_cell_htlc_updates.iter() - .flat_map(|htlc_update| { - match htlc_update { - HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } - => Some((source, payment_hash)), - _ => None, - } - }) - .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash))) - } +/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment. +pub(super) struct InboundV1Channel { + pub context: ChannelContext, } -/// A not-yet-funded outbound (from holder) channel using V1 channel establishment. -pub(super) struct OutboundV1Channel { - pub context: ChannelContext, -} +impl InboundV1Channel { + /// Creates a new channel from a remote sides' request for one. + /// Assumes chain_hash has already been checked and corresponds with what we expect! + pub fn new_from_req( + fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, + counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, + their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig, + current_chain_height: u32, logger: &L, outbound_scid_alias: u64 + ) -> Result, ChannelError> + where ES::Target: EntropySource, + SP::Target: SignerProvider, + F::Target: FeeEstimator, + L::Target: Logger, + { + let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false }; + + // First check the channel type is known, failing before we do anything else if we don't + // support this channel type. 
+ let channel_type = if let Some(channel_type) = &msg.channel_type { + if channel_type.supports_any_optional_bits() { + return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned())); + } + + // We only support the channel types defined by the `ChannelManager` in + // `provided_channel_type_features`. The channel type must always support + // `static_remote_key`. + if !channel_type.requires_static_remote_key() { + return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned())); + } + // Make sure we support all of the features behind the channel type. + if !channel_type.is_subset(our_supported_features) { + return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned())); + } + if channel_type.requires_scid_privacy() && announced_channel { + return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned())); + } + channel_type.clone() + } else { + let channel_type = ChannelTypeFeatures::from_init(&their_features); + if channel_type != ChannelTypeFeatures::only_static_remote_key() { + return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); + } + channel_type + }; + let opt_anchors = channel_type.supports_anchors_zero_fee_htlc_tx(); + + let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id); + let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id); + let pubkeys = holder_signer.pubkeys().clone(); + let counterparty_pubkeys = ChannelPublicKeys { + funding_pubkey: msg.funding_pubkey, + revocation_basepoint: msg.revocation_basepoint, + payment_point: msg.payment_point, + delayed_payment_basepoint: msg.delayed_payment_basepoint, + htlc_basepoint: msg.htlc_basepoint + }; -impl OutboundV1Channel { - fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures { - // The default channel type (ie the first one we try) depends on whether the channel is - // public - if it is, we just go with `only_static_remotekey` as it's the only option - // available. If it's private, we first try `scid_privacy` as it provides better privacy - // with no other changes, and fall back to `only_static_remotekey`. - let mut ret = ChannelTypeFeatures::only_static_remote_key(); - if !config.channel_handshake_config.announced_channel && - config.channel_handshake_config.negotiate_scid_privacy && - their_features.supports_scid_privacy() { - ret.set_scid_privacy_required(); + if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT { + return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT))); } - // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we - // set it now. If they don't understand it, we'll fall back to our default of - // `only_static_remotekey`. - #[cfg(anchors)] - { // Attributes are not allowed on if expressions on our current MSRV of 1.41. 
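The inbound `channel_type` negotiation above rejects unknown optional bits, requires `static_remote_key`, requires the proposed type to be a subset of our supported features, and forbids `scid_privacy` on announced channels. A rough standalone sketch of those rules, modelling feature sets as plain bit masks rather than LDK's `ChannelTypeFeatures`:

```rust
// Toy feature bits, purely illustrative; real negotiation uses ChannelTypeFeatures.
const STATIC_REMOTE_KEY: u32 = 1 << 0;
const SCID_PRIVACY: u32 = 1 << 1;
const ANCHORS_ZERO_FEE_HTLC_TX: u32 = 1 << 2;

/// Returns an error string if the proposed channel type must be rejected.
fn check_channel_type(proposed: u32, ours: u32, announced: bool) -> Result<(), &'static str> {
    if proposed & STATIC_REMOTE_KEY == 0 {
        return Err("Channel Type was not understood - we require static remote key");
    }
    if proposed & !ours != 0 {
        return Err("Channel Type contains unsupported features");
    }
    if proposed & SCID_PRIVACY != 0 && announced {
        return Err("SCID Alias/Privacy Channel Type cannot be set on a public channel");
    }
    Ok(())
}

fn main() {
    let ours = STATIC_REMOTE_KEY | ANCHORS_ZERO_FEE_HTLC_TX;
    assert!(check_channel_type(STATIC_REMOTE_KEY, ours, true).is_ok());
    assert!(check_channel_type(STATIC_REMOTE_KEY | SCID_PRIVACY, ours, true).is_err());
}
```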
- if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx && - their_features.supports_anchors_zero_fee_htlc_tx() { - ret.set_anchors_zero_fee_htlc_tx_required(); - } + // Check sanity of message fields: + if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis { + return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis))); } + if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { + return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis))); + } + if msg.channel_reserve_satoshis > msg.funding_satoshis { + return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis))); + } + let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; + if msg.push_msat > full_channel_value_msat { + return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat))); + } + if msg.dust_limit_satoshis > msg.funding_satoshis { + return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis))); + } + if msg.htlc_minimum_msat >= full_channel_value_msat { + return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat))); + } + Channel::::check_remote_fee(fee_estimator, msg.feerate_per_kw, None, logger)?; - ret - } - - pub fn new_outbound( - fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, - channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, - outbound_scid_alias: u64 - ) -> Result, APIError> - where ES::Target: EntropySource, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - { - let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay; - let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id); - let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id); - let pubkeys = holder_signer.pubkeys().clone(); + let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); + if msg.to_self_delay > max_counterparty_selected_contest_delay { + return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay))); + } + if msg.max_accepted_htlcs < 1 { + return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned())); + } + if msg.max_accepted_htlcs > MAX_HTLCS { + return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. 
It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS))); + } - if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO { - return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)}); + // Now check against optional parameters as set by config... + if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis { + return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis))); } - if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { - return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)}); + if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat { + return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat))); } - let channel_value_msat = channel_value_satoshis * 1000; - if push_msat > channel_value_msat { - return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) }); + if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat { + return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat))); } - if holder_selected_contest_delay < BREAKDOWN_TIMEOUT { - return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)}); + if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis { + return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis))); } - let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); + if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs { + return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs))); + } + if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); + } + if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); + } + + // Convert things into internal flags and prep our state: + + if config.channel_handshake_limits.force_announced_channel_preference { + if config.channel_handshake_config.announced_channel != announced_channel { + return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned())); + } + } + + let 
holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config); if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { // Protocol level safety check in place, although it should never happen because // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` - return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) }); + return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); + } + if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat { + return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat))); + } + if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.", + msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS); + } + if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis { + return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis))); } - let channel_type = Self::get_initial_channel_type(&config, their_features); - debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config))); - - let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + // check if the funder's amount for the initial commitment tx is sufficient + // for full fee payment plus a few HTLCs to ensure the channel will be useful. + let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat; + let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000; + if funders_amount_msat / 1000 < commitment_tx_fee { + return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee))); + } - let value_to_self_msat = channel_value_satoshis * 1000 - push_msat; - let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx()); - if value_to_self_msat < commitment_tx_fee { - return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) }); + let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee; + // While it's reasonable for us to not meet the channel reserve initially (if they don't + // want to push much to us), our counterparty should always have more than our reserve. 
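The affordability check in this hunk (continued just below) requires the funder's balance to cover the fee on the initial commitment transaction plus a few HTLCs, and still leave the reserve we require of them. A back-of-the-envelope sketch of that calculation; the weights and the minimum affordable HTLC count are stated here as assumptions approximating BOLT 3, not LDK's exact constants:

```rust
// Assumed, approximate BOLT 3 commitment-transaction weights and HTLC count;
// treat these as illustrative constants, not LDK's definitions.
const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
const MIN_AFFORDABLE_HTLC_COUNT: u64 = 4;

fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: u64, anchors: bool) -> u64 {
    let base = if anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT };
    (base + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000
}

/// Can the funder pay the initial commitment fee and still meet the reserve
/// we require of them?
fn funder_can_afford(funding_sat: u64, push_msat: u64, feerate_per_kw: u32,
                     anchors: bool, holder_reserve_sat: u64) -> bool {
    let funders_amount_msat = funding_sat * 1000 - push_msat;
    let fee_sat = commit_tx_fee_sat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, anchors);
    if funders_amount_msat / 1000 < fee_sat { return false; }
    let to_remote_sat = funders_amount_msat / 1000 - fee_sat;
    to_remote_sat >= holder_reserve_sat
}

fn main() {
    // 100k sat inbound channel, nothing pushed, 253 sat/kW, 1k sat reserve: fine.
    assert!(funder_can_afford(100_000, 0, 253, false, 1_000));
    // A 1k sat channel at 2000 sat/kW cannot even pay the commitment fee.
    assert!(!funder_can_afford(1_000, 0, 2_000, false, 1_000));
}
```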
+ if to_remote_satoshis < holder_selected_channel_reserve_satoshis { + return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned())); } - let mut secp_ctx = Secp256k1::new(); - secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); + let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { + match &msg.shutdown_scriptpubkey { + &Some(ref script) => { + // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything + if script.len() == 0 { + None + } else { + if !script::is_bolt2_compliant(&script, their_features) { + return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))) + } + Some(script.clone()) + } + }, + // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel + &None => { + return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned())); + } + } + } else { None }; let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { match signer_provider.get_shutdown_scriptpubkey() { Ok(scriptpubkey) => Some(scriptpubkey), - Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}), + Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())), } } else { None }; if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { if !shutdown_scriptpubkey.is_compatible(&their_features) { - return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() }); + return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey))); } } let destination_script = match signer_provider.get_destination_script() { Ok(script) => script, - Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}), + Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())), }; - let temporary_channel_id = entropy_source.get_secure_random_bytes(); + let mut secp_ctx = Secp256k1::new(); + secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); - Ok(Channel { + let chan = Channel { context: ChannelContext { user_id, config: LegacyChannelConfig { options: config.channel_config.clone(), - announced_channel: config.channel_handshake_config.announced_channel, + announced_channel, commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, }, prev_config: None, - inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()), + inbound_handshake_limits_override: None, - channel_id: temporary_channel_id, - temporary_channel_id: Some(temporary_channel_id), - channel_state: ChannelState::OurInitSent as u32, + temporary_channel_id: Some(msg.temporary_channel_id), + channel_id: msg.temporary_channel_id, + channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32), announcement_sigs_state: AnnouncementSigsState::NotSent, secp_ctx, - channel_value_satoshis, latest_monitor_update_id: 0, @@ -6343,7 +6344,7 @@ impl OutboundV1Channel { cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - value_to_self_msat, + 
value_to_self_msat: msg.push_msat, pending_inbound_htlcs: Vec::new(), pending_outbound_htlcs: Vec::new(), @@ -6364,53 +6365,57 @@ impl OutboundV1Channel { monitor_pending_finalized_fulfills: Vec::new(), #[cfg(debug_assertions)] - holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), + holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), #[cfg(debug_assertions)] - counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), + counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), last_sent_closing_fee: None, pending_counterparty_closing_signed: None, closing_fee_limits: None, target_closing_feerate_sats_per_kw: None, - inbound_awaiting_accept: false, + inbound_awaiting_accept: true, funding_tx_confirmed_in: None, funding_tx_confirmation_height: 0, short_channel_id: None, channel_creation_height: current_chain_height, - feerate_per_kw: feerate, - counterparty_dust_limit_satoshis: 0, + feerate_per_kw: msg.feerate_per_kw, + channel_value_satoshis: msg.funding_satoshis, + counterparty_dust_limit_satoshis: msg.dust_limit_satoshis, holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, - counterparty_max_htlc_value_in_flight_msat: 0, - holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), - counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel + counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000), + holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config), + counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis), holder_selected_channel_reserve_satoshis, - counterparty_htlc_minimum_msat: 0, + counterparty_htlc_minimum_msat: msg.htlc_minimum_msat, holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, - counterparty_max_accepted_htlcs: 0, + counterparty_max_accepted_htlcs: msg.max_accepted_htlcs, holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), - minimum_depth: None, // Filled in in accept_channel + minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)), counterparty_forwarding_info: None, channel_transaction_parameters: ChannelTransactionParameters { holder_pubkeys: pubkeys, holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, - is_outbound_from_holder: true, - counterparty_parameters: None, + is_outbound_from_holder: false, + counterparty_parameters: Some(CounterpartyChannelTransactionParameters { + selected_contest_delay: msg.to_self_delay, + pubkeys: counterparty_pubkeys, + }), funding_outpoint: None, - opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None }, + opt_anchors: if opt_anchors { Some(()) } else { None }, opt_non_zero_fee_anchors: None }, funding_transaction: None, - counterparty_cur_commitment_point: None, + counterparty_cur_commitment_point: Some(msg.first_per_commitment_point), counterparty_prev_commitment_point: None, counterparty_node_id, - counterparty_shutdown_scriptpubkey: None, + counterparty_shutdown_scriptpubkey, commitment_secrets: 
CounterpartyCommitmentSecrets::new(), @@ -6441,17 +6446,12 @@ impl OutboundV1Channel { pending_monitor_updates: Vec::new(), } - }) - } -} + }; -/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment. -pub(super) struct InboundV1Channel { - pub context: ChannelContext, + Ok(chan) + } } -impl InboundV1Channel {} - const SERIALIZATION_VERSION: u8 = 3; const MIN_SERIALIZATION_VERSION: u8 = 2; @@ -7295,7 +7295,7 @@ mod tests { use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; #[cfg(anchors)] use crate::ln::channel::InitFeatures; - use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat}; + use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat}; use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS}; use crate::ln::features::ChannelTypeFeatures; use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT}; @@ -7459,7 +7459,7 @@ mod tests { // Make sure A's dust limit is as we expect. let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let mut node_b_chan = Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap(); + let mut node_b_chan = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap(); // Node B --> Node A: accept channel, explicitly setting B's dust limit. let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0); @@ -7575,7 +7575,7 @@ mod tests { // Create Node B's channel by receiving Node A's open_channel message let open_channel_msg = node_a_chan.get_open_channel(chain_hash); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let mut node_b_chan = Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap(); + let mut node_b_chan = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap(); // Node B --> Node A: accept channel let accept_channel_msg = node_b_chan.accept_inbound_channel(0); @@ -7647,12 +7647,12 @@ mod tests { // Test that `new_from_req` creates a channel with the correct value for // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, // which is set to the lower bound - 1 (2%) of the `channel_value`. 
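The tests in and after this hunk pin down how `holder_max_htlc_value_in_flight_msat` is derived from the configured percentage: values below 1 are clamped up to 1% and values above 100 are clamped down to 100% of the channel value. A sketch consistent with what those tests assert; the helper name and signature are illustrative rather than LDK's exact API:

```rust
/// Illustrative derivation of the in-flight cap the surrounding tests exercise:
/// the configured percent is clamped to [1, 100] before being applied.
fn holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, configured_percent: u8) -> u64 {
    let percent = configured_percent.max(1).min(100) as u64;
    channel_value_satoshis * 1000 * percent / 100
}

fn main() {
    let chan_value_sat = 1_000_000;
    assert_eq!(holder_max_htlc_value_in_flight_msat(chan_value_sat, 2), 20_000_000);      // 2%
    assert_eq!(holder_max_htlc_value_in_flight_msat(chan_value_sat, 0), 10_000_000);      // clamped to 1%
    assert_eq!(holder_max_htlc_value_in_flight_msat(chan_value_sat, 255), 1_000_000_000); // clamped to 100%
}
```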
- let chan_3 = Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap(); + let chan_3 = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap(); let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000; assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64); // Test with the upper bound - 1 of valid values (99%). - let chan_4 = Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap(); + let chan_4 = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap(); let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000; assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64); @@ -7671,14 +7671,14 @@ mod tests { // Test that `new_from_req` uses the lower bound of the configurable percentage values (1%) // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1. - let chan_7 = Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap(); + let chan_7 = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap(); let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000; assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64); // Test that `new_from_req` uses the upper bound of the configurable percentage values // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value // than 100. 
- let chan_8 = Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap(); + let chan_8 = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap(); let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000; assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat); } @@ -7728,7 +7728,7 @@ mod tests { inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32; if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 { - let chan_inbound_node = Channel::::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap(); + let chan_inbound_node = InboundV1Channel::::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap(); let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64); @@ -7736,7 +7736,7 @@ mod tests { assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve); } else { // Channel Negotiations failed - let result = Channel::::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42); + let result = InboundV1Channel::::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42); assert!(result.is_err()); } } @@ -8560,7 +8560,7 @@ mod tests { let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); open_channel_msg.channel_type = Some(channel_type_features); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let res = Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, + let res = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42); assert!(res.is_ok()); @@ -8602,7 +8602,7 @@ mod tests { ).unwrap(); let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); - 
let channel_b = Channel::::new_from_req( + let channel_b = InboundV1Channel::::new_from_req( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42 @@ -8645,7 +8645,7 @@ mod tests { // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts // `static_remote_key`, it will fail the channel. - let channel_b = Channel::::new_from_req( + let channel_b = InboundV1Channel::::new_from_req( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors, &open_channel_msg, 7, &config, 0, &&logger, 42 @@ -8689,7 +8689,7 @@ mod tests { let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone()); - let res = Channel::::new_from_req( + let res = InboundV1Channel::::new_from_req( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, &channelmanager::provided_channel_type_features(&config), &simple_anchors_init, &open_channel_msg, 7, &config, 0, &&logger, 42 @@ -8707,7 +8707,7 @@ mod tests { let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); - let channel_b = Channel::::new_from_req( + let channel_b = InboundV1Channel::::new_from_req( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42 diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 78265a0b7f1..e8c767204b9 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -40,7 +40,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. 
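With this change `ChannelManager` builds an `InboundV1Channel` for an incoming `open_channel` while funded channels remain plain `Channel`s, and both expose their shared state through a public `context` field. A minimal sketch of that wrapper layout, with the field set and constructor heavily simplified for illustration:

```rust
#![allow(dead_code)]

// Heavily simplified stand-ins; the real ChannelContext has many more fields
// and is generic over the channel signer.
struct ChannelContext {
    channel_id: [u8; 32],
    channel_value_satoshis: u64,
}

struct Channel { context: ChannelContext }           // funded channel
struct OutboundV1Channel { context: ChannelContext } // pending outbound V1
struct InboundV1Channel { context: ChannelContext }  // pending inbound V1

impl InboundV1Channel {
    /// Stand-in for `new_from_req`: build the shared context from an
    /// (elided) `open_channel` message.
    fn new_from_req(temporary_channel_id: [u8; 32], funding_satoshis: u64) -> Self {
        Self {
            context: ChannelContext {
                channel_id: temporary_channel_id,
                channel_value_satoshis: funding_satoshis,
            },
        }
    }
}

fn main() {
    let chan = InboundV1Channel::new_from_req([0x42; 32], 100_000);
    // State shared with `Channel` lives behind `.context`, so helpers written
    // against `ChannelContext` can serve both funded and unfunded channels.
    assert_eq!(chan.context.channel_value_satoshis, 100_000);
}
```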
use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret}; -use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel}; +use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel}; use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] use crate::ln::features::InvoiceFeatures; @@ -4901,7 +4901,7 @@ where msg.temporary_channel_id.clone())); } - let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider, + let mut channel = match InboundV1Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider, counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, &self.default_configuration, best_block_height, &self.logger, outbound_scid_alias) { diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 6af81108f27..bfab15f314c 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -20,9 +20,9 @@ use crate::chain::transaction::OutPoint; use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash}; -use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel}; +use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel}; use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; -use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, Channel, ChannelError}; +use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError}; use crate::ln::{chan_utils, onion_utils}; use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment}; use crate::routing::gossip::{NetworkGraph, NetworkUpdate}; @@ -6961,11 +6961,11 @@ fn test_user_configurable_csv_delay() { } } else { assert!(false) } - // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_from_req() + // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new_from_req() nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); open_channel.to_self_delay = 200; - if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), + if let Err(error) = InboundV1Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: 
Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, &low_our_to_self_config, 0, &nodes[0].logger, 42) { @@ -6993,11 +6993,11 @@ fn test_user_configurable_csv_delay() { } else { panic!(); } check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }); - // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req() + // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new_from_req() nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); open_channel.to_self_delay = 200; - if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), + if let Err(error) = InboundV1Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, &high_their_to_self_config, 0, &nodes[0].logger, 42) { From 2ea27e02cd7657c31aa29e1a378743ff7de35972 Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Tue, 13 Jun 2023 17:31:28 +0200 Subject: [PATCH 19/25] Move `Channel::force_shutdown` to `ChannelContext` impl --- lightning/src/ln/channel.rs | 92 +++++++++++++++--------------- lightning/src/ln/channelmanager.rs | 16 +++--- 2 files changed, 54 insertions(+), 54 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 7e983fa7788..bedaa576e1f 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1874,6 +1874,52 @@ impl ChannelContext { None } } + + /// Gets the latest commitment transaction and any dependent transactions for relay (forcing + /// shutdown of this channel - no more calls into this Channel may be made afterwards except + /// those explicitly stated to be allowed after shutdown completes, eg some simple getters). + /// Also returns the list of payment_hashes for channels which we can safely fail backwards + /// immediately (others we will have to allow to time out). + pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult { + // Note that we MUST only generate a monitor update that indicates force-closure - we're + // called during initialization prior to the chain_monitor in the encompassing ChannelManager + // being fully configured in some cases. Thus, its likely any monitor events we generate will + // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more. + assert!(self.channel_state != ChannelState::ShutdownComplete as u32); + + // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and + // return them to fail the payment. + let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len()); + let counterparty_node_id = self.get_counterparty_node_id(); + for htlc_update in self.holding_cell_htlc_updates.drain(..) { + match htlc_update { + HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. 
} => { + dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id)); + }, + _ => {} + } + } + let monitor_update = if let Some(funding_txo) = self.get_funding_txo() { + // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent), + // returning a channel monitor update here would imply a channel monitor update before + // we even registered the channel monitor to begin with, which is invalid. + // Thus, if we aren't actually at a point where we could conceivably broadcast the + // funding transaction, don't return a funding txo (which prevents providing the + // monitor update to the user, even if we return one). + // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more. + if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 { + self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID; + Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate { + update_id: self.latest_monitor_update_id, + updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }], + })) + } else { None } + } else { None }; + + self.channel_state = ChannelState::ShutdownComplete as u32; + self.update_time_counter += 1; + (monitor_update, dropped_outbound_htlcs) + } } // Internal utility functions for channels @@ -5823,52 +5869,6 @@ impl Channel { Ok((shutdown, monitor_update, dropped_outbound_htlcs)) } - /// Gets the latest commitment transaction and any dependent transactions for relay (forcing - /// shutdown of this channel - no more calls into this Channel may be made afterwards except - /// those explicitly stated to be allowed after shutdown completes, eg some simple getters). - /// Also returns the list of payment_hashes for channels which we can safely fail backwards - /// immediately (others we will have to allow to time out). - pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult { - // Note that we MUST only generate a monitor update that indicates force-closure - we're - // called during initialization prior to the chain_monitor in the encompassing ChannelManager - // being fully configured in some cases. Thus, its likely any monitor events we generate will - // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more. - assert!(self.context.channel_state != ChannelState::ShutdownComplete as u32); - - // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and - // return them to fail the payment. - let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len()); - let counterparty_node_id = self.context.get_counterparty_node_id(); - for htlc_update in self.context.holding_cell_htlc_updates.drain(..) { - match htlc_update { - HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => { - dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.context.channel_id)); - }, - _ => {} - } - } - let monitor_update = if let Some(funding_txo) = self.context.get_funding_txo() { - // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent), - // returning a channel monitor update here would imply a channel monitor update before - // we even registered the channel monitor to begin with, which is invalid. 
- // Thus, if we aren't actually at a point where we could conceivably broadcast the - // funding transaction, don't return a funding txo (which prevents providing the - // monitor update to the user, even if we return one). - // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more. - if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 { - self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID; - Some((self.context.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate { - update_id: self.context.latest_monitor_update_id, - updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }], - })) - } else { None } - } else { None }; - - self.context.channel_state = ChannelState::ShutdownComplete as u32; - self.context.update_time_counter += 1; - (monitor_update, dropped_outbound_htlcs) - } - pub fn inflight_htlc_sources(&self) -> impl Iterator { self.context.holding_cell_htlc_updates.iter() .flat_map(|htlc_update| { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e8c767204b9..39ff93dd066 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1646,7 +1646,7 @@ macro_rules! convert_chan_err { ChannelError::Close(msg) => { log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); update_maps_on_chan_removal!($self, $channel); - let shutdown_res = $channel.force_shutdown(true); + let shutdown_res = $channel.context.force_shutdown(true); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(), shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) }, @@ -1813,7 +1813,7 @@ macro_rules! 
handle_new_monitor_update { update_maps_on_chan_removal!($self, $chan); let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown( "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(), - $chan.context.get_user_id(), $chan.force_shutdown(false), + $chan.context.get_user_id(), $chan.context.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok())); $remove; res @@ -2345,7 +2345,7 @@ where } }; log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); - self.finish_force_close_channel(chan.force_shutdown(broadcast)); + self.finish_force_close_channel(chan.context.force_shutdown(broadcast)); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { let mut peer_state = peer_state_mutex.lock().unwrap(); peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { @@ -3106,7 +3106,7 @@ where let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger) .map_err(|e| if let ChannelError::Close(msg) = e { - MsgHandleErrInternal::from_finish_shutdown(msg, chan.context.channel_id(), chan.context.get_user_id(), chan.force_shutdown(true), None) + MsgHandleErrInternal::from_finish_shutdown(msg, chan.context.channel_id(), chan.context.get_user_id(), chan.context.force_shutdown(true), None) } else { unreachable!(); }); match funding_res { Ok(funding_msg) => (funding_msg, chan), @@ -5686,7 +5686,7 @@ where let pending_msg_events = &mut peer_state.pending_msg_events; if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) { let mut chan = remove_channel!(self, chan_entry); - failed_channels.push(chan.force_shutdown(false)); + failed_channels.push(chan.context.force_shutdown(false)); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -6533,7 +6533,7 @@ where update_maps_on_chan_removal!(self, channel); // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. - failed_channels.push(channel.force_shutdown(true)); + failed_channels.push(channel.context.force_shutdown(true)); if let Ok(update) = self.get_channel_update_for_broadcast(&channel) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -7963,7 +7963,7 @@ where log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); - let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true); + let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true); if let Some((counterparty_node_id, funding_txo, update)) = monitor_update { pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update @@ -8021,7 +8021,7 @@ where // If we were persisted and shut down while the initial ChannelMonitor persistence // was in-progress, we never broadcasted the funding transaction and can still // safely discard the channel. 
- let _ = channel.force_shutdown(false); + let _ = channel.context.force_shutdown(false); channel_closures.push_back((events::Event::ChannelClosed { channel_id: channel.context.channel_id(), user_channel_id: channel.context.get_user_id(), From 4ad67cfe183bf5808f102e98c474fd45fb0a206c Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 7 Jun 2023 19:22:59 +0200 Subject: [PATCH 20/25] Refactor channel map update macros for use with `ChannelContext` --- lightning/src/ln/channelmanager.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 39ff93dd066..bebc1a8896e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1614,10 +1614,10 @@ macro_rules! handle_error { } macro_rules! update_maps_on_chan_removal { - ($self: expr, $channel: expr) => {{ - $self.id_to_peer.lock().unwrap().remove(&$channel.context.channel_id()); + ($self: expr, $channel_context: expr) => {{ + $self.id_to_peer.lock().unwrap().remove(&$channel_context.channel_id()); let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); - if let Some(short_id) = $channel.context.get_short_channel_id() { + if let Some(short_id) = $channel_context.get_short_channel_id() { short_to_chan_info.remove(&short_id); } else { // If the channel was never confirmed on-chain prior to its closure, remove the @@ -1626,10 +1626,10 @@ macro_rules! update_maps_on_chan_removal { // also don't want a counterparty to be able to trivially cause a memory leak by simply // opening a million channels with us which are closed before we ever reach the funding // stage. - let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.context.outbound_scid_alias()); + let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel_context.outbound_scid_alias()); debug_assert!(alias_removed); } - short_to_chan_info.remove(&$channel.context.outbound_scid_alias()); + short_to_chan_info.remove(&$channel_context.outbound_scid_alias()); }} } @@ -1645,7 +1645,7 @@ macro_rules! convert_chan_err { }, ChannelError::Close(msg) => { log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); - update_maps_on_chan_removal!($self, $channel); + update_maps_on_chan_removal!($self, &$channel.context); let shutdown_res = $channel.context.force_shutdown(true); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(), shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) @@ -1688,7 +1688,7 @@ macro_rules! remove_channel { ($self: expr, $entry: expr) => { { let channel = $entry.remove_entry().1; - update_maps_on_chan_removal!($self, channel); + update_maps_on_chan_removal!($self, &channel.context); channel } } @@ -1810,7 +1810,7 @@ macro_rules! 
handle_new_monitor_update { ChannelMonitorUpdateStatus::PermanentFailure => { log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan.context.channel_id()[..])); - update_maps_on_chan_removal!($self, $chan); + update_maps_on_chan_removal!($self, &$chan.context); let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown( "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(), $chan.context.get_user_id(), $chan.context.force_shutdown(false), @@ -2798,6 +2798,7 @@ where self.get_channel_update_for_onion(short_channel_id, chan) } + fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<::Signer>) -> Result { log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id())); let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..]; @@ -5826,7 +5827,7 @@ where log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transactions(&[&tx]); - update_maps_on_chan_removal!(self, chan); + update_maps_on_chan_removal!(self, &chan.context); false } else { true } }, @@ -6530,7 +6531,7 @@ where } } } else if let Err(reason) = res { - update_maps_on_chan_removal!(self, channel); + update_maps_on_chan_removal!(self, &channel.context); // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. failed_channels.push(channel.context.force_shutdown(true)); @@ -6790,7 +6791,7 @@ where peer_state.channel_by_id.retain(|_, chan| { chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); if chan.is_shutdown() { - update_maps_on_chan_removal!(self, chan); + update_maps_on_chan_removal!(self, &chan.context); self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer); return false; } From 4b1e2865cf3c77d53f63aa9a8e4518c25c385fce Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 7 Jun 2023 19:52:21 +0200 Subject: [PATCH 21/25] Create and use methods for counting channels This commit also adds two new maps to `PeerState` for keeping track of `OutboundV1Channel`s and `InboundV1Channel`s so that further commits are a bit easier to review. --- lightning/src/ln/channelmanager.rs | 85 +++++++++++++++-------- lightning/src/ln/functional_test_utils.rs | 22 ++++++ 2 files changed, 79 insertions(+), 28 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bebc1a8896e..e07c26eaf0f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -613,6 +613,18 @@ pub(super) struct PeerState { /// `channel_id`, the `temporary_channel_id` key in the map is updated and is replaced by the /// `channel_id`. pub(super) channel_by_id: HashMap<[u8; 32], Channel>, + /// `temporary_channel_id` -> `OutboundV1Channel`. + /// + /// Holds all outbound V1 channels where the peer is the counterparty. Once an outbound channel has + /// been assigned a `channel_id`, the entry in this map is removed and one is created in + /// `channel_by_id`. + pub(super) outbound_v1_channel_by_id: HashMap<[u8; 32], OutboundV1Channel>, + /// `temporary_channel_id` -> `InboundV1Channel`. + /// + /// Holds all inbound V1 channels where the peer is the counterparty. Once an inbound channel has + /// been assigned a `channel_id`, the entry in this map is removed and one is created in + /// `channel_by_id`. 
+ pub(super) inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel>, /// The latest `InitFeatures` we heard from the peer. latest_features: InitFeatures, /// Messages to send to the peer - pushed to in the same lock that they are generated in (except @@ -654,6 +666,20 @@ impl PeerState { } self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty() } + + // Returns a count of all channels we have with this peer, including pending channels. + fn total_channel_count(&self) -> usize { + self.channel_by_id.len() + + self.outbound_v1_channel_by_id.len() + + self.inbound_v1_channel_by_id.len() + } + + // Returns a bool indicating if the given `channel_id` matches a channel we have with this peer. + fn has_channel(&self, channel_id: &[u8; 32]) -> bool { + self.channel_by_id.contains_key(channel_id) || + self.outbound_v1_channel_by_id.contains_key(channel_id) || + self.inbound_v1_channel_by_id.contains_key(channel_id) + } } /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is @@ -4765,13 +4791,14 @@ where fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - let peers_without_funded_channels = self.peers_without_funded_channels(|peer| !peer.channel_by_id.is_empty()); + let peers_without_funded_channels = + self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 }); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - let is_only_peer_channel = peer_state.channel_by_id.len() == 1; + let is_only_peer_channel = peer_state.total_channel_count() == 1; match peer_state.channel_by_id.entry(temporary_channel_id.clone()) { hash_map::Entry::Occupied(mut channel) => { if !channel.get().inbound_is_awaiting_accept() { @@ -4833,7 +4860,7 @@ where let peer = peer_mtx.lock().unwrap(); if !maybe_count_peer(&*peer) { continue; } let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height); - if num_unfunded_channels == peer.channel_by_id.len() { + if num_unfunded_channels == peer.total_channel_count() { peers_without_funded_channels += 1; } } @@ -4912,33 +4939,31 @@ where }, Ok(res) => res }; - match peer_state.channel_by_id.entry(channel.context.channel_id()) { - hash_map::Entry::Occupied(_) => { - self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); - return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone())) - }, - hash_map::Entry::Vacant(entry) => { - if !self.default_configuration.manually_accept_inbound_channels { - if channel.context.get_channel_type().requires_zero_conf() { - return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); - } - peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { - node_id: counterparty_node_id.clone(), - msg: channel.accept_inbound_channel(user_channel_id), - }); - } else { - let mut pending_events = self.pending_events.lock().unwrap(); - 
pending_events.push_back((events::Event::OpenChannelRequest { - temporary_channel_id: msg.temporary_channel_id.clone(), - counterparty_node_id: counterparty_node_id.clone(), - funding_satoshis: msg.funding_satoshis, - push_msat: msg.push_msat, - channel_type: channel.context.get_channel_type().clone(), - }, None)); + let channel_id = channel.context.channel_id(); + let channel_exists = peer_state.has_channel(&channel_id); + if channel_exists { + self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); + return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone())) + } else { + if !self.default_configuration.manually_accept_inbound_channels { + if channel.context.get_channel_type().requires_zero_conf() { + return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); } - - entry.insert(channel); + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + node_id: counterparty_node_id.clone(), + msg: channel.accept_inbound_channel(user_channel_id), + }); + } else { + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push_back((events::Event::OpenChannelRequest { + temporary_channel_id: msg.temporary_channel_id.clone(), + counterparty_node_id: counterparty_node_id.clone(), + funding_satoshis: msg.funding_satoshis, + push_msat: msg.push_msat, + channel_type: channel.context.get_channel_type().clone(), + }, None)); } + peer_state.channel_by_id.insert(channel_id, channel); } Ok(()) } @@ -6878,6 +6903,8 @@ where } e.insert(Mutex::new(PeerState { channel_by_id: HashMap::new(), + outbound_v1_channel_by_id: HashMap::new(), + inbound_v1_channel_by_id: HashMap::new(), latest_features: init_msg.features.clone(), pending_msg_events: Vec::new(), monitor_update_blocked_actions: BTreeMap::new(), @@ -8081,6 +8108,8 @@ where let peer_pubkey = Readable::read(reader)?; let peer_state = PeerState { channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()), + outbound_v1_channel_by_id: HashMap::new(), + inbound_v1_channel_by_id: HashMap::new(), latest_features: Readable::read(reader)?, pending_msg_events: Vec::new(), monitor_update_blocked_actions: BTreeMap::new(), diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index a582836a70e..7209c4a0b83 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -783,6 +783,28 @@ macro_rules! get_channel_ref { } } +#[cfg(test)] +macro_rules! get_inbound_v1_channel_ref { + ($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => { + { + $per_peer_state_lock = $node.node.per_peer_state.read().unwrap(); + $peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); + $peer_state_lock.inbound_v1_channel_by_id.get_mut(&$channel_id).unwrap() + } + } +} + +#[cfg(test)] +macro_rules! 
get_outbound_v1_channel_ref { + ($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => { + { + $per_peer_state_lock = $node.node.per_peer_state.read().unwrap(); + $peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); + $peer_state_lock.outbound_v1_channel_by_id.get_mut(&$channel_id).unwrap() + } + } +} + #[cfg(test)] macro_rules! get_feerate { ($node: expr, $counterparty_node: expr, $channel_id: expr) => { From 4a0cd5cf55df01ed7614d51c721afc4b34fb5b01 Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 7 Jun 2023 18:09:23 +0200 Subject: [PATCH 22/25] Move outbound channel methods into `OutboundV1Channel`'s impl --- lightning/src/ln/channel.rs | 728 ++++++++++++---------- lightning/src/ln/channelmanager.rs | 153 +++-- lightning/src/ln/functional_test_utils.rs | 11 - lightning/src/ln/functional_tests.rs | 28 +- lightning/src/ln/payment_tests.rs | 8 +- 5 files changed, 530 insertions(+), 398 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index bedaa576e1f..e1f3e85ff46 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -374,6 +374,16 @@ impl fmt::Debug for ChannelError { } } +impl fmt::Display for ChannelError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + &ChannelError::Ignore(ref e) => write!(f, "{}", e), + &ChannelError::Warn(ref e) => write!(f, "{}", e), + &ChannelError::Close(ref e) => write!(f, "{}", e), + } + } +} + macro_rules! secp_check { ($res: expr, $err: expr) => { match $res { @@ -2000,36 +2010,6 @@ struct CommitmentTxInfoCached { } impl Channel { - /// If we receive an error message, it may only be a rejection of the channel type we tried, - /// not of our ability to open any channel at all. Thus, on error, we should first call this - /// and see if we get a new `OpenChannel` message, otherwise the channel is failed. - pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result { - if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); } - if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() { - // We've exhausted our options - return Err(()); - } - // We support opening a few different types of channels. Try removing our additional - // features one by one until we've either arrived at our default or the counterparty has - // accepted one. - // - // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the - // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type` - // checks whether the counterparty supports every feature, this would only happen if the - // counterparty is advertising the feature, but rejecting channels proposing the feature for - // whatever reason. 
- if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { - self.context.channel_type.clear_anchors_zero_fee_htlc_tx(); - assert!(self.context.channel_transaction_parameters.opt_non_zero_fee_anchors.is_none()); - self.context.channel_transaction_parameters.opt_anchors = None; - } else if self.context.channel_type.supports_scid_privacy() { - self.context.channel_type.clear_scid_privacy(); - } else { - self.context.channel_type = ChannelTypeFeatures::only_static_remote_key(); - } - Ok(self.get_open_channel(chain_hash)) - } - // Constructors: fn check_remote_fee(fee_estimator: &LowerBoundedFeeEstimator, @@ -2442,135 +2422,6 @@ impl Channel { // Message handlers: - pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> { - let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits }; - - // Check sanity of message fields: - if !self.context.is_outbound() { - return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned())); - } - if self.context.channel_state != ChannelState::OurInitSent as u32 { - return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned())); - } - if msg.dust_limit_satoshis > 21000000 * 100000000 { - return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis))); - } - if msg.channel_reserve_satoshis > self.context.channel_value_satoshis { - return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis))); - } - if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis { - return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis))); - } - if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis { - return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})", - msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis))); - } - let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000; - if msg.htlc_minimum_msat >= full_channel_value_msat { - return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat))); - } - let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); - if msg.to_self_delay > max_delay_acceptable { - return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay))); - } - if msg.max_accepted_htlcs < 1 { - return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned())); - } - if msg.max_accepted_htlcs > MAX_HTLCS { - return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS))); - } - - // Now check against optional parameters as set by config... 
- if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat { - return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat))); - } - if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat { - return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat))); - } - if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis { - return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis))); - } - if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs { - return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs))); - } - if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); - } - if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); - } - if msg.minimum_depth > peer_limits.max_minimum_depth { - return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth))); - } - - if let Some(ty) = &msg.channel_type { - if *ty != self.context.channel_type { - return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned())); - } - } else if their_features.supports_channel_type() { - // Assume they've accepted the channel type as they said they understand it. - } else { - let channel_type = ChannelTypeFeatures::from_init(&their_features); - if channel_type != ChannelTypeFeatures::only_static_remote_key() { - return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); - } - self.context.channel_type = channel_type; - } - - let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { - match &msg.shutdown_scriptpubkey { - &Some(ref script) => { - // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything - if script.len() == 0 { - None - } else { - if !script::is_bolt2_compliant(&script, their_features) { - return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))); - } - Some(script.clone()) - } - }, - // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel - &None => { - return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out".to_owned())); - } - } - } else { None }; - - self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis; - self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000); - self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis); - self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat; - self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs; - - if peer_limits.trust_own_funding_0conf { - self.context.minimum_depth = Some(msg.minimum_depth); - } else { - self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth)); - } - - let counterparty_pubkeys = ChannelPublicKeys { - funding_pubkey: msg.funding_pubkey, - revocation_basepoint: msg.revocation_basepoint, - payment_point: msg.payment_point, - delayed_payment_basepoint: msg.delayed_payment_basepoint, - htlc_basepoint: msg.htlc_basepoint - }; - - self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters { - selected_contest_delay: msg.to_self_delay, - pubkeys: counterparty_pubkeys, - }); - - self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point); - self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey; - - self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32; - self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now. - - Ok(()) - } - fn funding_created_signature(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger { let funding_script = self.context.get_funding_redeemscript(); @@ -5087,48 +4938,6 @@ impl Channel { // Methods to get unprompted messages to send to the remote end (or where we already returned // something in the handler for the message that prompted this message): - pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel { - if !self.context.is_outbound() { - panic!("Tried to open a channel for an inbound channel?"); - } - if self.context.channel_state != ChannelState::OurInitSent as u32 { - panic!("Cannot generate an open_channel after we've moved forward"); - } - - if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { - panic!("Tried to send an open_channel for a channel that has already advanced"); - } - - let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - let keys = self.context.get_holder_pubkeys(); - - msgs::OpenChannel { - chain_hash, - temporary_channel_id: self.context.channel_id, - funding_satoshis: self.context.channel_value_satoshis, - push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat, - dust_limit_satoshis: self.context.holder_dust_limit_satoshis, - max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, - channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, - htlc_minimum_msat: self.context.holder_htlc_minimum_msat, - feerate_per_kw: self.context.feerate_per_kw as u32, - to_self_delay: self.context.get_holder_selected_contest_delay(), - max_accepted_htlcs: self.context.holder_max_accepted_htlcs, - funding_pubkey: keys.funding_pubkey, - revocation_basepoint: 
keys.revocation_basepoint, - payment_point: keys.payment_point, - delayed_payment_basepoint: keys.delayed_payment_basepoint, - htlc_basepoint: keys.htlc_basepoint, - first_per_commitment_point, - channel_flags: if self.context.config.announced_channel {1} else {0}, - shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { - Some(script) => script.clone().into_inner(), - None => Builder::new().into_script(), - }), - channel_type: Some(self.context.channel_type.clone()), - } - } - pub fn inbound_is_awaiting_accept(&self) -> bool { self.context.inbound_awaiting_accept } @@ -5206,89 +5015,29 @@ impl Channel { self.generate_accept_channel_message() } - /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created) - fn get_outbound_funding_created_signature(&mut self, logger: &L) -> Result where L::Target: Logger { - let counterparty_keys = self.context.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; - Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) - .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0) - } - - /// Updates channel state with knowledge of the funding transaction's txid/index, and generates - /// a funding_created message for the remote peer. - /// Panics if called at some time other than immediately after initial handshake, if called twice, - /// or if called on an inbound channel. - /// Note that channel_id changes during this call! - /// Do NOT broadcast the funding transaction until after a successful funding_signed call! - /// If an Err is returned, it is a ChannelError::Close. - pub fn get_outbound_funding_created(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result where L::Target: Logger { - if !self.context.is_outbound() { - panic!("Tried to create outbound funding_created message on an inbound channel!"); - } - if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { - panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)"); + /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly + /// announceable and available for use (have exchanged ChannelReady messages in both + /// directions). Should be used for both broadcasted announcements and in response to an + /// AnnouncementSignatures message from the remote peer. + /// + /// Will only fail if we're not in a state where channel_announcement may be sent (including + /// closing). + /// + /// This will only return ChannelError::Ignore upon failure. 
+ fn get_channel_announcement( + &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig, + ) -> Result where NS::Target: NodeSigner { + if !self.context.config.announced_channel { + return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned())); } - if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || - self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { - panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); + if !self.context.is_usable() { + return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned())); } - self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo); - self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); - - let signature = match self.get_outbound_funding_created_signature(logger) { - Ok(res) => res, - Err(e) => { - log_error!(logger, "Got bad signatures: {:?}!", e); - self.context.channel_transaction_parameters.funding_outpoint = None; - return Err(e); - } - }; - - let temporary_channel_id = self.context.channel_id; - - // Now that we're past error-generating stuff, update our local state: - - self.context.channel_state = ChannelState::FundingCreated as u32; - self.context.channel_id = funding_txo.to_channel_id(); - self.context.funding_transaction = Some(funding_transaction); - - Ok(msgs::FundingCreated { - temporary_channel_id, - funding_txid: funding_txo.txid, - funding_output_index: funding_txo.index, - signature, - #[cfg(taproot)] - partial_signature_with_nonce: None, - #[cfg(taproot)] - next_local_nonce: None, - }) - } - - /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly - /// announceable and available for use (have exchanged ChannelReady messages in both - /// directions). Should be used for both broadcasted announcements and in response to an - /// AnnouncementSignatures message from the remote peer. - /// - /// Will only fail if we're not in a state where channel_announcement may be sent (including - /// closing). - /// - /// This will only return ChannelError::Ignore upon failure. 
- fn get_channel_announcement( - &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig, - ) -> Result where NS::Target: NodeSigner { - if !self.context.config.announced_channel { - return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned())); - } - if !self.context.is_usable() { - return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned())); - } - - let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node) - .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?); - let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id()); - let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice(); + let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node) + .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?); + let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id()); + let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice(); let msg = msgs::UnsignedChannelAnnouncement { features: channelmanager::provided_channel_features(&user_config), @@ -5888,37 +5637,11 @@ pub(super) struct OutboundV1Channel { } impl OutboundV1Channel { - fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures { - // The default channel type (ie the first one we try) depends on whether the channel is - // public - if it is, we just go with `only_static_remotekey` as it's the only option - // available. If it's private, we first try `scid_privacy` as it provides better privacy - // with no other changes, and fall back to `only_static_remotekey`. - let mut ret = ChannelTypeFeatures::only_static_remote_key(); - if !config.channel_handshake_config.announced_channel && - config.channel_handshake_config.negotiate_scid_privacy && - their_features.supports_scid_privacy() { - ret.set_scid_privacy_required(); - } - - // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we - // set it now. If they don't understand it, we'll fall back to our default of - // `only_static_remotekey`. - #[cfg(anchors)] - { // Attributes are not allowed on if expressions on our current MSRV of 1.41. 
- if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx && - their_features.supports_anchors_zero_fee_htlc_tx() { - ret.set_anchors_zero_fee_htlc_tx_required(); - } - } - - ret - } - - pub fn new_outbound( + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64 - ) -> Result, APIError> + ) -> Result, APIError> where ES::Target: EntropySource, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -5982,7 +5705,7 @@ impl OutboundV1Channel { let temporary_channel_id = entropy_source.get_secure_random_bytes(); - Ok(Channel { + Ok(Self { context: ChannelContext { user_id, @@ -6111,6 +5834,299 @@ impl OutboundV1Channel { } }) } + + /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created) + fn get_outbound_funding_created_signature(&mut self, logger: &L) -> Result where L::Target: Logger { + let counterparty_keys = self.context.build_remote_transaction_keys(); + let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) + .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0) + } + + /// Updates channel state with knowledge of the funding transaction's txid/index, and generates + /// a funding_created message for the remote peer. + /// Panics if called at some time other than immediately after initial handshake, if called twice, + /// or if called on an inbound channel. + /// Note that channel_id changes during this call! + /// Do NOT broadcast the funding transaction until after a successful funding_signed call! + /// If an Err is returned, it is a ChannelError::Close. 
+ pub fn get_outbound_funding_created(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) + -> Result<(Channel, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger { + if !self.context.is_outbound() { + panic!("Tried to create outbound funding_created message on an inbound channel!"); + } + if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { + panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)"); + } + if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || + self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || + self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); + } + + self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo); + self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); + + let signature = match self.get_outbound_funding_created_signature(logger) { + Ok(res) => res, + Err(e) => { + log_error!(logger, "Got bad signatures: {:?}!", e); + self.context.channel_transaction_parameters.funding_outpoint = None; + return Err((self, e)); + } + }; + + let temporary_channel_id = self.context.channel_id; + + // Now that we're past error-generating stuff, update our local state: + + self.context.channel_state = ChannelState::FundingCreated as u32; + self.context.channel_id = funding_txo.to_channel_id(); + self.context.funding_transaction = Some(funding_transaction); + + let channel = Channel { + context: self.context, + }; + + Ok((channel, msgs::FundingCreated { + temporary_channel_id, + funding_txid: funding_txo.txid, + funding_output_index: funding_txo.index, + signature, + #[cfg(taproot)] + partial_signature_with_nonce: None, + #[cfg(taproot)] + next_local_nonce: None, + })) + } + + fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures { + // The default channel type (ie the first one we try) depends on whether the channel is + // public - if it is, we just go with `only_static_remotekey` as it's the only option + // available. If it's private, we first try `scid_privacy` as it provides better privacy + // with no other changes, and fall back to `only_static_remotekey`. + let mut ret = ChannelTypeFeatures::only_static_remote_key(); + if !config.channel_handshake_config.announced_channel && + config.channel_handshake_config.negotiate_scid_privacy && + their_features.supports_scid_privacy() { + ret.set_scid_privacy_required(); + } + + // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we + // set it now. If they don't understand it, we'll fall back to our default of + // `only_static_remotekey`. + #[cfg(anchors)] + { // Attributes are not allowed on if expressions on our current MSRV of 1.41. + if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx && + their_features.supports_anchors_zero_fee_htlc_tx() { + ret.set_anchors_zero_fee_htlc_tx_required(); + } + } + + ret + } + + /// If we receive an error message, it may only be a rejection of the channel type we tried, + /// not of our ability to open any channel at all. 
Thus, on error, we should first call this + /// and see if we get a new `OpenChannel` message, otherwise the channel is failed. + pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result { + if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); } + if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() { + // We've exhausted our options + return Err(()); + } + // We support opening a few different types of channels. Try removing our additional + // features one by one until we've either arrived at our default or the counterparty has + // accepted one. + // + // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the + // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type` + // checks whether the counterparty supports every feature, this would only happen if the + // counterparty is advertising the feature, but rejecting channels proposing the feature for + // whatever reason. + if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { + self.context.channel_type.clear_anchors_zero_fee_htlc_tx(); + assert!(self.context.channel_transaction_parameters.opt_non_zero_fee_anchors.is_none()); + self.context.channel_transaction_parameters.opt_anchors = None; + } else if self.context.channel_type.supports_scid_privacy() { + self.context.channel_type.clear_scid_privacy(); + } else { + self.context.channel_type = ChannelTypeFeatures::only_static_remote_key(); + } + Ok(self.get_open_channel(chain_hash)) + } + + pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel { + if !self.context.is_outbound() { + panic!("Tried to open a channel for an inbound channel?"); + } + if self.context.channel_state != ChannelState::OurInitSent as u32 { + panic!("Cannot generate an open_channel after we've moved forward"); + } + + if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + panic!("Tried to send an open_channel for a channel that has already advanced"); + } + + let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let keys = self.context.get_holder_pubkeys(); + + msgs::OpenChannel { + chain_hash, + temporary_channel_id: self.context.channel_id, + funding_satoshis: self.context.channel_value_satoshis, + push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat, + dust_limit_satoshis: self.context.holder_dust_limit_satoshis, + max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, + channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, + htlc_minimum_msat: self.context.holder_htlc_minimum_msat, + feerate_per_kw: self.context.feerate_per_kw as u32, + to_self_delay: self.context.get_holder_selected_contest_delay(), + max_accepted_htlcs: self.context.holder_max_accepted_htlcs, + funding_pubkey: keys.funding_pubkey, + revocation_basepoint: keys.revocation_basepoint, + payment_point: keys.payment_point, + delayed_payment_basepoint: keys.delayed_payment_basepoint, + htlc_basepoint: keys.htlc_basepoint, + first_per_commitment_point, + channel_flags: if self.context.config.announced_channel {1} else {0}, + shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { + Some(script) => script.clone().into_inner(), + None => Builder::new().into_script(), + }), + channel_type: 
Some(self.context.channel_type.clone()), + } + } + + // Message handlers + pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> { + let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits }; + + // Check sanity of message fields: + if !self.context.is_outbound() { + return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned())); + } + if self.context.channel_state != ChannelState::OurInitSent as u32 { + return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned())); + } + if msg.dust_limit_satoshis > 21000000 * 100000000 { + return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis))); + } + if msg.channel_reserve_satoshis > self.context.channel_value_satoshis { + return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis))); + } + if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis { + return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis))); + } + if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis { + return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})", + msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis))); + } + let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000; + if msg.htlc_minimum_msat >= full_channel_value_msat { + return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat))); + } + let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); + if msg.to_self_delay > max_delay_acceptable { + return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay))); + } + if msg.max_accepted_htlcs < 1 { + return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned())); + } + if msg.max_accepted_htlcs > MAX_HTLCS { + return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS))); + } + + // Now check against optional parameters as set by config... 
+ if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat { + return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat))); + } + if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat { + return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat))); + } + if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis { + return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis))); + } + if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs { + return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs))); + } + if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); + } + if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); + } + if msg.minimum_depth > peer_limits.max_minimum_depth { + return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth))); + } + + if let Some(ty) = &msg.channel_type { + if *ty != self.context.channel_type { + return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned())); + } + } else if their_features.supports_channel_type() { + // Assume they've accepted the channel type as they said they understand it. + } else { + let channel_type = ChannelTypeFeatures::from_init(&their_features); + if channel_type != ChannelTypeFeatures::only_static_remote_key() { + return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); + } + self.context.channel_type = channel_type; + } + + let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { + match &msg.shutdown_scriptpubkey { + &Some(ref script) => { + // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything + if script.len() == 0 { + None + } else { + if !script::is_bolt2_compliant(&script, their_features) { + return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))); + } + Some(script.clone()) + } + }, + // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel + &None => { + return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out".to_owned())); + } + } + } else { None }; + + self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis; + self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000); + self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis); + self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat; + self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs; + + if peer_limits.trust_own_funding_0conf { + self.context.minimum_depth = Some(msg.minimum_depth); + } else { + self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth)); + } + + let counterparty_pubkeys = ChannelPublicKeys { + funding_pubkey: msg.funding_pubkey, + revocation_basepoint: msg.revocation_basepoint, + payment_point: msg.payment_point, + delayed_payment_basepoint: msg.delayed_payment_basepoint, + htlc_basepoint: msg.htlc_basepoint + }; + + self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters { + selected_contest_delay: msg.to_self_delay, + pubkeys: counterparty_pubkeys, + }); + + self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point); + self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey; + + self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32; + self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now. + + Ok(()) + } } /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment. @@ -7404,7 +7420,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - match OutboundV1Channel::::new_outbound(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) { + match OutboundV1Channel::::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) { Err(APIError::IncompatibleShutdownScript { script }) => { assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner()); }, @@ -7427,7 +7443,7 @@ mod tests { let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let node_a_chan = OutboundV1Channel::::new_outbound(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let node_a_chan = OutboundV1Channel::::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); // Now change the fee so we can check that the fee in the open_channel message is the // same as the old fee. @@ -7446,6 +7462,7 @@ mod tests { let network = Network::Testnet; let keys_provider = test_utils::TestKeysInterface::new(&seed, network); let logger = test_utils::TestLogger::new(); + let best_block = BestBlock::from_network(network); // Go through the flow of opening a channel between two nodes, making sure // they have different dust limits. 
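The test hunks around this point are updated to drive the new outbound flow: `OutboundV1Channel::new` builds the unfunded channel, `accept_channel` handles the counterparty's response, and `get_outbound_funding_created` now consumes the unfunded channel, returning the funded `Channel` together with the `funding_created` message (or handing `self` back on failure). The sketch below is an editor's illustration of that consume-and-promote shape only; it uses simplified, hypothetical stand-in types and is not part of the patch or of LDK's actual API.

// Simplified, hypothetical stand-ins for `ChannelContext`, `OutboundV1Channel` and
// `Channel`; field layouts and signatures are illustrative, not LDK's real ones.
struct ChannelContext {
    channel_id: [u8; 32],
    funding_txid: Option<[u8; 32]>,
}

// Unfunded outbound channel: exists only until `funding_created` is built.
struct OutboundV1Channel {
    context: ChannelContext,
}

// Funded channel: what the unfunded wrapper is promoted into.
struct Channel {
    context: ChannelContext,
}

// Stand-in for the wire message produced alongside the promotion.
struct FundingCreated {
    temporary_channel_id: [u8; 32],
    funding_txid: [u8; 32],
}

impl OutboundV1Channel {
    // Consumes `self`. On success the caller receives the funded `Channel` plus the
    // message to send; on failure it receives the unfunded channel back, mirroring the
    // `Result<(Channel, msgs::FundingCreated), (Self, ChannelError)>` shape above.
    fn get_outbound_funding_created(
        mut self, funding_txid: [u8; 32],
    ) -> Result<(Channel, FundingCreated), (Self, String)> {
        if self.context.funding_txid.is_some() {
            return Err((self, "funding transaction already set".to_owned()));
        }
        let temporary_channel_id = self.context.channel_id;
        // The channel id changes during this call, as the real method documents.
        self.context.channel_id = funding_txid;
        self.context.funding_txid = Some(funding_txid);
        let msg = FundingCreated { temporary_channel_id, funding_txid };
        Ok((Channel { context: self.context }, msg))
    }
}

fn main() {
    let unfunded = OutboundV1Channel {
        context: ChannelContext { channel_id: [0x11; 32], funding_txid: None },
    };
    match unfunded.get_outbound_funding_created([0x22; 32]) {
        Ok((funded, msg)) => println!(
            "promoted; new channel id byte {:#04x}, temporary id byte {:#04x}",
            funded.context.channel_id[0], msg.temporary_channel_id[0]
        ),
        Err((_still_unfunded, e)) => println!("still unfunded: {}", e),
    }
}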
@@ -7453,7 +7470,7 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut node_a_chan = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. @@ -7467,6 +7484,18 @@ mod tests { node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap(); node_a_chan.context.holder_dust_limit_satoshis = 1560; + // Node A --> Node B: funding created + let output_script = node_a_chan.context.get_funding_redeemscript(); + let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut { + value: 10000000, script_pubkey: output_script.clone(), + }]}; + let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; + let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap(); + let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap(); + + // Node B --> Node A: funding signed + let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap(); + // Put some inbound and outbound HTLCs in A's channel. let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's. 
node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput { @@ -7521,7 +7550,7 @@ mod tests { let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut chan = OutboundV1Channel::::new_outbound(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut chan = OutboundV1Channel::::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.opt_anchors()); let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.opt_anchors()); @@ -7570,7 +7599,7 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut node_a_chan = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); // Create Node B's channel by receiving Node A's open_channel message let open_channel_msg = node_a_chan.get_open_channel(chain_hash); @@ -7587,11 +7616,11 @@ mod tests { value: 10000000, script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap(); + let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap(); let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap(); // Node B --> Node A: funding signed - let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger); + let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap(); // Now disconnect the two nodes and check that the commitment point in // Node B's channel_reestablish message is sane. @@ -7630,15 +7659,15 @@ mod tests { let mut config_101_percent = UserConfig::default(); config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101; - // Test that `new_outbound` creates a channel with the correct value for + // Test that `OutboundV1Channel::new` creates a channel with the correct value for // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, // which is set to the lower bound + 1 (2%) of the `channel_value`. 
- let chan_1 = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap(); + let chan_1 = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap(); let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000; assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64); // Test with the upper bound - 1 of valid values (99%). - let chan_2 = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap(); + let chan_2 = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap(); let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000; assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64); @@ -7656,16 +7685,16 @@ mod tests { let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000; assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64); - // Test that `new_outbound` uses the lower bound of the configurable percentage values (1%) + // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%) // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1. - let chan_5 = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap(); + let chan_5 = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap(); let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000; assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64); - // Test that `new_outbound` uses the upper bound of the configurable percentage values + // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value // than 100. 
- let chan_6 = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap(); + let chan_6 = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap(); let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000; assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat); @@ -7686,7 +7715,7 @@ mod tests { #[test] fn test_configured_holder_selected_channel_reserve_satoshis() { - // Test that `new_outbound` and `new_from_req` create a channel with the correct + // Test that `OutboundV1Channel::new` and `new_from_req` create a channel with the correct // channel reserves, when `their_channel_reserve_proportional_millionths` is configured. test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02); @@ -7718,7 +7747,7 @@ mod tests { let mut outbound_node_config = UserConfig::default(); outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32; - let chan = OutboundV1Channel::::new_outbound(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap(); + let chan = OutboundV1Channel::::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap(); let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64); assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve); @@ -7744,19 +7773,42 @@ mod tests { #[test] fn channel_update() { let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000}); + let logger = test_utils::TestLogger::new(); let secp_ctx = Secp256k1::new(); let seed = [42; 32]; let network = Network::Testnet; + let best_block = BestBlock::from_network(network); let chain_hash = genesis_block(network).header.block_hash(); let keys_provider = test_utils::TestKeysInterface::new(&seed, network); - // Create a channel. + // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); - assert!(node_a_chan.context.counterparty_forwarding_info.is_none()); - assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1); // the default - assert!(node_a_chan.context.counterparty_forwarding_info().is_none()); + let mut node_a_chan = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + + // Create Node B's channel by receiving Node A's open_channel message + // Make sure A's dust limit is as we expect. 
+ let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); + let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); + let mut node_b_chan = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap(); + + // Node B --> Node A: accept channel, explicitly setting B's dust limit. + let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0); + accept_channel_msg.dust_limit_satoshis = 546; + node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap(); + node_a_chan.context.holder_dust_limit_satoshis = 1560; + + // Node A --> Node B: funding created + let output_script = node_a_chan.context.get_funding_redeemscript(); + let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut { + value: 10000000, script_pubkey: output_script.clone(), + }]}; + let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; + let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap(); + let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap(); + + // Node B --> Node A: funding signed + let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap(); // Make sure that receiving a channel update will update the Channel as expected. let update = ChannelUpdate { @@ -7832,7 +7884,7 @@ mod tests { let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let mut config = UserConfig::default(); config.channel_handshake_config.announced_channel = false; - let mut chan = Channel::::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test + let mut chan = OutboundV1Channel::::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test chan.context.holder_dust_limit_satoshis = 546; chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel @@ -8551,7 +8603,7 @@ mod tests { let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let node_a_chan = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, + let node_a_chan = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key(); @@ -8585,7 +8637,7 @@ mod tests { // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both // need to signal it. 
- let channel_a = OutboundV1Channel::::new_outbound( + let channel_a = OutboundV1Channel::::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42, &config, 0, 42 @@ -8596,7 +8648,7 @@ mod tests { expected_channel_type.set_static_remote_key_required(); expected_channel_type.set_anchors_zero_fee_htlc_tx_required(); - let channel_a = OutboundV1Channel::::new_outbound( + let channel_a = OutboundV1Channel::::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); @@ -8634,7 +8686,7 @@ mod tests { let raw_init_features = static_remote_key_required | simple_anchors_required; let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec()); - let channel_a = OutboundV1Channel::::new_outbound( + let channel_a = OutboundV1Channel::::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); @@ -8681,7 +8733,7 @@ mod tests { // First, we'll try to open a channel between A and B where A requests a channel type for // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by // B as it's not supported by LDK. - let channel_a = OutboundV1Channel::::new_outbound( + let channel_a = OutboundV1Channel::::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); @@ -8700,7 +8752,7 @@ mod tests { // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the // original `option_anchors` feature, which should be rejected by A as it's not supported by // LDK. - let mut channel_a = OutboundV1Channel::::new_outbound( + let mut channel_a = OutboundV1Channel::::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init, 10000000, 100000, 42, &config, 0, 42 ).unwrap(); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e07c26eaf0f..27014052b04 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -607,11 +607,9 @@ impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction, /// State we hold per-peer. pub(super) struct PeerState { - /// `temporary_channel_id` or `channel_id` -> `channel`. + /// `channel_id` -> `Channel`. /// - /// Holds all channels where the peer is the counterparty. Once a channel has been assigned a - /// `channel_id`, the `temporary_channel_id` key in the map is updated and is replaced by the - /// `channel_id`. + /// Holds all funded channels where the peer is the counterparty. pub(super) channel_by_id: HashMap<[u8; 32], Channel>, /// `temporary_channel_id` -> `OutboundV1Channel`. /// @@ -1636,7 +1634,16 @@ macro_rules! handle_error { Err(err) }, } - } } + } }; + ($self: ident, $internal: expr) => { + match $internal { + Ok(res) => Ok(res), + Err((chan, msg_handle_err)) => { + let counterparty_node_id = chan.get_counterparty_node_id(); + handle_error!($self, Err(msg_handle_err), counterparty_node_id).map_err(|err| (chan, err)) + }, + } + }; } macro_rules! update_maps_on_chan_removal { @@ -1677,6 +1684,19 @@ macro_rules! 
convert_chan_err { shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) }, } + }; + ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, PREFUNDED) => { + match $err { + // We should only ever have `ChannelError::Close` when prefunded channels error. + // In any case, just close the channel. + ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => { + log_error!($self.logger, "Closing prefunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg); + update_maps_on_chan_removal!($self, &$channel_context); + let shutdown_res = $channel_context.force_shutdown(false); + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(), + shutdown_res, None)) + }, + } } } @@ -1695,6 +1715,21 @@ macro_rules! break_chan_entry { } } +macro_rules! try_v1_outbound_chan_entry { + ($self: ident, $res: expr, $entry: expr) => { + match $res { + Ok(res) => res, + Err(e) => { + let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), PREFUNDED); + if drop { + $entry.remove_entry(); + } + return Err(res); + } + } + } +} + macro_rules! try_chan_entry { ($self: ident, $res: expr, $entry: expr) => { match $res { @@ -2070,7 +2105,7 @@ where let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); let their_features = &peer_state.latest_features; let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; - match OutboundV1Channel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, + match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config, self.best_block.read().unwrap().height(), outbound_scid_alias) { @@ -2084,7 +2119,7 @@ where let res = channel.get_open_channel(self.genesis_hash.clone()); let temporary_channel_id = channel.context.channel_id(); - match peer_state.channel_by_id.entry(temporary_channel_id) { + match peer_state.outbound_v1_channel_by_id.entry(temporary_channel_id) { hash_map::Entry::Occupied(_) => { if cfg!(fuzzing) { return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() }); @@ -2102,7 +2137,7 @@ where Ok(temporary_channel_id) } - fn list_channels_with_filter::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec { + fn list_funded_channels_with_filter::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec { // Allocate our best estimate of the number of channels we have in the `res` // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside @@ -2129,7 +2164,32 @@ where /// Gets the list of open channels, in random order. See [`ChannelDetails`] field documentation for /// more information. pub fn list_channels(&self) -> Vec { - self.list_channels_with_filter(|_| true) + // Allocate our best estimate of the number of channels we have in the `res` + // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without + // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside + // of the ChannelMonitor handling. Therefore reallocations may still occur, but is + // unlikely as the `short_to_chan_info` map often contains 2 entries for + // the same channel. 
+ let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len()); + { + let best_block_height = self.best_block.read().unwrap().height(); + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + for (_channel_id, channel) in peer_state.channel_by_id.iter() { + let details = ChannelDetails::from_channel_context(&channel.context, best_block_height, + peer_state.latest_features.clone()); + res.push(details); + } + for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() { + let details = ChannelDetails::from_channel_context(&channel.context, best_block_height, + peer_state.latest_features.clone()); + res.push(details); + } + } + } + res } /// Gets the list of usable channels, in random order. Useful as an argument to @@ -2142,7 +2202,7 @@ where // Note we use is_live here instead of usable which leads to somewhat confused // internal/external nomenclature, but that's ok cause that's probably what the user // really wanted anyway. - self.list_channels_with_filter(|&(_, ref channel)| channel.context.is_live()) + self.list_funded_channels_with_filter(|&(_, ref channel)| channel.context.is_live()) } /// Gets the list of channels we have with a given counterparty, in random order. @@ -2356,30 +2416,39 @@ where let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(peer_node_id) .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?; - let mut chan = { + let (update_opt, counterparty_node_id) = { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let closure_reason = if let Some(peer_msg) = peer_msg { + ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) } + } else { + ClosureReason::HolderForceClosed + }; if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) { - if let Some(peer_msg) = peer_msg { - self.issue_channel_close_events(&chan.get().context, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }); - } else { - self.issue_channel_close_events(&chan.get().context, ClosureReason::HolderForceClosed); - } - remove_channel!(self, chan) + log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); + self.issue_channel_close_events(&chan.get().context, closure_reason); + let mut chan = remove_channel!(self, chan); + self.finish_force_close_channel(chan.context.force_shutdown(broadcast)); + (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id()) + } else if let hash_map::Entry::Occupied(chan) = peer_state.outbound_v1_channel_by_id.entry(channel_id.clone()) { + log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); + self.issue_channel_close_events(&chan.get().context, closure_reason); + let mut chan = remove_channel!(self, chan); + self.finish_force_close_channel(chan.context.force_shutdown(false)); + // Prefunded channel has no update + (None, chan.context.get_counterparty_node_id()) } else { return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) }); } }; - log_error!(self.logger, "Force-closing channel {}", 
log_bytes!(channel_id[..])); - self.finish_force_close_channel(chan.context.force_shutdown(broadcast)); - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + if let Some(update) = update_opt { let mut peer_state = peer_state_mutex.lock().unwrap(); peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } - Ok(chan.context.get_counterparty_node_id()) + Ok(counterparty_node_id) } fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> { @@ -3118,7 +3187,7 @@ where /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. - fn funding_transaction_generated_intern::Signer>, &Transaction) -> Result>( + fn funding_transaction_generated_intern::Signer>, &Transaction) -> Result>( &self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -3127,21 +3196,24 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - let (msg, chan) = match peer_state.channel_by_id.remove(temporary_channel_id) { - Some(mut chan) => { + let (chan, msg) = match peer_state.outbound_v1_channel_by_id.remove(temporary_channel_id) { + Some(chan) => { let funding_txo = find_funding_output(&chan, &funding_transaction)?; let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger) - .map_err(|e| if let ChannelError::Close(msg) = e { - MsgHandleErrInternal::from_finish_shutdown(msg, chan.context.channel_id(), chan.context.get_user_id(), chan.context.force_shutdown(true), None) + .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e { + let channel_id = chan.context.channel_id(); + let user_id = chan.context.get_user_id(); + let shutdown_res = chan.context.force_shutdown(false); + (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None)) } else { unreachable!(); }); match funding_res { - Ok(funding_msg) => (funding_msg, chan), - Err(_) => { + Ok((chan, funding_msg)) => (chan, funding_msg), + Err((chan, err)) => { mem::drop(peer_state_lock); mem::drop(per_peer_state); - let _ = handle_error!(self, funding_res, chan.context.get_counterparty_node_id()); + let _: Result<(), _> = handle_error!(self, Err(err), chan.context.get_counterparty_node_id()); return Err(APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() }); @@ -3395,7 +3467,8 @@ where chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias()) }, None => return Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id) + err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.", + log_bytes!(*next_hop_channel_id), next_node_id) }) } }; @@ -4899,7 +4972,8 @@ where // Get the number of peers with channels, but without funded ones. We don't care too much // about peers that never open a channel, so we filter by peers that have at least one // channel, and then limit the number of those with unfunded channels. 
- let channeled_peers_without_funding = self.peers_without_funded_channels(|node| !node.channel_by_id.is_empty()); + let channeled_peers_without_funding = + self.peers_without_funded_channels(|node| node.total_channel_count() > 0); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -4913,7 +4987,7 @@ where // If this peer already has some channels, a new channel won't increase our number of peers // with unfunded channels, so as long as we aren't over the maximum number of unfunded // channels per-peer we can accept channels from a peer with existing ones. - if peer_state.channel_by_id.is_empty() && + if peer_state.total_channel_count() == 0 && channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS && !self.default_configuration.manually_accept_inbound_channels { @@ -4978,9 +5052,9 @@ where })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(msg.temporary_channel_id) { + match peer_state.outbound_v1_channel_by_id.entry(msg.temporary_channel_id) { hash_map::Entry::Occupied(mut chan) => { - try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan); + try_v1_outbound_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan); (chan.get().context.get_value_satoshis(), chan.get().context.get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) @@ -6822,6 +6896,11 @@ where } true }); + peer_state.outbound_v1_channel_by_id.retain(|_, chan| { + update_maps_on_chan_removal!(self, &chan.context); + self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer); + false + }); pending_msg_events.retain(|msg| { match msg { // V1 Channel Establishment @@ -6980,7 +7059,9 @@ where if peer_state_mutex_opt.is_none() { return; } let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; - peer_state.channel_by_id.keys().cloned().collect() + peer_state.channel_by_id.keys().cloned() + .chain(peer_state.outbound_v1_channel_by_id.keys().cloned()) + .chain(peer_state.inbound_v1_channel_by_id.keys().cloned()).collect() }; for channel_id in channel_ids { // Untrusted messages from peer, we throw away the error if id points to a non-existent channel @@ -6994,7 +7075,7 @@ where if peer_state_mutex_opt.is_none() { return; } let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; - if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) { + if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) { if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) { peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id: *counterparty_node_id, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 7209c4a0b83..16d59a292b3 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -783,17 +783,6 @@ macro_rules! 
get_channel_ref { } } -#[cfg(test)] -macro_rules! get_inbound_v1_channel_ref { - ($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => { - { - $per_peer_state_lock = $node.node.per_peer_state.read().unwrap(); - $peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); - $peer_state_lock.inbound_v1_channel_by_id.get_mut(&$channel_id).unwrap() - } - } -} - #[cfg(test)] macro_rules! get_outbound_v1_channel_ref { ($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => { diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index bfab15f314c..92b66505cda 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -179,9 +179,15 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] }; let mut sender_node_per_peer_lock; let mut sender_node_peer_state_lock; - let mut chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); - chan.context.holder_selected_channel_reserve_satoshis = 0; - chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000; + if send_from_initiator { + let chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); + chan.context.holder_selected_channel_reserve_satoshis = 0; + chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000; + } else { + let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); + chan.context.holder_selected_channel_reserve_satoshis = 0; + chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000; + } } let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id); @@ -3614,8 +3620,8 @@ fn test_peer_disconnected_before_funding_broadcasted() { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer); - check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer); + check_closed_event(&nodes[0], 1, ClosureReason::DisconnectedPeer, false); + check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false); } #[test] @@ -6950,8 +6956,8 @@ fn test_user_configurable_csv_delay() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new_outbound() - if let Err(error) = OutboundV1Channel::new_outbound(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), + // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new() + if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0, &low_our_to_self_config, 0, 42) { @@ -8928,16 +8934,16 @@ fn test_duplicate_chan_id() { nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], 
MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())); create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event - let funding_created = { + let (_, funding_created) = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we // try to create another channel. Instead, we drop the channel entirely here (leaving the // channelmanager in a possibly nonsense state instead). - let mut as_chan = a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap(); + let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap(); let logger = test_utils::TestLogger::new(); - as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap() + as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap() }; check_added_monitors!(nodes[0], 0); nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created); @@ -9603,7 +9609,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e if on_holder_tx { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; - let mut chan = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id); + let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id); chan.context.holder_dust_limit_satoshis = 546; } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index ab010b5a723..90c7ad7625c 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -1635,7 +1635,9 @@ fn do_test_intercepted_payment(test: InterceptTest) { // Check for unknown channel id error. let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &[42; 32], nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err(); - assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) }); + assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable { + err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.", + log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) }); if test == InterceptTest::Fail { // Ensure we can fail the intercepted payment back. @@ -1659,7 +1661,9 @@ fn do_test_intercepted_payment(test: InterceptTest) { // Check that we'll fail as expected when sending to a channel that isn't in `ChannelReady` yet. 
let temp_chan_id = nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 0, 42, None).unwrap(); let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err(); - assert_eq!(unusable_chan_err , APIError::ChannelUnavailable { err: format!("Channel with id {} not fully established", log_bytes!(temp_chan_id)) }); + assert_eq!(unusable_chan_err , APIError::ChannelUnavailable { + err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.", + log_bytes!(temp_chan_id), nodes[2].node.get_our_node_id()) }); assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1); // Open the just-in-time channel so the payment can then be forwarded. From 637e03a3dee1e2476a4fb40cd50fa6828abbc9cb Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Wed, 7 Jun 2023 18:14:59 +0200 Subject: [PATCH 23/25] Move inbound channel methods into `InboundV1Channel`'s impl --- lightning/src/ln/channel.rs | 419 +++++++++++----------- lightning/src/ln/channelmanager.rs | 50 ++- lightning/src/ln/functional_test_utils.rs | 11 + lightning/src/ln/functional_tests.rs | 4 +- 4 files changed, 268 insertions(+), 216 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index e1f3e85ff46..7da903f4e0c 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2010,8 +2010,6 @@ struct CommitmentTxInfoCached { } impl Channel { - // Constructors: - fn check_remote_fee(fee_estimator: &LowerBoundedFeeEstimator, feerate_per_kw: u32, cur_feerate_per_kw: Option, logger: &L) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger, @@ -2422,129 +2420,6 @@ impl Channel { // Message handlers: - fn funding_created_signature(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger { - let funding_script = self.context.get_funding_redeemscript(); - - let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx; - { - let trusted_tx = initial_commitment_tx.trust(); - let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); - let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); - // They sign the holder commitment transaction... 
- log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", - log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()), - encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), - encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); - secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); - } - - let counterparty_keys = self.context.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; - - let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); - let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); - log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", - log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); - - let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) - .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0; - - // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish. - Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature)) - } - - pub fn funding_created( - &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(msgs::FundingSigned, ChannelMonitor), ChannelError> - where - SP::Target: SignerProvider, - L::Target: Logger - { - if self.context.is_outbound() { - return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned())); - } - if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { - // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT - // remember the channel, so it's safe to just send an error_message here and drop the - // channel. - return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned())); - } - if self.context.inbound_awaiting_accept { - return Err(ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned())); - } - if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || - self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { - panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); - } - - let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index }; - self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo); - // This is an externally observable change before we finish all our checks. In particular - // funding_created_signature may fail. 
- self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); - - let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) { - Ok(res) => res, - Err(ChannelError::Close(e)) => { - self.context.channel_transaction_parameters.funding_outpoint = None; - return Err(ChannelError::Close(e)); - }, - Err(e) => { - // The only error we know how to handle is ChannelError::Close, so we fall over here - // to make sure we don't continue with an inconsistent state. - panic!("unexpected error type from funding_created_signature {:?}", e); - } - }; - - let holder_commitment_tx = HolderCommitmentTransaction::new( - initial_commitment_tx, - msg.signature, - Vec::new(), - &self.context.get_holder_pubkeys().funding_pubkey, - self.context.counterparty_funding_pubkey() - ); - - self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) - .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; - - // Now that we're past error-generating stuff, update our local state: - - let funding_redeemscript = self.context.get_funding_redeemscript(); - let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); - let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); - let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); - let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); - monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); - let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, - shutdown_script, self.context.get_holder_selected_contest_delay(), - &self.context.destination_script, (funding_txo, funding_txo_script.clone()), - &self.context.channel_transaction_parameters, - funding_redeemscript.clone(), self.context.channel_value_satoshis, - obscure_factor, - holder_commitment_tx, best_block, self.context.counterparty_node_id); - - channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger); - - self.context.channel_state = ChannelState::FundingSent as u32; - self.context.channel_id = funding_txo.to_channel_id(); - self.context.cur_counterparty_commitment_transaction_number -= 1; - self.context.cur_holder_commitment_transaction_number -= 1; - - log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id())); - - let need_channel_ready = self.check_get_channel_ready(0).is_some(); - self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); - - Ok((msgs::FundingSigned { - channel_id: self.context.channel_id, - signature, - #[cfg(taproot)] - partial_signature_with_nonce: None, - }, channel_monitor)) - } - /// Handles a funding_signed message from the remote end. /// If this call is successful, broadcast the funding transaction (and not before!) 
pub fn funding_signed( @@ -4938,83 +4813,6 @@ impl Channel { // Methods to get unprompted messages to send to the remote end (or where we already returned // something in the handler for the message that prompted this message): - pub fn inbound_is_awaiting_accept(&self) -> bool { - self.context.inbound_awaiting_accept - } - - /// Sets this channel to accepting 0conf, must be done before `get_accept_channel` - pub fn set_0conf(&mut self) { - assert!(self.context.inbound_awaiting_accept); - self.context.minimum_depth = Some(0); - } - - /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which - /// should be sent back to the counterparty node. - /// - /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel - pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel { - if self.context.is_outbound() { - panic!("Tried to send accept_channel for an outbound channel?"); - } - if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) { - panic!("Tried to send accept_channel after channel had moved forward"); - } - if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { - panic!("Tried to send an accept_channel for a channel that has already advanced"); - } - if !self.context.inbound_awaiting_accept { - panic!("The inbound channel has already been accepted"); - } - - self.context.user_id = user_id; - self.context.inbound_awaiting_accept = false; - - self.generate_accept_channel_message() - } - - /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an - /// inbound channel. If the intention is to accept an inbound channel, use - /// [`Channel::accept_inbound_channel`] instead. - /// - /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel - fn generate_accept_channel_message(&self) -> msgs::AcceptChannel { - let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - let keys = self.context.get_holder_pubkeys(); - - msgs::AcceptChannel { - temporary_channel_id: self.context.channel_id, - dust_limit_satoshis: self.context.holder_dust_limit_satoshis, - max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, - channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, - htlc_minimum_msat: self.context.holder_htlc_minimum_msat, - minimum_depth: self.context.minimum_depth.unwrap(), - to_self_delay: self.context.get_holder_selected_contest_delay(), - max_accepted_htlcs: self.context.holder_max_accepted_htlcs, - funding_pubkey: keys.funding_pubkey, - revocation_basepoint: keys.revocation_basepoint, - payment_point: keys.payment_point, - delayed_payment_basepoint: keys.delayed_payment_basepoint, - htlc_basepoint: keys.htlc_basepoint, - first_per_commitment_point, - shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { - Some(script) => script.clone().into_inner(), - None => Builder::new().into_script(), - }), - channel_type: Some(self.context.channel_type.clone()), - #[cfg(taproot)] - next_local_nonce: None, - } - } - - /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an - /// inbound channel without accepting it. 
- /// - /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel - #[cfg(test)] - pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel { - self.generate_accept_channel_message() - } - /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly /// announceable and available for use (have exchanged ChannelReady messages in both /// directions). Should be used for both broadcasted announcements and in response to an @@ -6142,7 +5940,7 @@ impl InboundV1Channel { counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig, current_chain_height: u32, logger: &L, outbound_scid_alias: u64 - ) -> Result, ChannelError> + ) -> Result, ChannelError> where ES::Target: EntropySource, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -6332,7 +6130,7 @@ impl InboundV1Channel { let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); - let chan = Channel { + let chan = Self { context: ChannelContext { user_id, @@ -6466,6 +6264,213 @@ impl InboundV1Channel { Ok(chan) } + + pub fn inbound_is_awaiting_accept(&self) -> bool { + self.context.inbound_awaiting_accept + } + + /// Sets this channel to accepting 0conf, must be done before `get_accept_channel` + pub fn set_0conf(&mut self) { + assert!(self.context.inbound_awaiting_accept); + self.context.minimum_depth = Some(0); + } + + /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which + /// should be sent back to the counterparty node. + /// + /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel + pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel { + if self.context.is_outbound() { + panic!("Tried to send accept_channel for an outbound channel?"); + } + if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) { + panic!("Tried to send accept_channel after channel had moved forward"); + } + if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + panic!("Tried to send an accept_channel for a channel that has already advanced"); + } + if !self.context.inbound_awaiting_accept { + panic!("The inbound channel has already been accepted"); + } + + self.context.user_id = user_id; + self.context.inbound_awaiting_accept = false; + + self.generate_accept_channel_message() + } + + /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an + /// inbound channel. If the intention is to accept an inbound channel, use + /// [`InboundV1Channel::accept_inbound_channel`] instead. 
+ /// + /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel + fn generate_accept_channel_message(&self) -> msgs::AcceptChannel { + let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let keys = self.context.get_holder_pubkeys(); + + msgs::AcceptChannel { + temporary_channel_id: self.context.channel_id, + dust_limit_satoshis: self.context.holder_dust_limit_satoshis, + max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, + channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, + htlc_minimum_msat: self.context.holder_htlc_minimum_msat, + minimum_depth: self.context.minimum_depth.unwrap(), + to_self_delay: self.context.get_holder_selected_contest_delay(), + max_accepted_htlcs: self.context.holder_max_accepted_htlcs, + funding_pubkey: keys.funding_pubkey, + revocation_basepoint: keys.revocation_basepoint, + payment_point: keys.payment_point, + delayed_payment_basepoint: keys.delayed_payment_basepoint, + htlc_basepoint: keys.htlc_basepoint, + first_per_commitment_point, + shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { + Some(script) => script.clone().into_inner(), + None => Builder::new().into_script(), + }), + channel_type: Some(self.context.channel_type.clone()), + #[cfg(taproot)] + next_local_nonce: None, + } + } + + /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an + /// inbound channel without accepting it. + /// + /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel + #[cfg(test)] + pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel { + self.generate_accept_channel_message() + } + + fn funding_created_signature(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger { + let funding_script = self.context.get_funding_redeemscript(); + + let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx; + { + let trusted_tx = initial_commitment_tx.trust(); + let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); + let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); + // They sign the holder commitment transaction... 
+ log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", + log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()), + encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), + encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); + secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); + } + + let counterparty_keys = self.context.build_remote_transaction_keys(); + let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + + let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); + let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); + log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", + log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + + let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) + .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0; + + // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish. + Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature)) + } + + pub fn funding_created( + mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L + ) -> Result<(Channel, msgs::FundingSigned, ChannelMonitor), (Self, ChannelError)> + where + SP::Target: SignerProvider, + L::Target: Logger + { + if self.context.is_outbound() { + return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned()))); + } + if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { + // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT + // remember the channel, so it's safe to just send an error_message here and drop the + // channel. + return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned()))); + } + if self.context.inbound_awaiting_accept { + return Err((self, ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned()))); + } + if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || + self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || + self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); + } + + let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index }; + self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo); + // This is an externally observable change before we finish all our checks. In particular + // funding_created_signature may fail. 
+ self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); + + let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) { + Ok(res) => res, + Err(ChannelError::Close(e)) => { + self.context.channel_transaction_parameters.funding_outpoint = None; + return Err((self, ChannelError::Close(e))); + }, + Err(e) => { + // The only error we know how to handle is ChannelError::Close, so we fall over here + // to make sure we don't continue with an inconsistent state. + panic!("unexpected error type from funding_created_signature {:?}", e); + } + }; + + let holder_commitment_tx = HolderCommitmentTransaction::new( + initial_commitment_tx, + msg.signature, + Vec::new(), + &self.context.get_holder_pubkeys().funding_pubkey, + self.context.counterparty_funding_pubkey() + ); + + if let Err(_) = self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) { + return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned()))); + } + + // Now that we're past error-generating stuff, update our local state: + + let funding_redeemscript = self.context.get_funding_redeemscript(); + let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); + let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); + let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); + let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); + monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); + let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, + shutdown_script, self.context.get_holder_selected_contest_delay(), + &self.context.destination_script, (funding_txo, funding_txo_script.clone()), + &self.context.channel_transaction_parameters, + funding_redeemscript.clone(), self.context.channel_value_satoshis, + obscure_factor, + holder_commitment_tx, best_block, self.context.counterparty_node_id); + + channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger); + + self.context.channel_state = ChannelState::FundingSent as u32; + self.context.channel_id = funding_txo.to_channel_id(); + self.context.cur_counterparty_commitment_transaction_number -= 1; + self.context.cur_holder_commitment_transaction_number -= 1; + + log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id())); + + // Promote the channel to a full-fledged one now that we have updated the state and have a + // `ChannelMonitor`. 
+ let mut channel = Channel { + context: self.context, + }; + let channel_id = channel.context.channel_id.clone(); + let need_channel_ready = channel.check_get_channel_ready(0).is_some(); + channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); + + Ok((channel, msgs::FundingSigned { + channel_id, + signature, + #[cfg(taproot)] + partial_signature_with_nonce: None, + }, channel_monitor)) + } } const SERIALIZATION_VERSION: u8 = 3; @@ -7491,7 +7496,7 @@ mod tests { }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap(); - let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap(); + let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap(); @@ -7617,7 +7622,7 @@ mod tests { }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap(); - let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap(); + let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap(); @@ -7805,7 +7810,7 @@ mod tests { }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap(); - let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap(); + let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap(); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 27014052b04..2dd091010c9 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2182,6 +2182,11 @@ where peer_state.latest_features.clone()); res.push(details); } + for (_channel_id, channel) in peer_state.inbound_v1_channel_by_id.iter() { + let details = ChannelDetails::from_channel_context(&channel.context, best_block_height, + peer_state.latest_features.clone()); + res.push(details); + } for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() { let details = ChannelDetails::from_channel_context(&channel.context, best_block_height, peer_state.latest_features.clone()); @@ -2437,6 +2442,13 @@ where self.finish_force_close_channel(chan.context.force_shutdown(false)); // Prefunded channel has no update (None, chan.context.get_counterparty_node_id()) + } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) { + 
log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); + self.issue_channel_close_events(&chan.get().context, closure_reason); + let mut chan = remove_channel!(self, chan); + self.finish_force_close_channel(chan.context.force_shutdown(false)); + // Prefunded channel has no update + (None, chan.context.get_counterparty_node_id()) } else { return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) }); } @@ -4872,7 +4884,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let is_only_peer_channel = peer_state.total_channel_count() == 1; - match peer_state.channel_by_id.entry(temporary_channel_id.clone()) { + match peer_state.inbound_v1_channel_by_id.entry(temporary_channel_id.clone()) { hash_map::Entry::Occupied(mut channel) => { if !channel.get().inbound_is_awaiting_accept() { return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() }); @@ -4946,12 +4958,19 @@ where ) -> usize { let mut num_unfunded_channels = 0; for (_, chan) in peer.channel_by_id.iter() { + // This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those + // which have not yet had any confirmations on-chain. if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 && chan.context.get_funding_tx_confirmations(best_block_height) == 0 { num_unfunded_channels += 1; } } + for (_, chan) in peer.inbound_v1_channel_by_id.iter() { + if chan.context.minimum_depth().unwrap_or(1) != 0 { + num_unfunded_channels += 1; + } + } num_unfunded_channels } @@ -5037,7 +5056,7 @@ where channel_type: channel.context.get_channel_type().clone(), }, None)); } - peer_state.channel_by_id.insert(channel_id, channel); + peer_state.inbound_v1_channel_by_id.insert(channel_id, channel); } Ok(()) } @@ -5083,12 +5102,24 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - let ((funding_msg, monitor), chan) = - match peer_state.channel_by_id.entry(msg.temporary_channel_id) { - hash_map::Entry::Occupied(mut chan) => { - (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove()) + let (chan, funding_msg, monitor) = + match peer_state.inbound_v1_channel_by_id.remove(&msg.temporary_channel_id) { + Some(inbound_chan) => { + match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) { + Ok(res) => res, + Err((mut inbound_chan, err)) => { + // We've already removed this inbound channel from the map in `PeerState` + // above so at this point we just need to clean up any lingering entries + // concerning this channel as it is safe to do so. + update_maps_on_chan_removal!(self, &inbound_chan.context); + let user_id = inbound_chan.context.get_user_id(); + let shutdown_res = inbound_chan.context.force_shutdown(false); + return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err), + msg.temporary_channel_id, user_id, shutdown_res, None)); + }, + } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 16d59a292b3..6107642cbf2 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -794,6 +794,17 @@ macro_rules! get_outbound_v1_channel_ref {
 	}
 }
 
+#[cfg(test)]
+macro_rules! get_inbound_v1_channel_ref {
+	($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => {
+		{
+			$per_peer_state_lock = $node.node.per_peer_state.read().unwrap();
+			$peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
+			$peer_state_lock.inbound_v1_channel_by_id.get_mut(&$channel_id).unwrap()
+		}
+	}
+}
+
 #[cfg(test)]
 macro_rules! get_feerate {
 	($node: expr, $counterparty_node: expr, $channel_id: expr) => {
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 92b66505cda..b9468dc7821 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -180,7 +180,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
 		let mut sender_node_per_peer_lock;
 		let mut sender_node_peer_state_lock;
 		if send_from_initiator {
-			let chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+			let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
 			chan.context.holder_selected_channel_reserve_satoshis = 0;
 			chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
 		} else {
@@ -7874,7 +7874,7 @@ fn test_reject_funding_before_inbound_channel_accepted() {
 	let accept_chan_msg = {
 		let mut node_1_per_peer_lock;
 		let mut node_1_peer_state_lock;
-		let channel = get_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
+		let channel = get_inbound_v1_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
 		channel.get_accept_channel_message()
 	};
 	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
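Before moving on to the next patch, it may help to see the shape this leaves a peer's state in: channels now live in separate per-phase maps, and limits such as the unfunded-channel count are taken across them. The following is a trimmed-down sketch with hypothetical stand-in types (the real maps hold the LDK channel structs and live inside `PeerState`), reusing the map and method names that appear in the hunks above:

    use std::collections::HashMap;

    struct FundedChannel { is_outbound: bool, minimum_depth: Option<u32>, confirmations: u32 }
    struct InboundV1 { minimum_depth: Option<u32> }
    struct OutboundV1;

    struct PeerChannels {
        channel_by_id: HashMap<[u8; 32], FundedChannel>,
        outbound_v1_channel_by_id: HashMap<[u8; 32], OutboundV1>,
        inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1>,
    }

    impl PeerChannels {
        // Count inbound channels that still need confirmations: funded-but-unconfirmed entries in
        // `channel_by_id` plus every pre-funding entry in `inbound_v1_channel_by_id`.
        fn unfunded_channel_count(&self) -> usize {
            let funded_unconfirmed = self.channel_by_id.values()
                .filter(|c| !c.is_outbound && c.minimum_depth.unwrap_or(1) != 0 && c.confirmations == 0)
                .count();
            let awaiting_funding = self.inbound_v1_channel_by_id.values()
                .filter(|c| c.minimum_depth.unwrap_or(1) != 0)
                .count();
            funded_unconfirmed + awaiting_funding
        }

        // Any per-peer limit has to look at all three maps, as `total_channel_count` does above.
        fn total_channel_count(&self) -> usize {
            self.channel_by_id.len() + self.outbound_v1_channel_by_id.len() + self.inbound_v1_channel_by_id.len()
        }
    }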
From 8f93e2dc944ebb9a07e2be97773649525121e31f Mon Sep 17 00:00:00 2001
From: Duncan Dean
Date: Wed, 7 Jun 2023 20:49:26 +0200
Subject: [PATCH 24/25] Rename `InboundV1Channel::new_from_req` to `InboundV1Channel::new`

---
 lightning/src/ln/channel.rs          | 40 ++++++++++++++--------------
 lightning/src/ln/channelmanager.rs   |  2 +-
 lightning/src/ln/functional_tests.rs |  8 +++---
 3 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 7da903f4e0c..0b4810030c1 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -1422,7 +1422,7 @@ impl ChannelContext {
 	/// Gets the redeemscript for the funding transaction output (ie the funding transaction output
 	/// pays to get_funding_redeemscript().to_v0_p2wsh()).
-	/// Panics if called before accept_channel/new_from_req
+	/// Panics if called before accept_channel/InboundV1Channel::new
 	pub fn get_funding_redeemscript(&self) -> Script {
 		make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
 	}
@@ -5935,7 +5935,7 @@ pub(super) struct InboundV1Channel {
 impl InboundV1Channel {
 	/// Creates a new channel from a remote sides' request for one.
 	/// Assumes chain_hash has already been checked and corresponds with what we expect!
-	pub fn new_from_req(
+	pub fn new(
 		fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP,
 		counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
 		their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
@@ -7481,7 +7481,7 @@ mod tests {
 		// Make sure A's dust limit is as we expect.
 		let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
-		let mut node_b_chan = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
+		let mut node_b_chan = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
 
 		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
 		let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
@@ -7609,7 +7609,7 @@ mod tests {
 		// Create Node B's channel by receiving Node A's open_channel message
 		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
-		let mut node_b_chan = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
+		let mut node_b_chan = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
 
 		// Node B --> Node A: accept channel
 		let accept_channel_msg = node_b_chan.accept_inbound_channel(0);
@@ -7678,15 +7678,15 @@ mod tests {
 
 		let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash());
 
-		// Test that `new_from_req` creates a channel with the correct value for
+		// Test that `InboundV1Channel::new` creates a channel with the correct value for
 		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
 		// which is set to the lower bound - 1 (2%) of the `channel_value`.
-		let chan_3 = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap();
+		let chan_3 = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap();
 		let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
 
 		// Test with the upper bound - 1 of valid values (99%).
-		let chan_4 = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap();
+		let chan_4 = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap();
 		let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
@@ -7703,16 +7703,16 @@ mod tests {
 		let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
 
-		// Test that `new_from_req` uses the lower bound of the configurable percentage values (1%)
+		// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
 		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
-		let chan_7 = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap();
+		let chan_7 = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap();
 		let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
 
-		// Test that `new_from_req` uses the upper bound of the configurable percentage values
+		// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
 		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
 		// than 100.
-		let chan_8 = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap();
+		let chan_8 = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap();
 		let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
 	}
@@ -7720,7 +7720,7 @@ mod tests {
 	#[test]
 	fn test_configured_holder_selected_channel_reserve_satoshis() {
-		// Test that `OutboundV1Channel::new` and `new_from_req` create a channel with the correct
+		// Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
 		// channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
 		test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
@@ -7762,7 +7762,7 @@ mod tests {
 		inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
 
 		if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
-			let chan_inbound_node = InboundV1Channel::::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap();
+			let chan_inbound_node = InboundV1Channel::::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap();
 
 			let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
@@ -7770,7 +7770,7 @@ mod tests {
 			assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
 		} else {
 			// Channel Negotiations failed
-			let result = InboundV1Channel::::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42);
+			let result = InboundV1Channel::::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42);
 			assert!(result.is_err());
 		}
 	}
@@ -7795,7 +7795,7 @@ mod tests {
 		// Make sure A's dust limit is as we expect.
 		let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
-		let mut node_b_chan = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
+		let mut node_b_chan = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
 
 		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
 		let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
@@ -8617,7 +8617,7 @@ mod tests {
 		let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
 		open_channel_msg.channel_type = Some(channel_type_features);
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
-		let res = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider,
+		let res = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider,
 			node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42);
 		assert!(res.is_ok());
@@ -8659,7 +8659,7 @@ mod tests {
 		).unwrap();
 
 		let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
-		let channel_b = InboundV1Channel::::new_from_req(
+		let channel_b = InboundV1Channel::::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
 			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
 			&open_channel_msg, 7, &config, 0, &&logger, 42
@@ -8702,7 +8702,7 @@ mod tests {
 		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
 		// `static_remote_key`, it will fail the channel.
-		let channel_b = InboundV1Channel::::new_from_req(
+		let channel_b = InboundV1Channel::::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
 			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
 			&open_channel_msg, 7, &config, 0, &&logger, 42
@@ -8746,7 +8746,7 @@ mod tests {
 		let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
 		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
-		let res = InboundV1Channel::::new_from_req(
+		let res = InboundV1Channel::::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
 			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
 			&open_channel_msg, 7, &config, 0, &&logger, 42
@@ -8764,7 +8764,7 @@ mod tests {
 
 		let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
-		let channel_b = InboundV1Channel::::new_from_req(
+		let channel_b = InboundV1Channel::::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
 			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
 			&open_channel_msg, 7, &config, 0, &&logger, 42
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 2dd091010c9..4dec5cb1a32 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -5022,7 +5022,7 @@ where
 				msg.temporary_channel_id.clone()));
 		}
 
-		let mut channel = match InboundV1Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
+		let mut channel = match InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
 			counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
 			&self.default_configuration, best_block_height, &self.logger, outbound_scid_alias)
 		{
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index b9468dc7821..f03ecd14f66 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -6967,11 +6967,11 @@ fn test_user_configurable_csv_delay() {
 		}
 	} else { assert!(false) }
 
-	// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new_from_req()
+	// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
 	nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
 	let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
 	open_channel.to_self_delay = 200;
-	if let Err(error) = InboundV1Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+	if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
 		&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
 		&low_our_to_self_config, 0, &nodes[0].logger, 42)
 	{
@@ -6999,11 +6999,11 @@ fn test_user_configurable_csv_delay() {
 	} else { panic!(); }
 	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
 
-	// We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new_from_req()
+	// We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
 	nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
 	let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
 	open_channel.to_self_delay = 200;
-	if let Err(error) = InboundV1Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+	if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
 		&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
 		&high_their_to_self_config, 0, &nodes[0].logger, 42)
 	{
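The rename in the patch above (and the one in the follow-up below) is mechanical, but it reflects a common Rust convention: once the constructor lives on a dedicated `InboundV1Channel` type, a plain `new` says everything `new_from_req` used to. A tiny hypothetical illustration of that call-site effect, not the real LDK signature (which additionally takes fee estimators, keys, features and config):

    struct OpenChannelMsg { funding_satoshis: u64 }

    struct InboundV1Channel { funding_satoshis: u64 }

    impl InboundV1Channel {
        // The type already says "inbound, built from a counterparty's request", so `new` suffices.
        fn new(msg: &OpenChannelMsg) -> Result<Self, &'static str> {
            if msg.funding_satoshis == 0 {
                return Err("open_channel with zero funding");
            }
            Ok(InboundV1Channel { funding_satoshis: msg.funding_satoshis })
        }
    }

    fn main() {
        let msg = OpenChannelMsg { funding_satoshis: 100_000 };
        let chan = InboundV1Channel::new(&msg).expect("acceptable open_channel");
        assert_eq!(chan.funding_satoshis, 100_000);
    }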
From d957f362ff18c28ad011c93cee83227f8aca3bd9 Mon Sep 17 00:00:00 2001
From: Duncan Dean
Date: Tue, 13 Jun 2023 17:17:59 +0200
Subject: [PATCH 25/25] Rename `inbound_is_awaiting_accept()` to `is_awaiting_accept()`

---
 lightning/src/ln/channel.rs        | 2 +-
 lightning/src/ln/channelmanager.rs | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 0b4810030c1..e74dbe42a77 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -6265,7 +6265,7 @@ impl InboundV1Channel {
 		Ok(chan)
 	}
 
-	pub fn inbound_is_awaiting_accept(&self) -> bool {
+	pub fn is_awaiting_accept(&self) -> bool {
 		self.context.inbound_awaiting_accept
 	}
 
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 4dec5cb1a32..164e4c2242d 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -4886,7 +4886,7 @@ where
 		let is_only_peer_channel = peer_state.total_channel_count() == 1;
 		match peer_state.inbound_v1_channel_by_id.entry(temporary_channel_id.clone()) {
 			hash_map::Entry::Occupied(mut channel) => {
-				if !channel.get().inbound_is_awaiting_accept() {
+				if !channel.get().is_awaiting_accept() {
 					return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
 				}
 				if accept_0conf {