From 5ed3f25b21a4dcd9075e550735dc688d820b9f41 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 23 Jun 2022 20:25:58 +0000
Subject: [PATCH] Add ChannelManager methods to force close without
 broadcasting

If a user restores from a backup that they know is stale, they'd like
to force-close all of their channels (or at least the ones they know
are stale) *without* broadcasting the latest state, asking their peers
to do so instead. This simply adds methods to do so, renaming the
existing `force_close_channel` and `force_close_all_channels` methods
to disambiguate further.
---
 fuzz/src/full_stack.rs                        |  4 +-
 lightning-background-processor/src/lib.rs     |  4 +-
 lightning-persister/src/lib.rs                |  6 +--
 lightning/src/ln/chanmon_update_fail_tests.rs |  2 +-
 lightning/src/ln/channelmanager.rs            | 49 ++++++++++++++-----
 lightning/src/ln/functional_tests.rs          | 22 ++++-----
 lightning/src/ln/payment_tests.rs             |  2 +-
 lightning/src/ln/priv_short_conf_tests.rs     |  2 +-
 lightning/src/util/events.rs                  | 12 ++---
 9 files changed, 64 insertions(+), 39 deletions(-)

diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index fae14de57f1..cff00669a9d 100644
--- a/fuzz/src/full_stack.rs
+++ b/fuzz/src/full_stack.rs
@@ -252,7 +252,7 @@ impl<'a> Drop for MoneyLossDetector<'a> {
 			}
 
 			// Force all channels onto the chain (and time out claim txn)
-			self.manager.force_close_all_channels();
+			self.manager.force_close_all_channels_broadcasting_latest_txn();
 		}
 	}
 }
@@ -624,7 +624,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
 			let channel_id = get_slice!(1)[0] as usize;
 			if channel_id >= channels.len() { return; }
 			channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) });
-			channelmanager.force_close_channel(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
+			channelmanager.force_close_broadcasting_latest_txn(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
 		},
 		// 15 is above
 		_ => return,
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index 10ae69e2fe1..11dca44bbab 100644
--- a/lightning-background-processor/src/lib.rs
+++ b/lightning-background-processor/src/lib.rs
@@ -744,7 +744,7 @@ mod tests {
 		}
 
 		// Force-close the channel.
-		nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+		nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
 
 		// Check that the force-close updates are persisted.
 		check_persisted_data!(nodes[0].node, filepath.clone());
@@ -880,7 +880,7 @@ mod tests {
 		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
 		// Force close the channel and check that the SpendableOutputs event was handled.
-		nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
 		let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
 		confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
 		let event = receiver
diff --git a/lightning-persister/src/lib.rs b/lightning-persister/src/lib.rs
index 28845150c44..3e32791711e 100644
--- a/lightning-persister/src/lib.rs
+++ b/lightning-persister/src/lib.rs
@@ -213,7 +213,7 @@ mod tests {
 
 		// Force close because cooperative close doesn't result in any persisted
 		// updates.
-		nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
 		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 		check_closed_broadcast!(nodes[0], true);
 		check_added_monitors!(nodes[0], 1);
@@ -247,7 +247,7 @@ mod tests {
 		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-		nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
 		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
 		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 		let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
@@ -286,7 +286,7 @@ mod tests {
 		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-		nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
 		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
 		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 		let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 91575195a88..0d2b3b6a9f9 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -226,7 +226,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	}
 
 	// ...and make sure we can force-close a frozen channel
-	nodes[0].node.force_close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
 	check_added_monitors!(nodes[0], 1);
 	check_closed_broadcast!(nodes[0], true);
 
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 54608e05e42..df0d93ef41b 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1945,7 +1945,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 	/// `peer_msg` should be set when we receive a message from a peer, but not set when the
 	/// user closes, which will be re-exposed as the `ChannelClosed` reason.
-	fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
+	fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
+	-> Result<PublicKey, APIError> {
 		let mut chan = {
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
@@ -1964,7 +1965,7 @@
 			}
 		};
 		log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
-		self.finish_force_close_channel(chan.force_shutdown(true));
+		self.finish_force_close_channel(chan.force_shutdown(broadcast));
 		if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
 			let mut channel_state = self.channel_state.lock().unwrap();
 			channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
@@ -1975,13 +1976,9 @@
 		Ok(chan.get_counterparty_node_id())
 	}
 
-	/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
-	/// the chain and rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
-	/// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
-	/// channel.
-	pub fn force_close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
+	fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-		match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None) {
+		match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
 			Ok(counterparty_node_id) => {
 				self.channel_state.lock().unwrap().pending_msg_events.push(
 					events::MessageSendEvent::HandleError {
@@ -1997,11 +1994,39 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		}
 	}
 
+	/// Force closes a channel, immediately broadcasting the latest local transaction(s) and
+	/// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
+	/// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
+	/// channel.
+	pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+	-> Result<(), APIError> {
+		self.force_close_sending_error(channel_id, counterparty_node_id, true)
+	}
+
+	/// Force closes a channel, rejecting new HTLCs on the given channel but skipping the broadcast
+	/// of the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if
+	/// the `counterparty_node_id` isn't the counterparty of the corresponding channel.
+	///
+	/// You can always get the latest local transaction(s) to broadcast from
+	/// [`ChannelMonitor::get_latest_holder_commitment_txn`].
+	pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+	-> Result<(), APIError> {
+		self.force_close_sending_error(channel_id, counterparty_node_id, false)
+	}
+
 	/// Force close all channels, immediately broadcasting the latest local commitment transaction
 	/// for each to the chain and rejecting new HTLCs on each.
-	pub fn force_close_all_channels(&self) {
+	pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
+		for chan in self.list_channels() {
+			let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
+		}
+	}
+
+	/// Force close all channels rejecting new HTLCs on each but without broadcasting the latest
+	/// local transaction(s).
+	pub fn force_close_all_channels_without_broadcasting_txn(&self) {
 		for chan in self.list_channels() {
-			let _ = self.force_close_channel(&chan.channel_id, &chan.counterparty.node_id);
+			let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
 		}
 	}
 
@@ -6058,7 +6083,7 @@ impl
 			for chan in self.list_channels() {
 				if chan.counterparty.node_id == *counterparty_node_id {
 					// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-					let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data));
+					let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true);
 				}
 			}
 		} else {
@@ -6080,7 +6105,7 @@ impl
 			}
 
 			// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-			let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data));
+			let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true);
 		}
 	}
 }
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 02ef5f37b9b..54817f35d7b 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -2203,7 +2203,7 @@ fn channel_monitor_network_test() {
 	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
 
 	// Simple case with no pending HTLCs:
-	nodes[1].node.force_close_channel(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+	nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
 	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true);
 	{
@@ -2224,7 +2224,7 @@ fn channel_monitor_network_test() {
 
 	// Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
 	// broadcasted until we reach the timelock time).
-	nodes[1].node.force_close_channel(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
+	nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
 	{
@@ -2264,7 +2264,7 @@ fn channel_monitor_network_test() {
 
 	// nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
 	// HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
-	nodes[2].node.force_close_channel(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
+	nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
 	check_added_monitors!(nodes[2], 1);
 	check_closed_broadcast!(nodes[2], true);
 	let node2_commitment_txid;
@@ -3403,7 +3403,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
 	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 	route_payment(&nodes[0], &[&nodes[1]], 10000000);
 
-	nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
 	connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
@@ -3466,7 +3466,7 @@ fn test_force_close_fail_back() {
 	// state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC
 	// transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
 
-	nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+	nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
 	check_closed_broadcast!(nodes[2], true);
 	check_added_monitors!(nodes[2], 1);
 	check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
@@ -4793,7 +4793,7 @@ fn test_claim_sizeable_push_msat() {
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
 
-	nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+	nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
@@ -4822,7 +4822,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
 
-	nodes[0].node.force_close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+	nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
@@ -8365,7 +8365,7 @@ fn test_manually_accept_inbound_channel_request() {
 		_ => panic!("Unexpected event"),
 	}
 
-	nodes[1].node.force_close_channel(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+	nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
 
 	let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(close_msg_ev.len(), 1);
@@ -8400,7 +8400,7 @@ fn test_manually_reject_inbound_channel_request() {
 	let events = nodes[1].node.get_and_clear_pending_events();
 	match events[0] {
 		Event::OpenChannelRequest { temporary_channel_id, .. } => {
-			nodes[1].node.force_close_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+			nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
 		}
 		_ => panic!("Unexpected event"),
 	}
@@ -9053,7 +9053,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 		force_closing_node = 1;
 		counterparty_node = 0;
 	}
-	nodes[force_closing_node].node.force_close_channel(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
+	nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
 	check_closed_broadcast!(nodes[force_closing_node], true);
 	check_added_monitors!(nodes[force_closing_node], 1);
 	check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
@@ -9489,7 +9489,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
 	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 
-	nodes[1].node.force_close_channel(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
+	nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
 	check_closed_broadcast!(nodes[1], true);
 	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
 	check_added_monitors!(nodes[1], 1);
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 37ca25babe9..7f725a42141 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -584,7 +584,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 	// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
 	// nodes[0].
 	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
-	nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs
index 237a7fa3160..508ba414037 100644
--- a/lightning/src/ln/priv_short_conf_tests.rs
+++ b/lightning/src/ln/priv_short_conf_tests.rs
@@ -817,7 +817,7 @@ fn test_0conf_close_no_early_chan_update() {
 	// We can use the channel immediately, but won't generate a channel_update until we get confs
 	send_payment(&nodes[0], &[&nodes[1]], 100_000);
 
-	nodes[0].node.force_close_all_channels();
+	nodes[0].node.force_close_all_channels_broadcasting_latest_txn();
 	check_added_monitors!(nodes[0], 1);
 	check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed);
 	let _ = get_err_msg!(nodes[0], nodes[1].node.get_our_node_id());
diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs
index 14ab5ee3c4b..caba7753f3f 100644
--- a/lightning/src/util/events.rs
+++ b/lightning/src/util/events.rs
@@ -459,33 +459,33 @@ pub enum Event {
 	/// Indicates a request to open a new channel by a peer.
 	///
 	/// To accept the request, call [`ChannelManager::accept_inbound_channel`]. To reject the
-	/// request, call [`ChannelManager::force_close_channel`].
+	/// request, call [`ChannelManager::force_close_without_broadcasting_txn`].
 	///
 	/// The event is only triggered when a new open channel request is received and the
 	/// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true.
 	///
 	/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
-	/// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+	/// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
 	/// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
 	OpenChannelRequest {
 		/// The temporary channel ID of the channel requested to be opened.
 		///
 		/// When responding to the request, the `temporary_channel_id` should be passed
 		/// back to the ChannelManager through [`ChannelManager::accept_inbound_channel`] to accept,
-		/// or through [`ChannelManager::force_close_channel`] to reject.
+		/// or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject.
 		///
 		/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
-		/// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+		/// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
 		temporary_channel_id: [u8; 32],
 		/// The node_id of the counterparty requesting to open the channel.
 		///
 		/// When responding to the request, the `counterparty_node_id` should be passed
 		/// back to the `ChannelManager` through [`ChannelManager::accept_inbound_channel`] to
-		/// accept the request, or through [`ChannelManager::force_close_channel`] to reject the
+		/// accept the request, or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject the
 		/// request.
 		///
 		/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
-		/// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+		/// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
 		counterparty_node_id: PublicKey,
 		/// The channel value of the requested channel.
 		funding_satoshis: u64,
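
---

Usage note (editorial, not part of the patch): after restoring from a backup
known to be stale, the new API is meant to be used roughly as sketched below.
This is a minimal sketch, not a complete program -- it assumes an
already-initialized `channel_manager` (the application's concrete
`ChannelManager` instantiation), and `stale_channel_id: [u8; 32]` and
`counterparty_node_id: PublicKey` are placeholder values identifying a channel
known to be stale:

	// Mark every channel closed and reject new HTLCs, but do NOT broadcast
	// our (stale) commitment transactions; each peer is instead sent an
	// error message and expected to broadcast its own, newer state.
	channel_manager.force_close_all_channels_without_broadcasting_txn();

	// Alternatively, close a single stale channel. This returns an error if
	// the channel_id is unknown or the counterparty doesn't match.
	channel_manager
		.force_close_without_broadcasting_txn(&stale_channel_id, &counterparty_node_id)
		.expect("unknown channel or wrong counterparty");

If a peer never broadcasts, the latest local commitment transaction(s) can
still be fetched from the corresponding `ChannelMonitor` via
`get_latest_holder_commitment_txn` and broadcast manually, as the new docs
point out.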