Remove `OutPoint::to_channel_id` method
To avoid confusion and for accuracy going forward, we remove this method
as it is inconsistent with channel IDs generated during V2 channel
establishment. If one wants to create a V1, funding outpoint-based
channel ID, then `ChannelId::v1_from_funding_outpoint` should be used
instead.
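
As a hypothetical migration sketch (the `funding_outpoint` name is illustrative, not from this commit), a call site that previously derived the ID from the outpoint now names the V1 derivation explicitly:

```rust
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;

// Derive the V1, funding outpoint-based channel ID explicitly.
fn v1_channel_id(funding_outpoint: OutPoint) -> ChannelId {
    // Previously: funding_outpoint.to_channel_id()
    ChannelId::v1_from_funding_outpoint(funding_outpoint)
}
```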

A large portion of the library has assumed that having the funding outpoint
is always enough to derive the channel ID. That is no longer the case, so we
now pass the channel ID along wherever it is needed. All channels persisted
up to this point could only have used V1 establishment, so structures that
don't store a channel ID for them can safely fall back to the funding
outpoint-based derivation.
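
A minimal sketch of that fallback (the names here are hypothetical; the constructor and the pattern mirror the changes in the diff below):

```rust
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;

// Pre-existing persisted channels are necessarily V1, so a missing channel ID
// can be reconstructed from the stored funding outpoint.
fn channel_id_with_v1_fallback(stored: Option<ChannelId>, funding_outpoint: OutPoint) -> ChannelId {
    stored.unwrap_or_else(|| ChannelId::v1_from_funding_outpoint(funding_outpoint))
}
```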
dunxen committed Jan 25, 2024
1 parent 51d9ee3 commit 32d7d55
Showing 24 changed files with 336 additions and 247 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -13,3 +13,4 @@ lightning-rapid-gossip-sync/res/full_graph.lngossip
lightning-custom-message/target
lightning-transaction-sync/target
no-std-check/target
+ msrv-no-dev-deps-check/target
8 changes: 4 additions & 4 deletions fuzz/src/chanmon_consistency.rs
@@ -40,7 +40,7 @@ use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget,
use lightning::sign::{KeyMaterial, InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider};
use lightning::events;
use lightning::events::MessageSendEventsProvider;
- use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
+ use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
@@ -176,7 +176,7 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
self.chain_monitor.watch_channel(funding_txo, monitor)
}

- fn update_channel(&self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
+ fn update_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
let mut map_lock = self.latest_monitors.lock().unwrap();
let mut map_entry = match map_lock.entry(funding_txo) {
hash_map::Entry::Occupied(entry) => entry,
@@ -188,10 +188,10 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
let mut ser = VecWriter(Vec::new());
deserialized_monitor.write(&mut ser).unwrap();
map_entry.insert((update.update_id, ser.0));
- self.chain_monitor.update_channel(funding_txo, update)
+ self.chain_monitor.update_channel(funding_txo, channel_id, update)
}

- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
return self.chain_monitor.release_pending_monitor_events();
}
}
5 changes: 2 additions & 3 deletions fuzz/src/full_stack.rs
@@ -22,8 +22,7 @@ use bitcoin::consensus::encode::deserialize;
use bitcoin::network::constants::Network;

use bitcoin::hashes::hex::FromHex;
- use bitcoin::hashes::Hash as TraitImport;
- use bitcoin::hashes::HashEngine as TraitImportEngine;
+ use bitcoin::hashes::Hash as _;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
@@ -651,7 +650,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
if let None = loss_detector.txids_confirmed.get(&funding_txid) {
let outpoint = OutPoint { txid: funding_txid, index: 0 };
for chan in channelmanager.list_channels() {
- if chan.channel_id == outpoint.to_channel_id() {
+ if chan.funding_txo == Some(outpoint) {
tx.version += 1;
continue 'search_loop;
}
4 changes: 2 additions & 2 deletions lightning-background-processor/src/lib.rs
@@ -929,7 +929,7 @@ mod tests {
use lightning::chain::transaction::OutPoint;
use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
use lightning::{get_event_msg, get_event};
- use lightning::ln::PaymentHash;
+ use lightning::ln::{PaymentHash, ChannelId};
use lightning::ln::channelmanager;
use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
use lightning::ln::features::{ChannelFeatures, NodeFeatures};
@@ -1414,7 +1414,7 @@
}

// Force-close the channel.
- nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap();

// Check that the force-close updates are persisted.
check_persisted_data!(nodes[0].node, filepath.clone());
4 changes: 2 additions & 2 deletions lightning-persister/src/fs_store.rs
@@ -450,7 +450,7 @@ mod tests {
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
- let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+ let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();

// Set the store's directory to read-only, which should result in
// returning an unrecoverable failure when we then attempt to persist a
@@ -489,7 +489,7 @@ mod tests {
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
- let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+ let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();

// Create the store with an invalid directory name and test that the
// channel fails to open because the directories fail to be created. There
39 changes: 25 additions & 14 deletions lightning/src/chain/chainmonitor.rs
@@ -31,6 +31,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::transaction::{OutPoint, TransactionData};
+ use crate::ln::ChannelId;
use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
use crate::events;
use crate::events::{Event, EventHandler};
@@ -158,7 +159,7 @@ pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [`Writeable::write`]: crate::util::ser::Writeable::write
- fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+ fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;

/// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
/// update.
@@ -193,7 +194,7 @@ pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
/// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
///
/// [`Writeable::write`]: crate::util::ser::Writeable::write
- fn update_persisted_channel(&self, channel_id: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+ fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
}

struct MonitorHolder<ChannelSigner: WriteableEcdsaChannelSigner> {
@@ -287,7 +288,7 @@ pub struct ChainMonitor<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T:
persister: P,
/// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
/// from the user and not from a [`ChannelMonitor`].
- pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
+ pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>>,
/// The best block height seen, used as a proxy for the passage of time.
highest_chain_height: AtomicUsize,

@@ -471,12 +472,15 @@ where C::Target: chain::Filter,
}
}

- /// Lists the funding outpoint of each [`ChannelMonitor`] being monitored.
+ /// Lists the funding outpoint and channel ID of each [`ChannelMonitor`] being monitored.
///
/// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
/// monitoring for on-chain state resolutions.
- pub fn list_monitors(&self) -> Vec<OutPoint> {
- self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect()
+ pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> {
+ self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| {
+ let channel_id = monitor_holder.monitor.channel_id();
+ (*outpoint, channel_id)
+ }).collect()
}

#[cfg(not(c_bindings))]
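
The `list_monitors` change above now returns `(OutPoint, ChannelId)` pairs, so callers can read the channel ID directly rather than deriving it from the outpoint. A hedged usage sketch (the helper name and the lookup pattern are illustrative, not part of the commit):

```rust
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;

// `monitors` would typically be the result of `ChainMonitor::list_monitors()`.
fn channel_id_for_outpoint(monitors: Vec<(OutPoint, ChannelId)>, target: OutPoint) -> Option<ChannelId> {
    monitors.into_iter()
        .find(|(outpoint, _)| *outpoint == target)
        .map(|(_, channel_id)| channel_id)
}
```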
Expand Down Expand Up @@ -542,8 +546,9 @@ where C::Target: chain::Filter,
// Completed event.
return Ok(());
}
- self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
- funding_txo,
+ let channel_id = monitor_data.monitor.channel_id();
+ self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
+ funding_txo, channel_id,
monitor_update_id: monitor_data.monitor.get_latest_update_id(),
}], monitor_data.monitor.get_counterparty_node_id()));
},
@@ -565,9 +570,14 @@ where C::Target: chain::Filter,
#[cfg(any(test, fuzzing))]
pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
let monitors = self.monitors.read().unwrap();
- let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
- self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
+ let (counterparty_node_id, channel_id) = if let Some(m) = monitors.get(&funding_txo) {
+ (m.monitor.get_counterparty_node_id(), m.monitor.channel_id())
+ } else {
+ (None, ChannelId::v1_from_funding_outpoint(funding_txo))
+ };
+ self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
funding_txo,
+ channel_id,
monitor_update_id,
}], counterparty_node_id));
self.event_notifier.notify();
@@ -752,12 +762,12 @@ where C::Target: chain::Filter,
Ok(persist_res)
}

- fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
+ fn update_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
// Update the monitor that watches the channel referred to by the given outpoint.
let monitors = self.monitors.read().unwrap();
match monitors.get(&funding_txo) {
None => {
- let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(funding_txo.to_channel_id()));
+ let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id));
log_error!(logger, "Failed to update channel monitor: no such monitor registered");

// We should never ever trigger this from within ChannelManager. Technically a
@@ -815,7 +825,7 @@ where C::Target: chain::Filter,
}
}

- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
for monitor_state in self.monitors.read().unwrap().values() {
let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
@@ -829,8 +839,9 @@ where C::Target: chain::Filter,
let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
if monitor_events.len() > 0 {
let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
+ let monitor_channel_id = monitor_state.monitor.channel_id();
let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
- pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
+ pending_monitor_events.push((monitor_outpoint, monitor_channel_id, monitor_events, counterparty_node_id));
}
}
}