Split PeerManager::handle_message to avoid explicit mem::drop
Previously, `handle_message` was a single large method consisting of two
logical parts: one that modifies the peer state and hence requires us to
hold the `peer_lock` `MutexGuard`, and, following a
`mem::drop(peer_lock)`, the remainder, which not only doesn't require
the `MutexGuard`, but relies on it having been dropped to avoid
double-locking.

However, the `mem::drop` was easily overlooked, making reasoning about
lock orders a headache. Here, we therefore have `handle_message` call
two sub-methods reflecting the two logical parts, allowing us to avoid
the explicit `mem::drop`. As the first sub-method takes the
`MutexGuard` by value and drops it on return, while the second never
receives it, the two methods' signatures now make the locking
requirements explicit and the code less error-prone.
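For illustration, here is a minimal standalone sketch of the resulting pattern (not LDK code: `Peer`, `Manager`, and the method names are simplified stand-ins). The first sub-method takes the `MutexGuard` by value, so the guard is released implicitly when it returns; the second only receives the `&Mutex`, so by the time it locks, the guard cannot still be alive:

use std::sync::{Mutex, MutexGuard};

struct Peer {
	received_message_since_timer_tick: bool,
}

struct Manager;

impl Manager {
	fn handle_message(&self, peer_mutex: &Mutex<Peer>, peer_lock: MutexGuard<Peer>, message: String) -> Option<String> {
		// The guard is moved into the first phase and dropped when it returns,
		// so the second phase can lock `peer_mutex` again without deadlocking.
		let message = self.do_handle_holding_lock(peer_lock, message)?;
		self.do_handle_without_lock(peer_mutex, message)
	}

	// Taking the `MutexGuard` by value guarantees it is released on return;
	// there is no `mem::drop` to forget.
	fn do_handle_holding_lock(&self, mut peer_lock: MutexGuard<Peer>, message: String) -> Option<String> {
		peer_lock.received_message_since_timer_tick = true;
		Some(message)
	}

	// Only `&Mutex<Peer>` is available here; re-locking is safe because the
	// guard was consumed by the first phase.
	fn do_handle_without_lock(&self, peer_mutex: &Mutex<Peer>, message: String) -> Option<String> {
		let state = peer_mutex.lock().unwrap();
		assert!(state.received_message_since_timer_tick);
		Some(message)
	}
}

fn main() {
	let peer_mutex = Mutex::new(Peer { received_message_since_timer_tick: false });
	let manager = Manager;
	let peer_lock = peer_mutex.lock().unwrap();
	let _ = manager.handle_message(&peer_mutex, peer_lock, "ping".to_owned());
}

The compiler now enforces what the explicit `mem::drop` only documented: once the first sub-method returns, the guard no longer exists, so the second phase may take the lock freely.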
tnull committed Apr 4, 2024
1 parent b8b1ef3 commit f2ecf8d
Showing 1 changed file with 40 additions and 6 deletions.
46 changes: 40 additions & 6 deletions lightning/src/ln/peer_handler.rs
@@ -1588,15 +1588,37 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 	}
 
 	/// Process an incoming message and return a decision (ok, lightning error, peer handling error) regarding the next action with the peer
+	///
+	/// Returns the message back if it needs to be broadcasted to all other peers.
 	fn handle_message(
 		&self,
 		peer_mutex: &Mutex<Peer>,
-		mut peer_lock: MutexGuard<Peer>,
-		message: wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>
-	) -> Result<Option<wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> {
+		peer_lock: MutexGuard<Peer>,
+		message: wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>
+	) -> Result<Option<wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> {
 		let their_node_id = peer_lock.their_node_id.clone().expect("We know the peer's public key by the time we receive messages").0;
 		let logger = WithContext::from(&self.logger, Some(their_node_id), None);
 
+		let message = match self.do_handle_message_holding_peer_lock(peer_lock, message, &their_node_id, &logger)? {
+			Some(processed_message) => processed_message,
+			None => return Ok(None),
+		};
+
+		self.do_handle_message_without_peer_lock(peer_mutex, message, &their_node_id, &logger)
+	}
+
+	// Conducts all message processing that requires us to hold the `peer_lock`.
+	//
+	// Returns `None` if the message was fully processed and otherwise returns the message back to
+	// allow it to be subsequently processed by `do_handle_message_without_peer_lock`.
+	fn do_handle_message_holding_peer_lock<'a>(
+		&self,
+		mut peer_lock: MutexGuard<Peer>,
+		message: wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>,
+		their_node_id: &PublicKey,
+		logger: &WithContext<'a, L>
+	) -> Result<Option<wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError>
+	{
 		peer_lock.received_message_since_timer_tick = true;
 
 		// Need an Init as first message
@@ -1677,8 +1699,20 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 			peer_lock.received_channel_announce_since_backlogged = true;
 		}
 
-		mem::drop(peer_lock);
-
+		Ok(Some(message))
+	}
+
+	// Conducts all message processing that doesn't require us to hold the `peer_lock`.
+	//
+	// Returns the message back if it needs to be broadcasted to all other peers.
+	fn do_handle_message_without_peer_lock<'a>(
+		&self,
+		peer_mutex: &Mutex<Peer>,
+		message: wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>,
+		their_node_id: &PublicKey,
+		logger: &WithContext<'a, L>
+	) -> Result<Option<wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError>
+	{
 		if is_gossip_msg(message.type_id()) {
 			log_gossip!(logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
 		} else {
@@ -1880,7 +1914,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 		Ok(should_forward)
 	}
 
-	fn forward_broadcast_msg(&self, peers: &HashMap<Descriptor, Mutex<Peer>>, msg: &wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) {
+	fn forward_broadcast_msg(&self, peers: &HashMap<Descriptor, Mutex<Peer>>, msg: &wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) {
 		match msg {
 			wire::Message::ChannelAnnouncement(ref msg) => {
 				log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg);
@@ -2272,7 +2306,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 								// We do not have the peers write lock, so we just store that we're
 								// about to disconnect the peer and do it after we finish
 								// processing most messages.
-								let msg = msg.map(|msg| wire::Message::<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg));
+								let msg = msg.map(|msg| wire::Message::<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg));
 								peers_to_disconnect.insert(node_id, msg);
 							},
 							msgs::ErrorAction::DisconnectPeerWithWarning { msg } => {