Skip to content

Commit

Permalink
Refactor: Take their_node_id by value across all handler interfaces
Browse files Browse the repository at this point in the history
In order to maintain interface consistency, we refactor all message
handler interfaces to take `PublicKey` rather than `&PublicKey`, as the
difference in efficiency should be negligible and the former is easier to
handle in binding languages.

Over time, we also want to move (no pun intended) towards all messaging
interfaces using move semantics, so dropping the reference for
`PublicKey` is the first step in this direction.
  • Loading branch information
tnull committed Sep 11, 2024
1 parent f68b8b6 commit 3eaa13e
Show file tree
Hide file tree
Showing 29 changed files with 1,951 additions and 1,954 deletions.
64 changes: 32 additions & 32 deletions fuzz/src/chanmon_consistency.rs
Original file line number Diff line number Diff line change
Expand Up @@ -806,13 +806,13 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
networks: None,
remote_network_address: None,
};
$source.peer_connected(&$dest.get_our_node_id(), &init_dest, true).unwrap();
$source.peer_connected($dest.get_our_node_id(), &init_dest, true).unwrap();
let init_src = Init {
features: $source.init_features(),
networks: None,
remote_network_address: None,
};
$dest.peer_connected(&$source.get_our_node_id(), &init_src, false).unwrap();
$dest.peer_connected($source.get_our_node_id(), &init_src, false).unwrap();

$source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None, None).unwrap();
let open_channel = {
Expand All @@ -825,7 +825,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
}
};

$dest.handle_open_channel(&$source.get_our_node_id(), &open_channel);
$dest.handle_open_channel($source.get_our_node_id(), &open_channel);
let accept_channel = {
if anchors {
let events = $dest.get_and_clear_pending_events();
Expand Down Expand Up @@ -860,7 +860,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
}
};

$source.handle_accept_channel(&$dest.get_our_node_id(), &accept_channel);
$source.handle_accept_channel($dest.get_our_node_id(), &accept_channel);
let funding_output;
{
let mut events = $source.get_and_clear_pending_events();
Expand Down Expand Up @@ -904,7 +904,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
panic!("Wrong event type");
}
};
$dest.handle_funding_created(&$source.get_our_node_id(), &funding_created);
$dest.handle_funding_created($source.get_our_node_id(), &funding_created);

let funding_signed = {
let events = $dest.get_and_clear_pending_msg_events();
Expand All @@ -923,7 +923,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
panic!("Wrong event type");
}

$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);
$source.handle_funding_signed($dest.get_our_node_id(), &funding_signed);
let events = $source.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] {
Expand Down Expand Up @@ -963,7 +963,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
{
for node in $nodes.iter() {
if node.get_our_node_id() == *node_id {
node.handle_channel_ready(&$nodes[idx].get_our_node_id(), msg);
node.handle_channel_ready($nodes[idx].get_our_node_id(), msg);
}
}
} else {
Expand Down Expand Up @@ -1134,7 +1134,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
for update_add in update_add_htlcs.iter() {
out.locked_write(format!("Delivering update_add_htlc to node {}.\n", idx).as_bytes());
if !$corrupt_forward {
dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add);
dest.handle_update_add_htlc(nodes[$node].get_our_node_id(), update_add);
} else {
// Corrupt the update_add_htlc message so that its HMAC
// check will fail and we generate a
Expand All @@ -1143,24 +1143,24 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
let mut msg_ser = update_add.encode();
msg_ser[1000] ^= 0xff;
let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg);
dest.handle_update_add_htlc(nodes[$node].get_our_node_id(), &new_msg);
}
}
for update_fulfill in update_fulfill_htlcs.iter() {
out.locked_write(format!("Delivering update_fulfill_htlc to node {}.\n", idx).as_bytes());
dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), update_fulfill);
dest.handle_update_fulfill_htlc(nodes[$node].get_our_node_id(), update_fulfill);
}
for update_fail in update_fail_htlcs.iter() {
out.locked_write(format!("Delivering update_fail_htlc to node {}.\n", idx).as_bytes());
dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), update_fail);
dest.handle_update_fail_htlc(nodes[$node].get_our_node_id(), update_fail);
}
for update_fail_malformed in update_fail_malformed_htlcs.iter() {
out.locked_write(format!("Delivering update_fail_malformed_htlc to node {}.\n", idx).as_bytes());
dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed);
dest.handle_update_fail_malformed_htlc(nodes[$node].get_our_node_id(), update_fail_malformed);
}
if let Some(msg) = update_fee {
out.locked_write(format!("Delivering update_fee to node {}.\n", idx).as_bytes());
dest.handle_update_fee(&nodes[$node].get_our_node_id(), &msg);
dest.handle_update_fee(nodes[$node].get_our_node_id(), &msg);
}
let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() ||
!update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty();
Expand All @@ -1177,7 +1177,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
break;
}
out.locked_write(format!("Delivering commitment_signed to node {}.\n", idx).as_bytes());
dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed);
dest.handle_commitment_signed(nodes[$node].get_our_node_id(), &commitment_signed);
break;
}
}
Expand All @@ -1186,15 +1186,15 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
for (idx, dest) in nodes.iter().enumerate() {
if dest.get_our_node_id() == *node_id {
out.locked_write(format!("Delivering revoke_and_ack to node {}.\n", idx).as_bytes());
dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg);
dest.handle_revoke_and_ack(nodes[$node].get_our_node_id(), msg);
}
}
},
events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
for (idx, dest) in nodes.iter().enumerate() {
if dest.get_our_node_id() == *node_id {
out.locked_write(format!("Delivering channel_reestablish to node {}.\n", idx).as_bytes());
dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg);
dest.handle_channel_reestablish(nodes[$node].get_our_node_id(), msg);
}
}
},
Expand Down Expand Up @@ -1453,16 +1453,16 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {

0x0c => {
if !chan_a_disconnected {
nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
nodes[0].peer_disconnected(nodes[1].get_our_node_id());
nodes[1].peer_disconnected(nodes[0].get_our_node_id());
chan_a_disconnected = true;
drain_msg_events_on_disconnect!(0);
}
},
0x0d => {
if !chan_b_disconnected {
nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
nodes[1].peer_disconnected(nodes[2].get_our_node_id());
nodes[2].peer_disconnected(nodes[1].get_our_node_id());
chan_b_disconnected = true;
drain_msg_events_on_disconnect!(2);
}
Expand All @@ -1474,13 +1474,13 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
networks: None,
remote_network_address: None,
};
nodes[0].peer_connected(&nodes[1].get_our_node_id(), &init_1, true).unwrap();
nodes[0].peer_connected(nodes[1].get_our_node_id(), &init_1, true).unwrap();
let init_0 = Init {
features: nodes[0].init_features(),
networks: None,
remote_network_address: None,
};
nodes[1].peer_connected(&nodes[0].get_our_node_id(), &init_0, false).unwrap();
nodes[1].peer_connected(nodes[0].get_our_node_id(), &init_0, false).unwrap();
chan_a_disconnected = false;
}
},
Expand All @@ -1491,13 +1491,13 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
networks: None,
remote_network_address: None,
};
nodes[1].peer_connected(&nodes[2].get_our_node_id(), &init_2, true).unwrap();
nodes[1].peer_connected(nodes[2].get_our_node_id(), &init_2, true).unwrap();
let init_1 = Init {
features: nodes[1].init_features(),
networks: None,
remote_network_address: None,
};
nodes[2].peer_connected(&nodes[1].get_our_node_id(), &init_1, false).unwrap();
nodes[2].peer_connected(nodes[1].get_our_node_id(), &init_1, false).unwrap();
chan_b_disconnected = false;
}
},
Expand Down Expand Up @@ -1534,7 +1534,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {

0x2c => {
if !chan_a_disconnected {
nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
nodes[1].peer_disconnected(nodes[0].get_our_node_id());
chan_a_disconnected = true;
push_excess_b_events!(
nodes[1].get_and_clear_pending_msg_events().drain(..),
Expand All @@ -1550,14 +1550,14 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
},
0x2d => {
if !chan_a_disconnected {
nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
nodes[0].peer_disconnected(nodes[1].get_our_node_id());
chan_a_disconnected = true;
nodes[0].get_and_clear_pending_msg_events();
ab_events.clear();
ba_events.clear();
}
if !chan_b_disconnected {
nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
nodes[2].peer_disconnected(nodes[1].get_our_node_id());
chan_b_disconnected = true;
nodes[2].get_and_clear_pending_msg_events();
bc_events.clear();
Expand All @@ -1570,7 +1570,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
},
0x2e => {
if !chan_b_disconnected {
nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
nodes[1].peer_disconnected(nodes[2].get_our_node_id());
chan_b_disconnected = true;
push_excess_b_events!(
nodes[1].get_and_clear_pending_msg_events().drain(..),
Expand Down Expand Up @@ -1759,13 +1759,13 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
networks: None,
remote_network_address: None,
};
nodes[0].peer_connected(&nodes[1].get_our_node_id(), &init_1, true).unwrap();
nodes[0].peer_connected(nodes[1].get_our_node_id(), &init_1, true).unwrap();
let init_0 = Init {
features: nodes[0].init_features(),
networks: None,
remote_network_address: None,
};
nodes[1].peer_connected(&nodes[0].get_our_node_id(), &init_0, false).unwrap();
nodes[1].peer_connected(nodes[0].get_our_node_id(), &init_0, false).unwrap();
chan_a_disconnected = false;
}
if chan_b_disconnected {
Expand All @@ -1774,13 +1774,13 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
networks: None,
remote_network_address: None,
};
nodes[1].peer_connected(&nodes[2].get_our_node_id(), &init_2, true).unwrap();
nodes[1].peer_connected(nodes[2].get_our_node_id(), &init_2, true).unwrap();
let init_1 = Init {
features: nodes[1].init_features(),
networks: None,
remote_network_address: None,
};
nodes[2].peer_connected(&nodes[1].get_our_node_id(), &init_1, false).unwrap();
nodes[2].peer_connected(nodes[1].get_our_node_id(), &init_1, false).unwrap();
chan_b_disconnected = false;
}

Expand Down
4 changes: 2 additions & 2 deletions fuzz/src/onion_message.rs
Original file line number Diff line number Diff line change
Expand Up @@ -68,8 +68,8 @@ pub fn do_test<L: Logger>(data: &[u8], logger: &L) {
features.set_onion_messages_optional();
let init = msgs::Init { features, networks: None, remote_network_address: None };

onion_messenger.peer_connected(&peer_node_id, &init, false).unwrap();
onion_messenger.handle_onion_message(&peer_node_id, &msg);
onion_messenger.peer_connected(peer_node_id, &init, false).unwrap();
onion_messenger.handle_onion_message(peer_node_id, &msg);
}
}

Expand Down
20 changes: 10 additions & 10 deletions lightning-background-processor/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1648,7 +1648,7 @@ mod tests {
};
nodes[i]
.node
.peer_connected(&nodes[j].node.get_our_node_id(), &init_i, true)
.peer_connected(nodes[j].node.get_our_node_id(), &init_i, true)
.unwrap();
let init_j = Init {
features: nodes[i].node.init_features(),
Expand All @@ -1657,7 +1657,7 @@ mod tests {
};
nodes[j]
.node
.peer_connected(&nodes[i].node.get_our_node_id(), &init_j, false)
.peer_connected(nodes[i].node.get_our_node_id(), &init_j, false)
.unwrap();
}
}
Expand Down Expand Up @@ -1685,14 +1685,14 @@ mod tests {
MessageSendEvent::SendFundingCreated,
$node_b.node.get_our_node_id()
);
$node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &msg_a);
$node_b.node.handle_funding_created($node_a.node.get_our_node_id(), &msg_a);
get_event!($node_b, Event::ChannelPending);
let msg_b = get_event_msg!(
$node_b,
MessageSendEvent::SendFundingSigned,
$node_a.node.get_our_node_id()
);
$node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &msg_b);
$node_a.node.handle_funding_signed($node_b.node.get_our_node_id(), &msg_b);
get_event!($node_a, Event::ChannelPending);
tx
}};
Expand All @@ -1709,13 +1709,13 @@ mod tests {
MessageSendEvent::SendOpenChannel,
$node_b.node.get_our_node_id()
);
$node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &msg_a);
$node_b.node.handle_open_channel($node_a.node.get_our_node_id(), &msg_a);
let msg_b = get_event_msg!(
$node_b,
MessageSendEvent::SendAcceptChannel,
$node_a.node.get_our_node_id()
);
$node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &msg_b);
$node_a.node.handle_accept_channel($node_b.node.get_our_node_id(), &msg_b);
}};
}

Expand Down Expand Up @@ -2115,10 +2115,10 @@ mod tests {
.funding_transaction_generated(temporary_channel_id, node_1_id, funding_tx.clone())
.unwrap();
let msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_1_id);
nodes[1].node.handle_funding_created(&node_0_id, &msg_0);
nodes[1].node.handle_funding_created(node_0_id, &msg_0);
get_event!(nodes[1], Event::ChannelPending);
let msg_1 = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_0_id);
nodes[0].node.handle_funding_signed(&node_1_id, &msg_1);
nodes[0].node.handle_funding_signed(node_1_id, &msg_1);
let _ = channel_pending_recv
.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
.expect("ChannelPending not handled within deadline");
Expand All @@ -2128,10 +2128,10 @@ mod tests {
let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_1_id);
confirm_transaction(&mut nodes[1], &funding_tx);
let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, node_0_id);
nodes[0].node.handle_channel_ready(&node_1_id, &bs_funding);
nodes[0].node.handle_channel_ready(node_1_id, &bs_funding);
let _as_channel_update =
get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_1_id);
nodes[1].node.handle_channel_ready(&node_0_id, &as_funding);
nodes[1].node.handle_channel_ready(node_0_id, &as_funding);
let _bs_channel_update =
get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_0_id);
let broadcast_funding =
Expand Down
Loading

0 comments on commit 3eaa13e

Please sign in to comment.