From 8d4109f8cb3af92c4aa4ca2158046b6ad92e9ac8 Mon Sep 17 00:00:00 2001 From: plebhash Date: Mon, 18 Nov 2024 09:33:51 -0300 Subject: [PATCH 01/18] docs for channel_logic modules --- .../src/channel_logic/channel_factory.rs | 77 ++++++++++++++++--- .../roles-logic-sv2/src/channel_logic/mod.rs | 6 ++ .../src/channel_logic/proxy_group_channel.rs | 5 ++ 3 files changed, 76 insertions(+), 12 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs index 943349ebd..cb74c4f76 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs @@ -1,3 +1,5 @@ +//! Contains logic for creating channels. + use super::extended_to_standard_job; use crate::{ common_properties::StandardChannel, @@ -48,7 +50,7 @@ pub struct PartialSetCustomMiningJob { pub future_job: bool, } -/// Represent the action that needs to be done when a new share is received. +/// Represents the action that needs to be done when a new share is received. 
#[derive(Debug, Clone)] pub enum OnNewShare { /// Used when the received is malformed, is for an inexistent channel or do not meet downstream @@ -74,7 +76,7 @@ pub enum OnNewShare { } impl OnNewShare { - /// convert standard share into extended share + /// Convert standard share into extended share pub fn into_extended(&mut self, extranonce: Vec, up_id: u32) { match self { OnNewShare::SendErrorDownstream(_) => (), @@ -119,7 +121,7 @@ impl OnNewShare { } } -/// A share can be both extended or standard +/// A share can be either extended or standard #[derive(Clone, Debug)] pub enum Share { Extended(SubmitSharesExtended<'static>), @@ -127,7 +129,7 @@ pub enum Share { Standard((SubmitSharesStandard, u32)), } -/// helper type used before a `SetNewPrevHash` has a channel_id +/// Helper type used before a `SetNewPrevHash` has a channel_id #[derive(Clone, Debug)] pub struct StagedPhash { job_id: u32, @@ -137,6 +139,7 @@ pub struct StagedPhash { } impl StagedPhash { + /// converts a Staged PrevHash into a SetNewPrevHash message pub fn into_set_p_hash( &self, channel_id: u32, @@ -153,24 +156,31 @@ impl StagedPhash { } impl Share { + /// get share sequence number pub fn get_sequence_number(&self) -> u32 { match self { Share::Extended(s) => s.sequence_number, Share::Standard(s) => s.0.sequence_number, } } + + /// get share channel id pub fn get_channel_id(&self) -> u32 { match self { Share::Extended(s) => s.channel_id, Share::Standard(s) => s.0.channel_id, } } + + /// get share timestamp pub fn get_n_time(&self) -> u32 { match self { Share::Extended(s) => s.ntime, Share::Standard(s) => s.0.ntime, } } + + /// get share nonce pub fn get_nonce(&self) -> u32 { match self { Share::Extended(s) => s.nonce, @@ -178,6 +188,7 @@ impl Share { } } + /// get share job id pub fn get_job_id(&self) -> u32 { match self { Share::Extended(s) => s.job_id, @@ -185,6 +196,7 @@ impl Share { } } + /// get share version pub fn get_version(&self) -> u32 { match self { Share::Extended(s) => s.version, 
@@ -194,7 +206,7 @@ impl Share { } #[derive(Debug)] -/// Basic logic shared between all the channel factory. +/// Basic logic shared between all the channel factories struct ChannelFactory { ids: Arc>, standard_channels_for_non_hom_downstreams: @@ -236,13 +248,14 @@ impl ChannelFactory { ), } } + /// Called when a `OpenExtendedMiningChannel` message is received. /// Here we save the downstream's target (based on hashrate) and the /// channel's extranonce details before returning the relevant SV2 mining messages /// to be sent downstream. For the mining messages, we will first return an /// `OpenExtendedMiningChannelSuccess` if the channel is successfully opened. Then we add /// the `NewExtendedMiningJob` and `SetNewPrevHash` messages if the relevant data is - /// available. If the channel opening fails, we return `OpenExtenedMiningChannelError`. + /// available. If the channel opening fails, we return `OpenExtendedMiningChannelError`. pub fn new_extended_channel( &mut self, request_id: u32, @@ -313,8 +326,9 @@ impl ChannelFactory { )]) } } + /// Called when we want to replicate a channel already opened by another actor. - /// is used only in the jd client from the template provider module to mock a pool. + /// It is used only in the jd client from the template provider module to mock a pool. /// Anything else should open channel with the new_extended_channel function pub fn replicate_upstream_extended_channel_only_jd( &mut self, @@ -672,6 +686,7 @@ impl ChannelFactory { self.last_prev_hash = Some((m, ids)); Ok(()) } + /// Called when a `NewExtendedMiningJob` arrives. If the job is future, we add it to the future /// queue. 
If the job is not future, we pair it with a the most recent prev hash fn on_new_extended_mining_job( @@ -907,6 +922,7 @@ impl ChannelFactory { Ok(OnNewShare::SendErrorDownstream(error)) } } + /// Returns the downstream target and extranonce for the channel fn get_channel_specific_mining_info(&self, m: &Share) -> Option<(mining_sv2::Target, Vec)> { match m { @@ -968,7 +984,7 @@ impl ChannelFactory { } } -/// Used by a pool to in order to manage all downstream channel. It add job creation capabilities +/// Used by a pool to in order to manage all downstream channel. It adds job creation capabilities /// to ChannelFactory. #[derive(Debug)] pub struct PoolChannelFactory { @@ -976,11 +992,12 @@ pub struct PoolChannelFactory { job_creator: JobsCreators, pool_coinbase_outputs: Vec, pool_signature: String, - // extedned_channel_id -> SetCustomMiningJob + // extended_channel_id -> SetCustomMiningJob negotiated_jobs: HashMap, BuildNoHashHasher>, } impl PoolChannelFactory { + /// constructor pub fn new( ids: Arc>, extranonces: ExtendedExtranonce, @@ -1019,6 +1036,7 @@ impl PoolChannelFactory { negotiated_jobs: HashMap::with_hasher(BuildNoHashHasher::default()), } } + /// Calls [`ChannelFactory::add_standard_channel`] pub fn add_standard_channel( &mut self, @@ -1030,6 +1048,7 @@ impl PoolChannelFactory { self.inner .add_standard_channel(request_id, downstream_hash_rate, is_header_only, id) } + /// Calls [`ChannelFactory::new_extended_channel`] pub fn new_extended_channel( &mut self, @@ -1040,6 +1059,7 @@ impl PoolChannelFactory { self.inner .new_extended_channel(request_id, hash_rate, min_extranonce_size) } + /// Called when we want to replicate a channel already opened by another actor. /// is used only in the jd client from the template provider module to mock a pool. 
/// Anything else should open channel with the new_extended_channel function @@ -1057,6 +1077,7 @@ impl PoolChannelFactory { extranonce_size, ) } + /// Called only when a new prev hash is received by a Template Provider. It matches the /// message with a `job_id` and calls [`ChannelFactory::on_new_prev_hash`] /// it return the job_id @@ -1074,6 +1095,7 @@ impl PoolChannelFactory { self.inner.on_new_prev_hash(new_prev_hash)?; Ok(job_id) } + /// Called only when a new template is received by a Template Provider pub fn on_new_template( &mut self, @@ -1087,6 +1109,7 @@ impl PoolChannelFactory { )?; self.inner.on_new_extended_mining_job(new_job) } + /// Called when a `SubmitSharesStandard` message is received from the downstream. We check the /// shares against the channel's respective target and return `OnNewShare` to let us know if /// and where the shares should be relayed @@ -1213,11 +1236,13 @@ impl PoolChannelFactory { ) } } + /// Utility function to return a new group id pub fn new_group_id(&mut self) -> u32 { let new_id = self.inner.ids.safe_lock(|ids| ids.new_group_id()).unwrap(); new_id } + /// Utility function to return a new standard channel id pub fn new_standard_id_for_hom(&mut self) -> u32 { let hom_group_id = 0; @@ -1228,6 +1253,7 @@ impl PoolChannelFactory { .unwrap(); new_id } + /// Returns the full extranonce, extranonce1 (static for channel) + extranonce2 (miner nonce /// space) pub fn extranonce_from_downstream_extranonce( @@ -1238,6 +1264,7 @@ impl PoolChannelFactory { .extranonces .extranonce_from_downstream_extranonce(ext) } + /// Called when a new custom mining job arrives pub fn on_new_set_custom_mining_job( &mut self, @@ -1265,16 +1292,18 @@ impl PoolChannelFactory { true } + /// get extended channel ids pub fn get_extended_channels_ids(&self) -> Vec { self.inner.extended_channels.keys().copied().collect() } + /// update coinbase outputs pub fn update_pool_outputs(&mut self, outs: Vec) { self.pool_coinbase_outputs = outs; } - /// calls 
[`ChannelFactory::update_target_for_channel`] - /// Set a partucular downstream channel target. + /// Calls [`ChannelFactory::update_target_for_channel`] + /// Set a particular downstream channel target. pub fn update_target_for_channel( &mut self, channel_id: u32, @@ -1282,7 +1311,8 @@ impl PoolChannelFactory { ) -> Option { self.inner.update_target_for_channel(channel_id, new_target) } - // Set the target for this channel. This is the upstream target. + + /// Set the target for this channel. This is the upstream target. pub fn set_target(&mut self, new_target: &mut Target) { self.inner.kind.set_target(new_target); } @@ -1301,6 +1331,7 @@ pub struct ProxyExtendedChannelFactory { } impl ProxyExtendedChannelFactory { + /// constructor #[allow(clippy::too_many_arguments)] pub fn new( ids: Arc>, @@ -1353,6 +1384,7 @@ impl ProxyExtendedChannelFactory { extended_channel_id, } } + /// Calls [`ChannelFactory::add_standard_channel`] pub fn add_standard_channel( &mut self, @@ -1364,6 +1396,7 @@ impl ProxyExtendedChannelFactory { self.inner .add_standard_channel(request_id, downstream_hash_rate, id_header_only, id) } + /// Calls [`ChannelFactory::new_extended_channel`] pub fn new_extended_channel( &mut self, @@ -1374,6 +1407,7 @@ impl ProxyExtendedChannelFactory { self.inner .new_extended_channel(request_id, hash_rate, min_extranonce_size) } + /// Called only when a new prev hash is received by a Template Provider when job declaration is /// used. It matches the message with a `job_id`, creates a new custom job, and calls /// [`ChannelFactory::on_new_prev_hash`] @@ -1417,6 +1451,7 @@ impl ProxyExtendedChannelFactory { panic!("A channel factory without job creator do not have declaration capabilities") } } + /// Called only when a new template is received by a Template Provider when job declaration is /// used. 
It creates a new custom job and calls /// [`ChannelFactory::on_new_extended_mining_job`] @@ -1687,12 +1722,17 @@ impl ProxyExtendedChannelFactory { ) -> Result, BuildNoHashHasher>, Error> { self.inner.on_new_extended_mining_job(m) } + + /// set new target pub fn set_target(&mut self, new_target: &mut Target) { self.inner.kind.set_target(new_target); } + + /// get last valid job version pub fn last_valid_job_version(&self) -> Option { self.inner.last_valid_job.as_ref().map(|j| j.0.version) } + /// Returns the full extranonce, extranonce1 (static for channel) + extranonce2 (miner nonce /// space) pub fn extranonce_from_downstream_extranonce( @@ -1703,6 +1743,7 @@ impl ProxyExtendedChannelFactory { .extranonces .extranonce_from_downstream_extranonce(ext) } + /// Returns the most recent prev hash pub fn last_prev_hash(&self) -> Option> { self.inner @@ -1710,27 +1751,38 @@ impl ProxyExtendedChannelFactory { .as_ref() .map(|f| f.0.prev_hash.clone()) } + + /// last min ntime pub fn last_min_ntime(&self) -> Option { self.inner.last_prev_hash.as_ref().map(|f| f.0.min_ntime) } + + /// last nbits pub fn last_nbits(&self) -> Option { self.inner.last_prev_hash.as_ref().map(|f| f.0.nbits) } + + /// extranonce_size pub fn extranonce_size(&self) -> usize { self.inner.extranonces.get_len() } + + /// extranonce_2 size pub fn channel_extranonce2_size(&self) -> usize { self.inner.extranonces.get_len() - self.inner.extranonces.get_range0_len() } // Only used when the proxy is using Job Declaration + /// update pool outputs pub fn update_pool_outputs(&mut self, outs: Vec) { self.pool_coinbase_outputs = Some(outs); } + /// get this channel id pub fn get_this_channel_id(&self) -> u32 { self.extended_channel_id } + /// returns the extranonce1 len of the upstream. 
For a proxy, this would /// be the extranonce_prefix len pub fn get_upstream_extranonce1_len(&self) -> usize { @@ -1755,6 +1807,7 @@ pub enum ExtendedChannelKind { Pool, } impl ExtendedChannelKind { + /// set target pub fn set_target(&mut self, new_target: &mut Target) { match self { ExtendedChannelKind::Proxy { upstream_target } diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs index 7b5f0feed..05dbb3d12 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs @@ -1,3 +1,9 @@ +//! A module for managing channels on applications. +//! +//! Divided in two submodules: +//! - [`channel_factory`] +//! - [`proxy_group_channel`] + pub mod channel_factory; pub mod proxy_group_channel; diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs index 65f4cdb3d..452c39fbb 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs @@ -1,3 +1,5 @@ +//! Contains logic for managing Standard Channels via Group Channels. 
+ use crate::{common_properties::StandardChannel, parsers::Mining, Error}; use mining_sv2::{ @@ -14,6 +16,7 @@ pub struct GroupChannels { channels: HashMap, } impl GroupChannels { + /// constructor pub fn new() -> Self { Self { channels: HashMap::new(), @@ -70,6 +73,8 @@ impl GroupChannels { None => Err(Error::GroupIdNotFound), } } + + /// get group channel ids pub fn ids(&self) -> Vec { self.channels.keys().copied().collect() } From c98dd5aeceda51563b944ba6a783db02649d27c3 Mon Sep 17 00:00:00 2001 From: plebhash Date: Tue, 19 Nov 2024 10:35:44 -0300 Subject: [PATCH 02/18] docs for job_dispatcher module --- .../v2/roles-logic-sv2/src/job_dispatcher.rs | 49 ++++++------------- 1 file changed, 15 insertions(+), 34 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs index 019f9f274..4eac84734 100644 --- a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs +++ b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs @@ -41,6 +41,8 @@ pub fn extended_to_standard_job_for_group_channel<'a>( merkle_root: merkle_root?.try_into().ok()?, }) } + +// helper struct to easily calculate block hashes from headers #[allow(dead_code)] struct BlockHeader<'a> { version: u32, @@ -67,36 +69,13 @@ impl<'a> BlockHeader<'a> { } } -#[allow(dead_code)] -fn target_from_shares( - job: &DownstreamJob, - prev_hash: &[u8], - nbits: u32, - share: &SubmitSharesStandard, -) -> Target { - let header = BlockHeader { - version: share.version, - prev_hash, - merkle_root: &job.merkle_root, - timestamp: share.ntime, - nbits, - nonce: share.nonce, - }; - header.hash() -} - +// helper struct to identify Standard Jobs being managed for downstream #[derive(Debug)] struct DownstreamJob { merkle_root: Vec, extended_job_id: u32, } -#[derive(Debug)] -struct ExtendedJobs { - #[allow(dead_code)] - upstream_target: Vec, -} - /// Used by proxies to keep track of standard jobs in the group channel /// created with the sv2 server #[derive(Debug)] 
@@ -117,6 +96,7 @@ pub struct GroupChannelJobDispatcher { nbits: u32, } +/// Used to signal if submitted shares correlate to valid jobs pub enum SendSharesResponse { //ValidAndMeetUpstreamTarget((SubmitSharesStandard,SubmitSharesSuccess)), Valid(SubmitSharesStandard), @@ -124,6 +104,7 @@ pub enum SendSharesResponse { } impl GroupChannelJobDispatcher { + /// constructor pub fn new(ids: Arc>) -> Self { Self { target: [0_u8; 32].into(), @@ -497,21 +478,21 @@ mod tests { job_id: u32, ) { let shares = SubmitSharesStandard { - /// Channel identification. + // Channel identification. channel_id: standard_channel_id, - /// Unique sequential identifier of the submit within the channel. + // Unique sequential identifier of the submit within the channel. sequence_number: 0, - /// Identifier of the job as provided by *NewMiningJob* or - /// *NewExtendedMiningJob* message. + // Identifier of the job as provided by *NewMiningJob* or + // *NewExtendedMiningJob* message. job_id, - /// Nonce leading to the hash being submitted. + // Nonce leading to the hash being submitted. nonce: 1, - /// The nTime field in the block header. This MUST be greater than or equal - /// to the header_timestamp field in the latest SetNewPrevHash message - /// and lower than or equal to that value plus the number of seconds since - /// the receipt of that message. + // The nTime field in the block header. This MUST be greater than or equal + // to the header_timestamp field in the latest SetNewPrevHash message + // and lower than or equal to that value plus the number of seconds since + // the receipt of that message. ntime: 1, - /// Full nVersion field. + // Full nVersion field. 
version: 1, }; let mut faulty_shares = shares.clone(); From 55002c8f5f5fad5ae631b6bfb181237c45690c9e Mon Sep 17 00:00:00 2001 From: plebhash Date: Wed, 20 Nov 2024 17:57:42 -0300 Subject: [PATCH 03/18] docs for parsers module --- protocols/v2/roles-logic-sv2/src/parsers.rs | 25 +++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/parsers.rs b/protocols/v2/roles-logic-sv2/src/parsers.rs index d30ca6ece..bdf9e8640 100644 --- a/protocols/v2/roles-logic-sv2/src/parsers.rs +++ b/protocols/v2/roles-logic-sv2/src/parsers.rs @@ -1,5 +1,7 @@ -//! The parsers modules provides logic to convert raw SV2 message data into rust types -//! as well as logic to handle conversions among SV2 rust types +//! The parsers module provides logic to convert raw Sv2 message data into rust types, +//! as well as logic to handle conversions among Sv2 rust types +//! +//! Most of the logic on this module is tightly coupled with the binary_sv2 crate. use crate::Error; @@ -86,8 +88,12 @@ use mining_sv2::{ use core::convert::{TryFrom, TryInto}; use tracing::error; +// todo: fix this, PoolMessages shouldn't be a generic parser +/// An alias to a generic parser pub type AnyMessage<'a> = PoolMessages<'a>; +/// A parser of messages that are common to all Sv2 subprotocols, to be used for parsing raw +/// messages #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum CommonMessages<'a> { @@ -99,6 +105,7 @@ pub enum CommonMessages<'a> { SetupConnectionSuccess(SetupConnectionSuccess), } +/// A parser of messages of Template Distribution subprotocol, to be used for parsing raw messages #[derive(Clone, Debug)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum TemplateDistribution<'a> { @@ -116,6 +123,7 @@ pub enum TemplateDistribution<'a> { SubmitSolution(SubmitSolution<'a>), } +/// A parser of messages of Job Declaration subprotocol, to be used for parsing raw 
messages #[derive(Clone, Debug)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum JobDeclaration<'a> { @@ -140,6 +148,7 @@ pub enum JobDeclaration<'a> { SubmitSolution(SubmitSolutionJd<'a>), } +/// A parser of messages of Mining subprotocol, to be used for parsing raw messages #[derive(Clone, Debug)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum Mining<'a> { @@ -187,6 +196,7 @@ pub enum Mining<'a> { } impl<'a> Mining<'a> { + /// converter into static lifetime pub fn into_static(self) -> Mining<'static> { match self { Mining::CloseChannel(m) => Mining::CloseChannel(m.into_static()), @@ -225,8 +235,12 @@ impl<'a> Mining<'a> { } } +/// A trait that every Sv2 message parser must implement. +/// It helps parsing from Rust types to raw messages. pub trait IsSv2Message { + /// get message type fn message_type(&self) -> u8; + /// get channel bit fn channel_bit(&self) -> bool; } @@ -584,6 +598,7 @@ impl<'decoder> Deserialize<'decoder> for MiningDeviceMessages<'decoder> { } } +/// A list of 8-bit message type variants that are common to all Sv2 subprotocols #[derive(Debug, Clone, Copy)] #[repr(u8)] #[allow(clippy::enum_variant_names)] @@ -634,6 +649,7 @@ impl<'a> TryFrom<(u8, &'a mut [u8])> for CommonMessages<'a> { } } +/// A list of 8-bit message type variants under Template Distribution subprotocol #[derive(Debug, Clone, Copy)] #[repr(u8)] #[allow(clippy::enum_variant_names)] @@ -710,6 +726,7 @@ impl<'a> TryFrom<(u8, &'a mut [u8])> for TemplateDistribution<'a> { } } +/// A list of 8-bit message type variants under Job Declaration subprotocol #[derive(Debug, Clone, Copy)] #[repr(u8)] #[allow(clippy::enum_variant_names)] @@ -808,6 +825,7 @@ impl<'a> TryFrom<(u8, &'a mut [u8])> for JobDeclaration<'a> { } } +/// A list of 8-bit message type variants under Mining subprotocol #[derive(Debug, Clone, Copy)] #[repr(u8)] #[allow(clippy::enum_variant_names)] @@ -976,6 +994,7 @@ impl<'a> TryFrom<(u8, &'a mut [u8])> for 
Mining<'a> { } } +/// A parser of messages that a Mining Device could send #[derive(Clone, Debug)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum MiningDeviceMessages<'a> { @@ -1017,6 +1036,8 @@ impl<'a> TryFrom<(u8, &'a mut [u8])> for MiningDeviceMessages<'a> { } } +// todo: fix this, PoolMessages should only contain Mining and Common +/// A parser of all messages a Pool could send #[derive(Clone, Debug)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum PoolMessages<'a> { From 3bb65014c43384be15af12a6ea5318095dd8a7ac Mon Sep 17 00:00:00 2001 From: plebhash Date: Thu, 21 Nov 2024 17:59:16 -0300 Subject: [PATCH 04/18] docs for roles_logic_sv2::errors --- protocols/v2/roles-logic-sv2/src/errors.rs | 56 ++++++++++++++++++++-- 1 file changed, 52 insertions(+), 4 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/errors.rs b/protocols/v2/roles-logic-sv2/src/errors.rs index 20c4bcd55..cc05fede7 100644 --- a/protocols/v2/roles-logic-sv2/src/errors.rs +++ b/protocols/v2/roles-logic-sv2/src/errors.rs @@ -8,58 +8,106 @@ use binary_sv2::Error as BinarySv2Error; use std::fmt::{self, Display, Formatter}; #[derive(Debug)] -/// No NoPairableUpstreamT(min_v, max_v, all falgs supported)) pub enum Error { - /// Errors if payload size is too big to fit into a frame. + /// Payload size is too big to fit into a frame BadPayloadSize, + /// Expected Length of 32, but received different length ExpectedLen32(usize), + /// Error serializing/deserializing binary format BinarySv2Error(BinarySv2Error), + /// Downstream is not connected anymore DownstreamDown, + /// A channel was attempted to be added to an Upstream, but no groups are specified NoGroupsFound, + /// Unexpected message received. UnexpectedMessage(u8), + /// Extended channels do not have group IDs NoGroupIdOnExtendedChannel, - /// (`min_v`, `max_v`, all flags supported) + /// No pairable upstream. 
Parameters are: (`min_v`, `max_v`, all flags supported) NoPairableUpstream((u16, u16, u32)), + /// No compatible upstream NoCompatibleUpstream(CommonDownstreamData), /// Error if the hashmap `future_jobs` field in the `GroupChannelJobDispatcher` is empty. NoFutureJobs, + /// No Downstreams connected NoDownstreamsConnected, + /// PrevHash requires non-existent Job Id PrevHashRequireNonExistentJobId(u32), + /// Request Id not mapped RequestIdNotMapped(u32), + /// There are no upstreams connected NoUpstreamsConnected, + /// Protocol has not been implemented, but should be UnimplementedProtocol, + /// Unexpected `PoolMessage` type UnexpectedPoolMessage, + /// Upstream is answering with a wrong request ID {} or + /// `DownstreamMiningSelector::on_open_standard_channel_request` has not been called + /// before relaying open channel request to upstream UnknownRequestId(u32), + /// No more extranonces NoMoreExtranonces, + /// A non future job always expects a previous new prev hash JobIsNotFutureButPrevHashNotPresent, + /// If a channel is neither extended nor part of a pool, + /// the only thing to do when an OpenStandardChannel is received + /// is to relay it upstream with an updated request id ChannelIsNeitherExtendedNeitherInAPool, + /// No more available extranonces for downstream ExtranonceSpaceEnded, + /// Impossible to calculate merkle root ImpossibleToCalculateMerkleRoot, + /// Group Id not found GroupIdNotFound, + /// A share has been received but no job for it exists ShareDoNotMatchAnyJob, + /// A share has been received but no channel for it exists ShareDoNotMatchAnyChannel, + /// Coinbase prefix + extranonce + coinbase suffix is not a valid coinbase InvalidCoinbase, + /// Value remaining in coinbase output was not correctly updated (it's equal to 0) ValueRemainingNotUpdated, + /// Unknown script type in config UnknownOutputScriptType, + /// Invalid `output_script_value` for script type. 
It must be a valid public key/script InvalidOutputScript, + /// Empty coinbase outputs in config EmptyCoinbaseOutputs, + /// Block header version cannot be bigger than `i32::MAX` VersionTooBig, + /// Tx version cannot be bigger than `i32::MAX` TxVersionTooBig, + /// Tx version cannot be lower than 1 TxVersionTooLow, + /// Impossible to decode tx TxDecodingError(String), + /// No downstream has been registered for this channel id NotFoundChannelId, + /// Impossible to create a standard job for channel + /// because no valid job has been received from upstream yet NoValidJob, + /// Impossible to create an extended job for channel + /// because no valid job has been received from upstream yet NoValidTranslatorJob, + /// Impossible to retrieve a template for the required job id NoTemplateForId, + /// Impossible to retrieve a template for the required template id NoValidTemplate(String), + /// Invalid extranonce size. Params: (required min, requested) InvalidExtranonceSize(u16, u16), + /// Poison Lock PoisonLock(String), + /// Invalid BIP34 bytes InvalidBip34Bytes(Vec), - // (downstream_job_id, upstream_job_id) + /// Channel Factory did not update job. 
Params: (downstream_job_id, upstream_job_id) JobNotUpdated(u32, u32), + /// Impossible to get Target TargetError(InputError), + /// Impossible to get Hashrate HashrateError(InputError), + /// Message is well formatted but can not be handled LogicErrorMessage(std::boxed::Box>), + /// JD server cannot propagate the block due to missing transactions JDSMissingTransactions, } From ade2c5eaad1c5185f10e1d86212c57335492d7ad Mon Sep 17 00:00:00 2001 From: plebhash Date: Thu, 21 Nov 2024 18:31:31 -0300 Subject: [PATCH 05/18] docs for roles_logic_sv2::common_properties --- .../v2/roles-logic-sv2/src/common_properties.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/common_properties.rs b/protocols/v2/roles-logic-sv2/src/common_properties.rs index 5c25624b1..ba7f57076 100644 --- a/protocols/v2/roles-logic-sv2/src/common_properties.rs +++ b/protocols/v2/roles-logic-sv2/src/common_properties.rs @@ -1,4 +1,5 @@ -//! Traits that implements very basic properties that every implementation should use +//! This module defines traits for properties that every SRI-based application should implement + use crate::selectors::{ DownstreamMiningSelector, DownstreamSelector, NullDownstreamMiningSelector, }; @@ -24,7 +25,7 @@ pub struct PairSettings { pub flags: u32, } -/// A trait that defines the basic properties of an upstream node. +/// General properties that every Sv2 compatible upstream node must implement. pub trait IsUpstream + ?Sized> { /// Used to bitcoin protocol version for the channel. fn get_version(&self) -> u16; @@ -75,26 +76,33 @@ pub struct StandardChannel { pub extranonce: Extranonce, } -/// General properties that every Sv2 compatible mining upstream nodes must implement. +/// General properties that every Sv2 compatible mining upstream node must implement. 
pub trait IsMiningUpstream + ?Sized>: IsUpstream { /// should return total hash rate local to the node fn total_hash_rate(&self) -> u64; + /// add hashrate to the node fn add_hash_rate(&mut self, to_add: u64); + /// get open channels on the node fn get_opened_channels(&mut self) -> &mut Vec; + /// update channels fn update_channels(&mut self, c: UpstreamChannel); + /// check if node is limited to hom fn is_header_only(&self) -> bool { has_requires_std_job(self.get_flags()) } } -/// General properties that every Sv2 compatible mining downstream nodes must implement. +/// General properties that every Sv2 compatible downstream node must implement. pub trait IsDownstream { + /// get downstream mining data fn get_downstream_mining_data(&self) -> CommonDownstreamData; } +/// General properties that every Sv2 compatible mining downstream node must implement. pub trait IsMiningDownstream: IsDownstream { + /// check if node is doing hom fn is_header_only(&self) -> bool { self.get_downstream_mining_data().header_only } From d7fd4faccf8a997f34b462da7dc3246c049439cf Mon Sep 17 00:00:00 2001 From: plebhash Date: Thu, 21 Nov 2024 19:16:27 -0300 Subject: [PATCH 06/18] docs for roles_logic_sv2::utils --- protocols/v2/roles-logic-sv2/src/utils.rs | 73 +++++++++++++++-------- 1 file changed, 48 insertions(+), 25 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/utils.rs b/protocols/v2/roles-logic-sv2/src/utils.rs index d4b6f8944..f818eba1f 100644 --- a/protocols/v2/roles-logic-sv2/src/utils.rs +++ b/protocols/v2/roles-logic-sv2/src/utils.rs @@ -1,3 +1,5 @@ +//! 
A collection of helper primitives + use std::{ convert::{TryFrom, TryInto}, ops::{Div, Mul}, @@ -6,10 +8,9 @@ use std::{ }; use binary_sv2::{Seq064K, ShortTxId, U256}; +use bitcoin::Block; use job_declaration_sv2::{DeclareMiningJob, SubmitSolutionJd}; use siphasher::sip::SipHasher24; -//compact_target_from_u256 -use bitcoin::Block; use stratum_common::{ bitcoin, bitcoin::{ @@ -29,13 +30,15 @@ use tracing::error; use crate::errors::Error; -/// Generator of unique ids +/// Generator of unique ids. +/// It keeps an internal counter, which is incremented every time a new unique id is requested. #[derive(Debug, PartialEq, Eq, Clone)] pub struct Id { state: u32, } impl Id { + /// constructor pub fn new() -> Self { Self { state: 0 } } @@ -53,7 +56,21 @@ impl Default for Id { } } -/// Safer Mutex wrapper +/// A custom `Mutex` implementation that provides enhanced safety and ergonomics over +/// the standard `std::sync::Mutex`. +/// +/// This `Mutex` offers the following features: +/// - **Closure-Based Locking:** The `safe_lock` method encapsulates the locking process, ensuring +/// the lock is automatically released after the closure completes. +/// - **Error Handling:** `safe_lock` enforces explicit handling of potential `PoisonError` +/// conditions, reducing the risk of panics caused by poisoned locks. +/// - **Panic-Safe Option:** The `super_safe_lock` method provides an alternative that unwraps the +/// result of `safe_lock`, with optional runtime safeguards against panics. +/// - **Extensibility:** Includes feature-gated functionality to customize behavior, such as +/// stricter runtime checks using external tools like `no-panic`. +/// +/// This design minimizes the risk of common concurrency pitfalls and promotes safer +/// handling of shared mutable state. 
#[derive(Debug)] pub struct Mutex(Mutex_); @@ -79,6 +96,7 @@ impl Mutex { Ok(return_value) } + /// Mutex super safe lock pub fn super_safe_lock(&self, thunk: F) -> Ret where F: FnOnce(&mut T) -> Ret, @@ -111,10 +129,12 @@ impl Mutex { //} } + /// Mutex constructor pub fn new(v: T) -> Self { Mutex(Mutex_::new(v)) } + /// remove from Mutex pub fn to_remove(&self) -> Result, PoisonError>> { self.0.lock() } @@ -163,14 +183,13 @@ pub fn merkle_root_from_path>( Some(merkle_root_from_path_(coinbase_id, path).to_vec()) } -// TODO remove when we have https://github.com/rust-bitcoin/rust-bitcoin/issues/1319 +/// calculate merkle root from path pub fn merkle_root_from_path_>(coinbase_id: [u8; 32], path: &[T]) -> [u8; 32] { match path.len() { 0 => coinbase_id, _ => reduce_path(coinbase_id, path), } } -// TODO remove when we have https://github.com/rust-bitcoin/rust-bitcoin/issues/1319 fn reduce_path>(coinbase_id: [u8; 32], path: &[T]) -> [u8; 32] { let mut root = coinbase_id; for node in path { @@ -183,9 +202,7 @@ fn reduce_path>(coinbase_id: [u8; 32], path: &[T]) -> [u8; 32] { root } -// -// Coinbase output construction utils -// +/// Coinbase output construction utils #[derive(Debug, Clone)] pub struct CoinbaseOutput { pub output_script_type: String, @@ -252,23 +269,20 @@ impl TryFrom for Script { } } +/// A list of potential errors during conversion between hashrate and target #[derive(Debug)] pub enum InputError { NegativeInput, DivisionByZero, } -/// The pool set a target for each miner. Each target is calibrated on the hashrate of the miner. -/// The following function takes as input a miner hashrate and the shares per minute requested by -/// the pool. The output t is the target (in big endian) for the miner with that hashrate. The -/// miner that mines with target t produces the requested number of shares per minute. +/// Calculates the target (in big endian) given some hashrate and share frequency (per minute). 
/// -
-/// -
-/// If we want a speficic number of shares per minute from a miner of known hashrate,
+/// If we want a specific number of shares per minute from a miner of known hashrate,
/// how do we set the adequate target?
///
-/// According to [1] and [2], it is possible to model the probability of finding a block with
-/// a random variable X whose distribution is negtive hypergeometric [3].
+/// According to \[1] and \[2], it is possible to model the probability of finding a block with
+/// a random variable X whose distribution is negative hypergeometric \[3].
/// Such a variable is characterized as follows. Say that there are n (2^256) elements (possible
/// hash values), of which t (values <= target) are defined as success and the remaining as
/// failures. The variable X has codomain the positive integers, and X=k is the event where element
@@ -284,11 +298,11 @@ pub enum InputError {
/// correctness of our calculations. Thus, if the pool wants on average a share every s
/// seconds from a miner with hashrate h, then the target t for the miner is t = (2^256-sh)/(sh+1).
/// -/// [1] https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3399742 -/// [2] https://www.zora.uzh.ch/id/eprint/173483/1/SSRN-id3399742-2.pdf -/// [3] https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution -/// bdiff: 0x00000000ffff0000000000000000000000000000000000000000000000000000 -/// https://en.bitcoin.it/wiki/Difficulty#How_soon_might_I_expect_to_generate_a_block.3F +/// \[1] [https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3399742](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3399742) +/// +/// \[2] [https://www.zora.uzh.ch/id/eprint/173483/1/SSRN-id3399742-2.pdf](https://www.zora.uzh.ch/id/eprint/173483/1/SSRN-id3399742-2.pdf) +/// +/// \[3] [https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution](https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution) pub fn hash_rate_to_target( hashrate: f64, share_per_min: f64, @@ -384,6 +398,7 @@ fn from_uint128_to_u128(input: Uint128) -> u128 { u128::from_be_bytes(input) } +/// helper converter u128 to uint256 pub fn from_u128_to_uint256(input: u128) -> Uint256 { let input: [u8; 16] = input.to_be_bytes(); let mut be_bytes = [0_u8; 32]; @@ -576,6 +591,7 @@ fn test_merkle_root_from_path() { ); } +/// Converts a u256 to a BlockHash type pub fn u256_to_block_hash(v: U256<'static>) -> BlockHash { let hash: [u8; 32] = v.to_vec().try_into().unwrap(); let hash = Hash::from_inner(hash); @@ -659,6 +675,7 @@ pub fn target_from_hash_rate(hash_per_second: f32, share_per_min: f32) -> U256<' target.into() } +/// todo: remove this, not used anywhere #[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))] pub fn get_target( nonce: u32, @@ -698,6 +715,8 @@ pub fn get_target( hash.reverse(); hash } + +/// Returns a tuple with a list of transaction short hashes and the nonce used to generate them pub fn hash_lists_tuple( tx_data: Vec, tx_short_hash_nonce: u64, @@ -715,6 +734,7 @@ pub fn hash_lists_tuple( (tx_short_hash_list, tx_hash_list_hash) } +/// Computes SipHash 24 
of some transaction id (short hash) pub fn get_short_hash(txid: bitcoin::Txid, tx_short_hash_nonce: u64) -> ShortTxId<'static> { // hash the short hash nonce let nonce_hash = sha256::Hash::hash(&tx_short_hash_nonce.to_le_bytes()); @@ -741,12 +761,15 @@ fn tx_hash_list_hash_builder(txid_list: Vec) -> U256<'static> { hash.to_vec().try_into().unwrap() } +/// Creates a block from a solution submission pub struct BlockCreator<'a> { last_declare: DeclareMiningJob<'a>, tx_list: Vec, message: SubmitSolutionJd<'a>, } + impl<'a> BlockCreator<'a> { + /// Constructor pub fn new( last_declare: DeclareMiningJob<'a>, tx_list: Vec, @@ -760,9 +783,9 @@ impl<'a> BlockCreator<'a> { } } -/// TODO write a test for this function that takes an already mined block, and test if the new -/// block created with the hash of the new block created with the block creator coincides with the -/// hash of the mined block +// TODO write a test for this function that takes an already mined block, and test if the new +// block created with the hash of the new block created with the block creator coincides with the +// hash of the mined block impl<'a> From> for bitcoin::Block { fn from(block_creator: BlockCreator<'a>) -> bitcoin::Block { let last_declare = block_creator.last_declare; From 1427a72e2785f08e662d230d2679c1c814951d05 Mon Sep 17 00:00:00 2001 From: GitGab19 Date: Fri, 15 Nov 2024 16:56:04 +0100 Subject: [PATCH 07/18] handlers docs --- .../v2/roles-logic-sv2/src/handlers/common.rs | 88 ++++- .../src/handlers/job_declaration.rs | 192 ++++++++++- .../v2/roles-logic-sv2/src/handlers/mining.rs | 301 +++++++++++++++++- .../v2/roles-logic-sv2/src/handlers/mod.rs | 89 +++--- .../src/handlers/template_distribution.rs | 170 ++++++++++ 5 files changed, 759 insertions(+), 81 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/handlers/common.rs b/protocols/v2/roles-logic-sv2/src/handlers/common.rs index c2fb4ceb6..a520b8ca7 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/common.rs +++ 
b/protocols/v2/roles-logic-sv2/src/handlers/common.rs @@ -1,3 +1,36 @@ +//! # Common Handlers +//! +//! This module defines traits and implementations for handling common Stratum V2 messages exchanged +//! between upstream and downstream nodes. +//! +//! ## Core Traits +//! +//! - `ParseUpstreamCommonMessages`: Implemented by downstream nodes to handle common messages +//! received from upstream nodes, such as setup connection results or channel endpoint changes. +//! - `ParseDownstreamCommonMessages`: Implemented by upstream nodes to process setup connection +//! messages received from downstream nodes. +//! +//! ## Message Handling +//! +//! Handlers in this module are responsible for: +//! - Parsing and deserializing common messages. +//! - Dispatching deserialized messages to appropriate handler functions based on message type, such +//! as `SetupConnection` or `ChannelEndpointChanged`. +//! - Ensuring robust error handling for unexpected or malformed messages. +//! +//! ## Return Type +//! +//! Functions return `Result`, where `SendTo` specifies the next action for the +//! message: whether to forward it, respond to it, or ignore it. +//! +//! ## Structure +//! +//! This module includes: +//! - Traits for upstream and downstream message parsing and handling. +//! - Functions to process common message types while maintaining clear separation of concerns. +//! - Error handling mechanisms to address edge cases and ensure reliable communication within +//! Stratum V2 networks. + use super::SendTo_; use crate::{ common_properties::CommonDownstreamData, @@ -41,6 +74,7 @@ where routing_logic, ) } + /// Takes a message and it calls the appropriate handler function /// /// Arguments: @@ -86,19 +120,43 @@ where } } - /// Called by `Self::handle_message_common` when the `SetupConnectionSuccess` message is - /// received from the upstream node. + /// Handles a `SetupConnectionSuccess` message. 
+ /// + /// This method processes a `SetupConnectionSuccess` message and handles it + /// by delegating to the appropriate handler. + /// + /// # Arguments + /// - `message`: The `SetupConnectionSuccess` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_setup_connection_success( &mut self, m: SetupConnectionSuccess, ) -> Result; - /// Called by `Self::handle_message_common` when the `SetupConnectionError` message is received - /// from the upstream node. + /// Handles a `SetupConnectionError` message. + /// + /// This method processes a `SetupConnectionError` message and handles it + /// by delegating to the appropriate handler. + /// + /// # Arguments + /// - `message`: The `SetupConnectionError` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_setup_connection_error(&mut self, m: SetupConnectionError) -> Result; - /// Called by `Self::handle_message_common` when the `ChannelEndpointChanged` message is - /// received from the upstream node. + /// Handles a `ChannelEndpointChanged` message. + /// + /// This method processes a `ChannelEndpointChanged` message and handles it + /// by delegating to the appropriate handler. + /// + /// # Arguments + /// - `message`: The `ChannelEndpointChanged` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. 
fn handle_channel_endpoint_changed( &mut self, m: ChannelEndpointChanged, @@ -129,6 +187,7 @@ where Err(e) => Err(e), } } + /// It takes a message type and a payload, and if the message is a serialized setup connection /// message, it calls the `on_setup_connection` function on the routing logic, and then calls /// the `handle_setup_connection` function on the router @@ -150,8 +209,8 @@ where } /// It takes a message do setup connection message, it calls - /// the `on_setup_connection` function on the routing logic, and then calls the - /// `handle_setup_connection` function on the router + /// the `on_setup_connection` function on the routing logic, and then calls + /// the `handle_setup_connection` function on the router fn handle_message_common_deserilized( self_: Arc>, message: Result, Error>, @@ -192,8 +251,17 @@ where } } - /// Called by `Self::handle_message_common` when a setup connection message is received from the - /// downstream node. + /// Handles a `SetupConnection` message. + /// + /// This method processes a `SetupConnection` message and handles it + /// by delegating to the appropriate handler in the routing logic. + /// + /// # Arguments + /// - `message`: The `SetupConnection` message. + /// - `result`: The result of the `on_setup_connection` call, if available. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_setup_connection( &mut self, m: SetupConnection, diff --git a/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs b/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs index 61044fa15..f0453fd8d 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs @@ -1,3 +1,44 @@ +//! # Job Declaration Handlers +//! +//! This module defines traits and functions for handling job declaration messages within the +//! Stratum V2 protocol. The job declaration process is integral to managing mining tasks and +//! 
transactions between server and client components. +//! +//! ## Core Traits +//! +//! - `ParseServerJobDeclarationMessages`: This trait is implemented by downstream nodes to process +//! job declaration messages received from upstream nodes. The trait includes methods for handling +//! job-related events such as mining job token allocation, job declaration successes or errors, +//! and transaction identification or provisioning. +//! - `ParseClientJobDeclarationMessages`: This trait is implemented by upstream nodes to manage job +//! declaration messages received from downstream nodes. It facilitates the handling of job +//! declarations, mining job token allocation, and transaction solutions submitted by downstream +//! nodes. +//! +//! ## Message Handling +//! +//! The handlers are responsible for the following tasks: +//! - Parsing and deserializing job declaration messages into appropriate types. +//! - Dispatching the deserialized messages to specific handler functions based on their type, such +//! as handling job token allocation, job declaration success or error responses, and transaction +//! data management. +//! +//! ## Return Type +//! +//! The functions return a `Result`. The `SendTo` type determines the next action for +//! the message: whether the message should be relayed, responded to, or ignored. If an error occurs +//! during processing, the `Error` type is returned. +//! +//! ## Structure +//! +//! This module contains: +//! - Traits for processing job declaration messages, covering both server-side and client-side +//! handling. +//! - Functions designed to parse, deserialize, and process messages related to job declarations, +//! with robust error handling. +//! - Error handling mechanisms to address unexpected messages and ensure safe processing, +//! particularly in the context of shared state. 
+ use crate::{parsers::JobDeclaration, utils::Mutex}; use std::sync::Arc; pub type SendTo = SendTo_, ()>; @@ -7,12 +48,25 @@ use core::convert::TryInto; use job_declaration_sv2::*; use tracing::{debug, error, info, trace}; -/// A trait implemented by a downstream to handle SV2 job declaration messages. +/// A trait for parsing and handling SV2 job declaration messages sent by a server. +/// +/// This trait is designed to be implemented by downstream components that need to handle +/// various job declaration messages from an upstream SV2 server, such as job allocation, +/// declaration success, and error messages. pub trait ParseServerJobDeclarationMessages where Self: Sized, { - /// Used to parse job declaration message and route to the message's respected handler function + /// Routes an incoming job declaration message to the appropriate handler function. + /// + /// # Parameters + /// - `self_`: An `Arc>` instance to ensure thread-safe access to the implementor. + /// - `message_type`: The type identifier of the incoming message. + /// - `payload`: A mutable slice containing the message payload. + /// + /// # Returns + /// - `Ok(SendTo)`: Indicates the message was successfully handled. + /// - `Err(Error)`: Indicates an error occurred during message parsing or handling. fn handle_message_job_declaration( self_: Arc>, message_type: u8, @@ -21,6 +75,16 @@ where Self::handle_message_job_declaration_deserialized(self_, (message_type, payload).try_into()) } + /// Routes a deserialized job declaration message to the appropriate handler function. + /// + /// # Parameters + /// - `self_`: An `Arc>` instance to ensure thread-safe access to the implementor. + /// - `message`: A `Result` containing either the parsed message or an + /// error. + /// + /// # Returns + /// - `Ok(SendTo)`: Indicates the message was successfully handled. + /// - `Err(Error)`: Indicates an error occurred during message parsing or handling. 
fn handle_message_job_declaration_deserialized( self_: Arc>, message: Result, Error>, @@ -81,44 +145,95 @@ where Err(e) => Err(e), } } - /// When upstream send AllocateMiningJobTokenSuccess self should use the received token to - /// negotiate the next job + + /// Handles an `AllocateMiningJobTokenSuccess` message. /// - /// "[`job_declaration_sv2::AllocateMiningJobToken`]" + /// This method processes a message indicating a successful job token allocation. + /// + /// # Arguments + /// - `message`: The `AllocateMiningJobTokenSuccess` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_allocate_mining_job_token_success( &mut self, message: AllocateMiningJobTokenSuccess, ) -> Result; - // When upstream send DeclareMiningJobSuccess if the token is different from the one negotiated - // self must use the new token to refer to the committed job + /// Handles a `DeclareMiningJobSuccess` message. + /// + /// This method processes a message indicating a successful mining job declaration. + /// + /// # Arguments + /// - `message`: The `DeclareMiningJobSuccess` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_declare_mining_job_success( &mut self, message: DeclareMiningJobSuccess, ) -> Result; - // TODO: comment + /// Handles a `DeclareMiningJobError` message. + /// + /// This method processes a message indicating an error in the mining job declaration process. + /// + /// # Arguments + /// - `message`: The `DeclareMiningJobError` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_declare_mining_job_error( &mut self, message: DeclareMiningJobError, ) -> Result; - // TODO: comment + /// Handles an `IdentifyTransactions` message. + /// + /// This method processes a message that provides transaction identification data. + /// + /// # Arguments + /// - `message`: The `IdentifyTransactions` message. 
+ /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_identify_transactions( &mut self, message: IdentifyTransactions, ) -> Result; - // TODO: comment + /// Handles a `ProvideMissingTransactions` message. + /// + /// This method processes a message that supplies missing transaction data. + /// + /// # Arguments + /// - `message`: The `ProvideMissingTransactions` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_provide_missing_transactions( &mut self, message: ProvideMissingTransactions, ) -> Result; } + +/// The `ParseClientJobDeclarationMessages` trait is responsible for handling job declaration +/// messages sent by clients to upstream nodes. The methods process messages like job declarations, +/// solutions, and transaction success indicators, ensuring proper routing and handling. pub trait ParseClientJobDeclarationMessages where Self: Sized, { + /// Routes an incoming job declaration message to the appropriate handler function. + /// + /// # Parameters + /// - `self_`: An `Arc>` instance to ensure thread-safe access to the implementor. + /// - `message_type`: The type identifier of the incoming message. + /// - `payload`: A mutable slice containing the message payload. + /// + /// # Returns + /// - `Ok(SendTo)`: Indicates the message was successfully handled. + /// - `Err(Error)`: Indicates an error occurred during message parsing or handling. fn handle_message_job_declaration( self_: Arc>, message_type: u8, @@ -127,6 +242,16 @@ where Self::handle_message_job_declaration_deserialized(self_, (message_type, payload).try_into()) } + /// Routes a deserialized job declaration message to the appropriate handler function. + /// + /// # Parameters + /// - `self_`: An `Arc>` instance to ensure thread-safe access to the implementor. + /// - `message`: A `Result` containing either the parsed message or an + /// error. 
+ /// + /// # Returns + /// - `Ok(SendTo)`: Indicates the message was successfully handled. + /// - `Err(Error)`: Indicates an error occurred during message parsing or handling. fn handle_message_job_declaration_deserialized( self_: Arc>, message: Result, Error>, @@ -176,27 +301,72 @@ where .safe_lock(|x| x.handle_submit_solution(message)) .map_err(|e| crate::Error::PoisonLock(e.to_string()))? } - Ok(_) => todo!(), Err(e) => Err(e), } } + /// Handles an `AllocateMiningJobToken` message. + /// + /// This method processes a message that allocates a mining job token. + /// + /// # Arguments + /// - `message`: The `AllocateMiningJobToken` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_allocate_mining_job_token( &mut self, message: AllocateMiningJobToken, ) -> Result; + /// Handles a `DeclareMiningJob` message. + /// + /// This method processes a message that declares a new mining job. + /// + /// # Arguments + /// - `message`: The `DeclareMiningJob` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_declare_mining_job(&mut self, message: DeclareMiningJob) -> Result; + /// Handles an `IdentifyTransactionsSuccess` message. + /// + /// This method processes a message that confirms the identification of transactions. + /// + /// # Arguments + /// - `message`: The `IdentifyTransactionsSuccess` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_identify_transactions_success( &mut self, message: IdentifyTransactionsSuccess, ) -> Result; + /// Handles a `ProvideMissingTransactionsSuccess` message. + /// + /// This method processes a message that confirms the receipt of missing transactions. + /// + /// # Arguments + /// - `message`: The `ProvideMissingTransactionsSuccess` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. 
fn handle_provide_missing_transactions_success( &mut self, message: ProvideMissingTransactionsSuccess, ) -> Result; + + /// Handles a `SubmitSolution` message. + /// + /// This method processes a message that submits a solution for the mining job. + /// + /// # Arguments + /// - `message`: The `SubmitSolutionJd` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_submit_solution(&mut self, message: SubmitSolutionJd) -> Result; } diff --git a/protocols/v2/roles-logic-sv2/src/handlers/mining.rs b/protocols/v2/roles-logic-sv2/src/handlers/mining.rs index b5ec45e16..918a32405 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/mining.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/mining.rs @@ -1,3 +1,41 @@ +//! # Mining Handlers +//! +//! This module defines traits and functions for handling mining-related messages within the Stratum +//! V2 protocol. +//! +//! ## Core Traits +//! +//! - `ParseUpstreamMiningMessages`: Implemented by downstream nodes to process mining messages +//! received from upstream nodes. This trait provides methods for handling mining events like new +//! mining jobs, share submissions, extranonce prefix updates, and channel status updates. +//! - `ParseDownstreamMiningMessages`: Implemented by upstream nodes to manage mining messages +//! received from downstream nodes. This trait includes methods for managing tasks such as +//! submitting shares, opening mining channels, and handling mining job responses. +//! +//! ## Message Handling +//! +//! Handlers in this module are responsible for: +//! - Parsing and deserializing mining-related messages into the appropriate types. +//! - Dispatching the deserialized messages to specific handler functions based on message type, +//! such as handling new mining jobs, share submissions, and extranonce updates. +//! - Ensuring the integrity and validity of received messages, while interacting with downstream +//! 
mining systems to ensure proper communication and task execution. +//! +//! ## Return Type +//! +//! Functions return `Result, Error>`, where `SendTo` specifies the next action +//! for the message: whether it should be sent to the downstream node, an error response should be +//! generated, or the message should be ignored. +//! +//! ## Structure +//! +//! This module includes: +//! - Traits for processing mining-related messages for both upstream and downstream communication. +//! - Functions to parse, deserialize, and process messages related to mining, ensuring robust error +//! handling for unexpected conditions. +//! - Support for managing mining channels, extranonce prefixes, and share submissions, while +//! handling edge cases and ensuring the correctness of the mining process. + use crate::{common_properties::RequestIdMapper, errors::Error, parsers::Mining}; use core::convert::TryInto; use mining_sv2::{ @@ -24,16 +62,20 @@ use tracing::{debug, error, info, trace}; pub type SendTo = SendTo_, Remote>; +/// Represents supported channel types in a mining connection. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum SupportedChannelTypes { Standard, Extended, Group, - // Non header only connection can support both group and extended channels. + /// Represents a connection that supports both group and extended channels. GroupAndExtended, } -/// Connection-wide downtream's messages parser implemented by an upstream. +/// Trait for parsing downstream mining messages in a Stratum V2 connection. +/// +/// This trait defines methods for parsing and routing downstream messages +/// related to mining operations. pub trait ParseDownstreamMiningMessages< Up: IsMiningUpstream + D, Selector: DownstreamMiningSelector + D, @@ -41,10 +83,19 @@ pub trait ParseDownstreamMiningMessages< > where Self: IsMiningDownstream + Sized + D, { + /// Returns the type of channel supported by the downstream connection. 
fn get_channel_type(&self) -> SupportedChannelTypes; - /// Used to parse and route SV2 mining messages from the downstream based on `message_type` and - /// `payload` + /// Handles a mining message from the downstream, given its type and payload. + /// + /// # Arguments + /// - `self_mutex`: The `Arc>` representing the downstream entity. + /// - `message_type`: The type of the mining message. + /// - `payload`: The raw payload of the message. + /// - `routing_logic`: The logic for routing the message to the appropriate upstream entity. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_message_mining( self_mutex: Arc>, message_type: u8, @@ -64,7 +115,15 @@ pub trait ParseDownstreamMiningMessages< } } - /// Used to route SV2 mining messages from the downstream + /// Deserializes and processes a mining message from the downstream. + /// + /// # Arguments + /// - `self_mutex`: The `Arc>` representing the downstream entity. + /// - `message`: The mining message to be processed. + /// - `routing_logic`: The logic for routing the message to the appropriate upstream entity. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_message_mining_deserialized( self_mutex: Arc>, message: Result, Error>, @@ -265,9 +324,20 @@ pub trait ParseDownstreamMiningMessages< } } + /// Checks if work selection is enabled for the downstream connection. + /// + /// # Returns + /// - `bool`: `true` if work selection is enabled, `false` otherwise. fn is_work_selection_enabled(&self) -> bool; - /// returns None if the user is authorized and Open + /// Checks if the downstream user is authorized. + /// + /// # Arguments + /// - `_self_mutex`: The `Arc>` representing the downstream entity. + /// - `_user_identity`: The user's identity to be checked. + /// + /// # Returns + /// - `Result`: `true` if the user is authorized, `false` otherwise. 
fn is_downstream_authorized( _self_mutex: Arc>, _user_identity: &binary_sv2::Str0255, @@ -275,32 +345,94 @@ pub trait ParseDownstreamMiningMessages< Ok(true) } + /// Handles an `OpenStandardMiningChannel` message. + /// + /// This method processes an `OpenStandardMiningChannel` message and initiates the + /// appropriate response. + /// + /// # Arguments + /// - `m`: The `OpenStandardMiningChannel` message. + /// - `up`: An optional upstream entity to which the message is forwarded. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_open_standard_mining_channel( &mut self, m: OpenStandardMiningChannel, up: Option>>, ) -> Result, Error>; + /// Handles an `OpenExtendedMiningChannel` message. + /// + /// This method processes an `OpenExtendedMiningChannel` message and initiates the + /// appropriate response. + /// + /// # Arguments + /// - `m`: The `OpenExtendedMiningChannel` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_open_extended_mining_channel( &mut self, m: OpenExtendedMiningChannel, ) -> Result, Error>; + /// Handles an `UpdateChannel` message. + /// + /// This method processes an `UpdateChannel` message and updates the channel settings. + /// + /// # Arguments + /// - `m`: The `UpdateChannel` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_update_channel(&mut self, m: UpdateChannel) -> Result, Error>; + /// Handles a `SubmitSharesStandard` message. + /// + /// This method processes a `SubmitSharesStandard` message and validates the submitted shares. + /// + /// # Arguments + /// - `m`: The `SubmitSharesStandard` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_submit_shares_standard( &mut self, m: SubmitSharesStandard, ) -> Result, Error>; + /// Handles a `SubmitSharesExtended` message. 
+ /// + /// This method processes a `SubmitSharesExtended` message and validates the submitted shares. + /// + /// # Arguments + /// - `m`: The `SubmitSharesExtended` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_submit_shares_extended( &mut self, m: SubmitSharesExtended, ) -> Result, Error>; + /// Handles a `SetCustomMiningJob` message. + /// + /// This method processes a `SetCustomMiningJob` message and applies the custom mining job + /// settings. + /// + /// # Arguments + /// - `m`: The `SetCustomMiningJob` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_set_custom_mining_job(&mut self, m: SetCustomMiningJob) -> Result, Error>; } -/// Connection-wide upstream's messages parser implemented by a downstream. + +/// A trait defining the parser for upstream mining messages used by a downstream. +/// +/// This trait provides the functionality to handle and route various types of mining messages +/// from the upstream based on the message type and payload. pub trait ParseUpstreamMiningMessages< Down: IsMiningDownstream + D, Selector: DownstreamMiningSelector + D, @@ -308,16 +440,35 @@ pub trait ParseUpstreamMiningMessages< > where Self: IsMiningUpstream + Sized + D, { + /// Retrieves the type of the channel supported by this upstream parser. + /// + /// # Returns + /// - `SupportedChannelTypes`: The supported channel type for this upstream. fn get_channel_type(&self) -> SupportedChannelTypes; + /// Retrieves an optional RequestIdMapper, used to manage request IDs across connections. + /// + /// # Returns + /// - `Option>>`: An optional RequestIdMapper for request ID + /// modification. 
fn get_request_id_mapper(&mut self) -> Option>> { None } - /// Used to parse and route SV2 mining messages from the upstream based on `message_type` and - /// `payload` The implementor of DownstreamMining needs to pass a RequestIdMapper if needing - /// to change the req id. Proxies likely would want to update a downstream req id to a new - /// one as req id must be connection-wide unique + /// Parses and routes SV2 mining messages from the upstream based on the message type and + /// payload. The implementor of DownstreamMining needs to pass a RequestIdMapper if changing + /// the request ID. Proxies typically need this to ensure the request ID is unique across + /// the connection. + /// + /// # Arguments + /// - `self_mutex`: The `Arc>` representing the downstream entity. + /// - `message_type`: The type of the incoming message. + /// - `payload`: The payload containing the message data. + /// - `routing_logic`: The logic to handle the routing of the message based on the type. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message, either sending a + /// response or an error. fn handle_message_mining( self_mutex: Arc>, message_type: u8, @@ -334,6 +485,17 @@ pub trait ParseUpstreamMiningMessages< } } + /// Handles the deserialized mining message from the upstream, processing it according to the + /// routing logic. + /// + /// # Arguments + /// - `self_mutex`: The `Arc>` representing the downstream entity. + /// - `message`: The deserialized mining message, wrapped in a Result for error handling. + /// - `routing_logic`: The logic used to route the message based on the type. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message, either sending a + /// response or an error. fn handle_message_mining_deserialized( self_mutex: Arc>, message: Result, @@ -673,64 +835,181 @@ pub trait ParseUpstreamMiningMessages< } } + /// Determines whether work selection is enabled for this upstream. 
+ /// + /// # Returns + /// - `bool`: A boolean indicating if work selection is enabled. fn is_work_selection_enabled(&self) -> bool; + /// Handles a successful response for opening a standard mining channel. + /// + /// # Arguments + /// - `m`: The `OpenStandardMiningChannelSuccess` message. + /// - `remote`: An optional reference to the downstream, wrapped in an `Arc`. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_open_standard_mining_channel_success( &mut self, m: OpenStandardMiningChannelSuccess, remote: Option>>, ) -> Result, Error>; + /// Handles a successful response for opening an extended mining channel. + /// + /// # Arguments + /// - `m`: The `OpenExtendedMiningChannelSuccess` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_open_extended_mining_channel_success( &mut self, m: OpenExtendedMiningChannelSuccess, ) -> Result, Error>; + /// Handles an error when opening a mining channel. + /// + /// # Arguments + /// - `m`: The `OpenMiningChannelError` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the error. fn handle_open_mining_channel_error( &mut self, m: OpenMiningChannelError, ) -> Result, Error>; + /// Handles an error when updating a mining channel. + /// + /// # Arguments + /// - `m`: The `UpdateChannelError` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the error. fn handle_update_channel_error(&mut self, m: UpdateChannelError) -> Result, Error>; + /// Handles a request to close a mining channel. + /// + /// # Arguments + /// - `m`: The `CloseChannel` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_close_channel(&mut self, m: CloseChannel) -> Result, Error>; + /// Handles a request to set the extranonce prefix for mining. + /// + /// # Arguments + /// - `m`: The `SetExtranoncePrefix` message. 
+ /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_set_extranonce_prefix( &mut self, m: SetExtranoncePrefix, ) -> Result, Error>; + /// Handles a successful submission of shares. + /// + /// # Arguments + /// - `m`: The `SubmitSharesSuccess` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_submit_shares_success( &mut self, m: SubmitSharesSuccess, ) -> Result, Error>; + /// Handles an error when submitting shares. + /// + /// # Arguments + /// - `m`: The `SubmitSharesError` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the error. fn handle_submit_shares_error(&mut self, m: SubmitSharesError) -> Result, Error>; + /// Handles a new mining job. + /// + /// # Arguments + /// - `m`: The `NewMiningJob` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_new_mining_job(&mut self, m: NewMiningJob) -> Result, Error>; + /// Handles a new extended mining job. + /// + /// # Arguments + /// - `m`: The `NewExtendedMiningJob` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_new_extended_mining_job( &mut self, m: NewExtendedMiningJob, ) -> Result, Error>; + /// Handles a request to set the new previous hash. + /// + /// # Arguments + /// - `m`: The `SetNewPrevHash` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_set_new_prev_hash(&mut self, m: SetNewPrevHash) -> Result, Error>; + /// Handles a successful response for setting a custom mining job. + /// + /// # Arguments + /// - `m`: The `SetCustomMiningJobSuccess` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_set_custom_mining_job_success( &mut self, m: SetCustomMiningJobSuccess, ) -> Result, Error>; + /// Handles an error when setting a custom mining job. 
+ /// + /// # Arguments + /// - `m`: The `SetCustomMiningJobError` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the error. fn handle_set_custom_mining_job_error( &mut self, m: SetCustomMiningJobError, ) -> Result, Error>; + /// Handles a request to set the target for mining. + /// + /// # Arguments + /// - `m`: The `SetTarget` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_set_target(&mut self, m: SetTarget) -> Result, Error>; + /// Handles a request to reconnect the mining connection. + /// + /// # Arguments + /// - `m`: The `Reconnect` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_reconnect(&mut self, m: Reconnect) -> Result, Error>; + /// Handles a request to set the group channel for mining. + /// + /// # Arguments + /// - `_m`: The `SetGroupChannel` message. + /// + /// # Returns + /// - `Result, Error>`: The result of processing the message. fn handle_set_group_channel(&mut self, _m: SetGroupChannel) -> Result, Error> { Ok(SendTo::None(None)) } diff --git a/protocols/v2/roles-logic-sv2/src/handlers/mod.rs b/protocols/v2/roles-logic-sv2/src/handlers/mod.rs index b7d39ab4e..b717eb34a 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/mod.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/mod.rs @@ -1,76 +1,65 @@ -//! Handlers are divided per (sub)protocol and per Downstream/Upstream. -//! Each (sup)protocol defines a handler for both the Upstream node and the Downstream node -//! Handlers are a trait called `Parse[Downstream/Upstream][(sub)protocol]` -//! (eg. `ParseDownstreamCommonMessages`). +//! # Handlers Overview //! -//! When implemented, the handler makes the `handle_message_[(sub)protoco](..)` (e.g. -//! `handle_message_common(..)`) function available. +//! This module centralizes the logic for processing and routing Stratum V2 protocol messages, +//! 
defining traits and utilities to handle messages for both Downstream and Upstream roles. //! -//! The trait requires the implementer to define one function for each message type that a role -//! defined by the (sub)protocol and the Upstream/Downstream state could receive. +//! ## Purpose //! -//! This function will always take a mutable ref to `self`, a message payload + message type, and -//! the routing logic. -//! Using `parsers` in `crate::parser`, the payload and message type are parsed in an actual SV2 -//! message. -//! Routing logic is used in order to select the correct Downstream/Upstream to which the message -//! must be relayed/sent. -//! Routing logic is used to update the request id when needed. -//! After that, the specific function for the message type (implemented by the implementer) is -//! called with the SV2 message and the remote that must receive the message. +//! - Standardize the handling of protocol-specific messages. +//! - Enable efficient routing, transformation, and relaying of messages between nodes. +//! - Support modularity and scalability across Stratum V2 subprotocols. //! -//! A `Result` is returned and it is the duty of the implementer to send the -//! message. +//! ## Structure +//! +//! The module is organized by subprotocol and role, with handler traits for: +//! - `ParseDownstream[Protocol]`: Handles messages from Downstream nodes. +//! - `ParseUpstream[Protocol]`: Handles messages from Upstream nodes. +//! +//! Supported subprotocols include: +//! - `common`: Shared messages across protocols. +//! - `job_declaration`: Job-related messages. +//! - `mining`: Mining-specific messages. +//! - `template_distribution`: Template distribution messages. +//! +//! ## Return Values +//! +//! Handlers return `Result`, where: +//! - `SendTo_` specifies the action (relay, respond, or no action). +//! - `Error` indicates processing issues. 
pub mod common; pub mod job_declaration; pub mod mining; pub mod template_distribution; + use crate::utils::Mutex; use std::sync::Arc; #[derive(Debug)] -/// Message is a serializable entity that rapresent the meanings of communication between Remote(s) -/// SendTo_ is used to add context to Message, it say what we need to do with that Message. +/// Represents a serializable entity used for communication between Remotes. +/// The `SendTo_` enum adds context to the message, specifying the intended action. pub enum SendTo_ { - /// Used by proxies when Message must be relayed downstream or upstream and we want to specify - /// to which particular downstream or upstream we want to relay the message. - /// - /// When the message that we need to relay is the same message that we received should be used - /// RelaySameMessageToRemote in order to save an allocation. + /// Relay a new message to a specific remote. RelayNewMessageToRemote(Arc>, Message), - /// Used by proxies when Message must be relayed downstream or upstream and we want to specify - /// to which particular downstream or upstream we want to relay the message. - /// - /// Is used when we need to relay the same message the we received in order to save an - /// allocation. + /// Relay the same received message to a specific remote to avoid extra allocations. RelaySameMessageToRemote(Arc>), - /// Used by proxies when Message must be relayed downstream or upstream and we do not want to - /// specify specify to which particular downstream or upstream we want to relay the - /// message. + /// Relay a new message without specifying a specific remote. /// - /// This is used in proxies that do and Sv1 to Sv2 translation. The upstream is connected via - /// an extended channel that means that + /// This is common in proxies that translate between SV1 and SV2 protocols, where messages are + /// often broadcasted via extended channels. 
RelayNewMessage(Message), - /// Used proxies clients and servers to directly respond to a received message. + /// Directly respond to a received message. Respond(Message), + /// Relay multiple messages to various destinations. Multiple(Vec>), - /// Used by proxies, clients, and servers, when Message do not have to be used in any of the - /// above way. If Message is still needed to be used in a non conventional way we use - /// SendTo::None(Some(message)) If we just want to discard it we can use SendTo::None(None) + /// Indicates that no immediate action is required for the message. /// - /// SendTo::None(Some(m)) could be used for example when we do not need to send the message, - /// but we still need it for successive handling/transformation. - /// One of these cases are proxies that are connected to upstream via an extended channel (like - /// the Sv1 <-> Sv2 translator). This because extended channel messages are always general - /// for all the downstream, where standard channel message can be specific for a particular - /// downstream. Another case is when 2 roles are implemented in the same software, like a - /// pool that is both TP client and a Mining server, messages received by the TP client - /// must be sent to the Mining Server than transformed in Mining messages and sent to the - /// downstream. + /// This variant allows for cases where the message is still needed for later processing + /// (e.g., transformations or when two roles are implemented in the same software). None(Option), } impl SendTo_ { + /// Extracts the message, if available. pub fn into_message(self) -> Option { match self { Self::RelayNewMessageToRemote(_, m) => Some(m), @@ -81,6 +70,8 @@ impl SendTo_ { Self::None(m) => m, } } + + /// Extracts the remote, if available. 
pub fn into_remote(self) -> Option>> { match self { Self::RelayNewMessageToRemote(r, _) => Some(r), diff --git a/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs b/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs index fd321d0f7..a0a5b94ec 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs @@ -1,3 +1,40 @@ +//! # Template Distribution Handlers +//! +//! This module defines traits and functions for handling template distribution messages within the +//! Stratum V2 protocol. +//! +//! ## Core Traits +//! +//! - `ParseServerTemplateDistributionMessages`: Implemented by downstream nodes to process template +//! distribution messages received from upstream nodes. This trait includes methods for handling +//! template-related events like new templates, previous hash updates, and transaction data +//! requests. +//! - `ParseClientTemplateDistributionMessages`: Implemented by upstream nodes to manage template +//! distribution messages received from downstream nodes. This trait handles coinbase output size, +//! transaction data requests, and solution submissions. +//! +//! ## Message Handling +//! +//! Handlers are responsible for: +//! - Parsing and deserializing template distribution messages into appropriate types. +//! - Dispatching the deserialized messages to specific handler functions based on message type, +//! such as handling new templates, transaction data requests, and coinbase output data. +//! +//! ## Return Type +//! +//! Functions return `Result`, where `SendTo` determines the next action for the +//! message: whether it should be relayed, responded to, or ignored. +//! +//! ## Structure +//! +//! This module includes: +//! - Traits for processing template distribution messages, including server-side and client-side +//! handling. +//! 
- Functions to parse, deserialize, and process messages related to template distribution, +//! ensuring robust error handling. +//! - Error handling mechanisms to address unexpected messages and ensure safe processing, +//! especially in the context of shared state. + use super::SendTo_; use crate::{errors::Error, parsers::TemplateDistribution, utils::Mutex}; use template_distribution_sv2::{ @@ -11,10 +48,28 @@ use core::convert::TryInto; use std::sync::Arc; use tracing::{debug, error, info, trace}; +/// Trait for handling template distribution messages received from upstream nodes (server side). +/// Includes functions to handle messages such as new templates, previous hash updates, and +/// transaction data requests. pub trait ParseServerTemplateDistributionMessages where Self: Sized, { + /// Handles incoming template distribution messages. + /// + /// This function is responsible for parsing and dispatching the appropriate handler based on + /// the message type. It first deserializes the payload and then routes it to the + /// corresponding handler function. + /// + /// # Arguments + /// - `self_`: An `Arc>` representing the instance of the object implementing this + /// trait. + /// - `message_type`: The type of the incoming message. + /// - `payload`: The raw payload data of the message. + /// + /// # Returns + /// - `Result`: The result of processing the message, where `SendTo` indicates + /// the next step in message handling. fn handle_message_template_distribution( self_: Arc>, message_type: u8, @@ -25,6 +80,19 @@ where (message_type, payload).try_into(), ) } + + /// Handles deserialized template distribution messages. + /// + /// This function takes the deserialized message and processes it according to the specific + /// message type, invoking the appropriate handler function. + /// + /// # Arguments + /// - `self_`: An `Arc>` representing the instance of the object implementing this + /// trait. 
+ /// - `message`: The deserialized `TemplateDistribution` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_message_template_distribution_desrialized( self_: Arc>, message: Result, Error>, @@ -80,22 +148,82 @@ where Err(e) => Err(e), } } + + /// Handles a `NewTemplate` message. + /// + /// This method processes the `NewTemplate` message, which contains information about a newly + /// generated template. + /// + /// # Arguments + /// - `m`: The `NewTemplate` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_new_template(&mut self, m: NewTemplate) -> Result; + + /// Handles a `SetNewPrevHash` message. + /// + /// This method processes the `SetNewPrevHash` message, which updates the previous hash for a + /// template. + /// + /// # Arguments + /// - `m`: The `SetNewPrevHash` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_set_new_prev_hash(&mut self, m: SetNewPrevHash) -> Result; + + /// Handles a `RequestTransactionDataSuccess` message. + /// + /// This method processes the success response for a requested transaction data message. + /// + /// # Arguments + /// - `m`: The `RequestTransactionDataSuccess` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_request_tx_data_success( &mut self, m: RequestTransactionDataSuccess, ) -> Result; + + /// Handles a `RequestTransactionDataError` message. + /// + /// This method processes an error response for a requested transaction data message. + /// + /// # Arguments + /// - `m`: The `RequestTransactionDataError` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_request_tx_data_error( &mut self, m: RequestTransactionDataError, ) -> Result; } +/// Trait for handling template distribution messages received from downstream nodes (client side). 
+/// Includes functions to handle messages such as coinbase output data size, transaction data +/// requests, and solution submissions. pub trait ParseClientTemplateDistributionMessages where Self: Sized, { + /// Handles incoming template distribution messages. + /// + /// This function is responsible for parsing and dispatching the appropriate handler based on + /// the message type. It first deserializes the payload and then routes it to the + /// corresponding handler function. + /// + /// # Arguments + /// - `self_`: An `Arc>` representing the instance of the object implementing this + /// trait. + /// - `message_type`: The type of the incoming message. + /// - `payload`: The raw payload data of the message. + /// + /// # Returns + /// - `Result`: The result of processing the message, where `SendTo` indicates + /// the next step in message handling. fn handle_message_template_distribution( self_: Arc>, message_type: u8, @@ -107,6 +235,18 @@ where ) } + /// Handles deserialized template distribution messages. + /// + /// This function takes the deserialized message and processes it according to the specific + /// message type, invoking the appropriate handler function. + /// + /// # Arguments + /// - `self_`: An `Arc>` representing the instance of the object implementing this + /// trait. + /// - `message`: The deserialized `TemplateDistribution` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_message_template_distribution_desrialized( self_: Arc>, message: Result, Error>, @@ -137,8 +277,38 @@ where Err(e) => Err(e), } } + + /// Handles a `CoinbaseOutputDataSize` message. + /// + /// This method processes a message that includes the coinbase output data size. + /// + /// # Arguments + /// - `m`: The `CoinbaseOutputDataSize` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. 
fn handle_coinbase_out_data_size(&mut self, m: CoinbaseOutputDataSize) -> Result; + + /// Handles a `RequestTransactionData` message. + /// + /// This method processes a message requesting transaction data. + /// + /// # Arguments + /// - `m`: The `RequestTransactionData` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_request_tx_data(&mut self, m: RequestTransactionData) -> Result; + + /// Handles a `SubmitSolution` message. + /// + /// This method processes a solution submission message. + /// + /// # Arguments + /// - `m`: The `SubmitSolution` message. + /// + /// # Returns + /// - `Result`: The result of processing the message. fn handle_request_submit_solution(&mut self, m: SubmitSolution) -> Result; } From 54b05820fabcef448f75b93024f271d711b7ed1b Mon Sep 17 00:00:00 2001 From: Gabriele Vernetti Date: Fri, 22 Nov 2024 20:50:07 +0100 Subject: [PATCH 08/18] routing_logic docs --- .../v2/roles-logic-sv2/src/routing_logic.rs | 322 ++++++++++++------ 1 file changed, 214 insertions(+), 108 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/routing_logic.rs b/protocols/v2/roles-logic-sv2/src/routing_logic.rs index f6e4f3e7b..a23e62e4b 100644 --- a/protocols/v2/roles-logic-sv2/src/routing_logic.rs +++ b/protocols/v2/roles-logic-sv2/src/routing_logic.rs @@ -1,24 +1,50 @@ -//! The routing logic code is used by the handler to determine where a message should be relayed or -//! responded to +//! This module contains the routing logic used by handlers to determine where a message should be +//! relayed or responded to. //! -//! TODO It seems like a good idea to hide all the traits to the user and export marker traits -//! check if possible +//! ## Overview //! -//! - CommonRouter -> implemented by routers used by the common (sub)protocol +//! The routing logic defines a set of traits and structures to manage message routing in Stratum +//! V2. The following components are included: //! -//! 
- MiningRouter -> implemented by routers used by the mining (sub)protocol +//! - **`CommonRouter`**: Trait implemented by routers for the common (sub)protocol. +//! - **`MiningRouter`**: Trait implemented by routers for the mining (sub)protocol. +//! - **`CommonRoutingLogic`**: Enum defining the various routing logic for the common protocol +//! (e.g., Proxy, None). +//! - **`MiningRoutingLogic`**: Enum defining the routing logic for the mining protocol (e.g., +//! Proxy, None). +//! - **`NoRouting`**: Marker router that implements both `CommonRouter` and `MiningRouter` for +//! cases where no routing logic is required. +//! - **`MiningProxyRoutingLogic`**: Routing logic valid for a standard Sv2 mining proxy, +//! implementing both `CommonRouter` and `MiningRouter`. //! -//! - CommonRoutingLogic -> enum that define the enum the various routing logic for the common -//! (sub)protocol (eg Proxy None ...). +//! ## Details //! -//! - MiningProxyRoutingLogic -> enum that define the enum the various routing logic for the common -//! (sub)protocol (eg Proxy None ...). +//! ### Traits //! -//! - NoRouting -> implement both CommonRouter and MiningRouter used when the routing logic needed -//! is only None +//! - **`CommonRouter`**: Defines routing behavior for common protocol messages. +//! - **`MiningRouter`**: Defines routing behavior for mining protocol messages. Requires +//! `DownstreamMiningSelector` for downstream selection. //! -//! - MiningProxyRoutingLogic -> routing logic valid for a standard Sv2 mining proxy it is both a -//! CommonRouter and a MiningRouter +//! ### Enums +//! +//! - **`CommonRoutingLogic`**: Represents possible routing logics for the common protocol, such as +//! proxy-based routing or no routing. +//! - **`MiningRoutingLogic`**: Represents possible routing logics for the mining protocol, +//! supporting additional parameters such as selectors and marker traits. +//! +//! ### Structures +//! +//! 
- **`NoRouting`**: A minimal implementation of `CommonRouter` and `MiningRouter` that panics +//! when used. Its primary purpose is to serve as a placeholder for cases where no routing logic +//! is applied. +//! - **`MiningProxyRoutingLogic`**: Implements routing logic for a standard Sv2 proxy, including +//! upstream selection and message transformation. +//! +//! ## Future Work +//! +//! - Consider hiding all traits from the public API and exporting only marker traits. +//! - Improve upstream selection logic to be configurable by the caller. + use crate::{ common_properties::{CommonDownstreamData, IsMiningDownstream, IsMiningUpstream, PairSettings}, selectors::{ @@ -34,26 +60,45 @@ use common_messages_sv2::{ use mining_sv2::{OpenStandardMiningChannel, OpenStandardMiningChannelSuccess}; use std::{collections::HashMap, fmt::Debug as D, marker::PhantomData, sync::Arc}; -/// The CommonRouter trait defines a router needed by -/// [`crate::handlers::common::ParseUpstreamCommonMessages`] and -/// [`crate::handlers::common::ParseDownstreamCommonMessages`] +/// Defines routing logic for common protocol messages. +/// +/// Implemented by handlers (such as [`crate::handlers::common::ParseUpstreamCommonMessages`] and +/// [`crate::handlers::common::ParseDownstreamCommonMessages`]) to determine the behavior for common +/// protocol routing. pub trait CommonRouter: std::fmt::Debug { + /// Handles a `SetupConnection` message for the common protocol. + /// + /// # Arguments + /// - `message`: The `SetupConnection` message received. + /// + /// # Returns + /// - `Result<(CommonDownstreamData, SetupConnectionSuccess), Error>`: The routing result. 
fn on_setup_connection( &mut self, message: &SetupConnection, ) -> Result<(CommonDownstreamData, SetupConnectionSuccess), Error>; } -/// The MiningRouter trait defines a router needed by -/// [`crate::handlers::mining::ParseDownstreamMiningMessages`] and -/// [`crate::handlers::mining::ParseUpstreamMiningMessages`] +/// Defines routing logic for mining protocol messages. +/// +/// Implemented by handlers (such as [`crate::handlers::mining::ParseUpstreamMiningMessages`] and +/// [`crate::handlers::mining::ParseDownstreamMiningMessages`]) to determine the behavior for mining +/// protocol routing. This trait extends [`CommonRouter`] to handle mining-specific routing logic. pub trait MiningRouter< Down: IsMiningDownstream, Up: IsMiningUpstream, Sel: DownstreamMiningSelector, >: CommonRouter { - #[allow(clippy::result_unit_err)] + /// Handles an `OpenStandardMiningChannel` message from a downstream. + /// + /// # Arguments + /// - `downstream`: The downstream mining entity. + /// - `request`: The mining channel request message. + /// - `downstream_mining_data`: Associated downstream mining data. + /// + /// # Returns + /// - `Result>, Error>`: The upstream mining entity. fn on_open_standard_channel( &mut self, downstream: Arc>, @@ -61,7 +106,14 @@ pub trait MiningRouter< downstream_mining_data: &CommonDownstreamData, ) -> Result>, Error>; - #[allow(clippy::result_unit_err)] + /// Handles an `OpenStandardMiningChannelSuccess` message from an upstream. + /// + /// # Arguments + /// - `upstream`: The upstream mining entity. + /// - `request`: The successful channel opening message. + /// + /// # Returns + /// - `Result>, Error>`: The downstream mining entity. fn on_open_standard_channel_success( &mut self, upstream: Arc>, @@ -69,10 +121,9 @@ pub trait MiningRouter< ) -> Result>, Error>; } -/// NoRouting Router used when `RoutingLogic::None` and `MiningRoutingLogic::None` are needed. 
-/// It implements both `CommonRouter` and `MiningRouter`, and panics if used as an actual router. -/// The only purpose of `NoRouting` is a marker trait for when `RoutingLogic::None` and -/// `MiningRoutingLogic::None` +/// A no-operation router for scenarios where no routing logic is needed. +/// +/// Implements both `CommonRouter` and `MiningRouter` but panics if invoked. #[derive(Debug)] pub struct NoRouting(); @@ -84,6 +135,7 @@ impl CommonRouter for NoRouting { unreachable!() } } + impl< Down: IsMiningDownstream + D, Up: IsMiningUpstream + D, @@ -107,16 +159,16 @@ impl< } } -/// Enum that contains the possible routing logic is usually contructed before calling -/// handle_message_..() +/// Routing logic options for the common protocol. #[derive(Debug)] pub enum CommonRoutingLogic { + /// Proxy routing logic for the common protocol. Proxy(&'static Mutex), + /// No routing logic. None, } -/// Enum that contains the possibles routing logic is usually contructed before calling -/// handle_message_..() +/// Routing logic options for the mining protocol. #[derive(Debug)] pub enum MiningRoutingLogic< Down: IsMiningDownstream + D, @@ -124,8 +176,11 @@ pub enum MiningRoutingLogic< Sel: DownstreamMiningSelector + D, Router: 'static + MiningRouter, > { + /// Proxy routing logic for the mining protocol. Proxy(&'static Mutex), + /// No routing logic. None, + /// Marker for the generic parameters. _P(PhantomData<(Down, Up, Sel)>), } @@ -155,12 +210,39 @@ impl< } } +/// Routing logic for a standard Sv2 mining proxy. +#[derive(Debug)] +pub struct MiningProxyRoutingLogic< + Down: IsMiningDownstream + D, + Up: IsMiningUpstream + D, + Sel: DownstreamMiningSelector + D, +> { + /// Selector for upstream entities. + pub upstream_selector: GeneralMiningSelector, + /// ID generator for downstream entities. + pub downstream_id_generator: Id, + /// Mapping from downstream to upstream entities. 
+ pub downstream_to_upstream_map: HashMap>>>, +} + impl< Down: IsMiningDownstream + D, Up: IsMiningUpstream + D, Sel: DownstreamMiningSelector + D, > CommonRouter for MiningProxyRoutingLogic { + /// Handles the `SetupConnection` message. + /// + /// This method initializes the connection between a downstream and an upstream by determining + /// the appropriate upstream based on the provided protocol, versions, and flags. + /// + /// # Arguments + /// - `message`: A reference to the `SetupConnection` message containing the connection details. + /// + /// # Returns + /// - `Result<(CommonDownstreamData, SetupConnectionSuccess), Error>`: On success, returns the + /// downstream connection data and the corresponding setup success message. Returns an error + /// otherwise. fn on_setup_connection( &mut self, message: &SetupConnection, @@ -180,7 +262,7 @@ impl< (Protocol::MiningProtocol, true) => { self.on_setup_connection_mining_header_only(&pair_settings) } - // TODO add handler for other protocols + // TODO: Add handler for other protocols. _ => Err(Error::UnimplementedProtocol), } } @@ -192,12 +274,55 @@ impl< Sel: DownstreamMiningSelector + D, > MiningRouter for MiningProxyRoutingLogic { - /// On open standard channel success: - /// 1. the downstream that requested the opening of the channel must be selected an put in the - /// right group channel - /// 2. request_id from upsteram is replaced with the original request id from downstream + /// Handles the `OpenStandardMiningChannel` message. + /// + /// This method processes the request to open a standard mining channel. It selects a suitable + /// upstream, updates the request ID to ensure uniqueness, and then delegates to + /// `on_open_standard_channel_request_header_only` to finalize the process. + /// + /// # Arguments + /// - `downstream`: The downstream requesting the channel opening. + /// - `request`: A mutable reference to the `OpenStandardMiningChannel` message. 
+ /// - `downstream_mining_data`: Common data about the downstream mining setup. + /// + /// # Returns + /// - `Result>, Error>`: Returns the selected upstream for the downstream or an + /// error. + fn on_open_standard_channel( + &mut self, + downstream: Arc>, + request: &mut OpenStandardMiningChannel, + downstream_mining_data: &CommonDownstreamData, + ) -> Result>, Error> { + let upstreams = self + .downstream_to_upstream_map + .get(downstream_mining_data) + .ok_or(Error::NoCompatibleUpstream(*downstream_mining_data))?; + // If we are here, a list of possible upstreams has already been selected. + // TODO: The upstream selection logic should be specified by the caller. + let upstream = + Self::select_upstreams(&mut upstreams.to_vec()).ok_or(Error::NoUpstreamsConnected)?; + let old_id = request.get_request_id_as_u32(); + let new_req_id = upstream + .safe_lock(|u| u.get_mapper().unwrap().on_open_channel(old_id)) + .map_err(|e| Error::PoisonLock(e.to_string()))?; + request.update_id(new_req_id); + self.on_open_standard_channel_request_header_only(downstream, request) + } + + /// Handles the `OpenStandardMiningChannelSuccess` message. /// - /// The selected downstream is returned + /// This method processes the success message received from an upstream when a standard mining + /// channel is opened. It maps the request ID back to the original ID from the downstream and + /// updates the associated group and channel IDs in the upstream. + /// + /// # Arguments + /// - `upstream`: The upstream involved in the channel opening. + /// - `request`: A mutable reference to the `OpenStandardMiningChannelSuccess` message. + /// + /// # Returns + /// - `Result>, Error>`: Returns the downstream corresponding to the request or + /// an error. fn on_open_standard_channel_success( &mut self, upstream: Arc>, @@ -225,48 +350,19 @@ impl< }) .map_err(|e| Error::PoisonLock(e.to_string()))? 
} - - /// At this point the Sv2 connection with downstream is initialized that means that - /// routing_logic has already preselected a set of upstreams pairable with downstream. - /// - /// Updates the request id from downstream to a connection-wide unique request id for - /// downstream. - fn on_open_standard_channel( - &mut self, - downstream: Arc>, - request: &mut OpenStandardMiningChannel, - downstream_mining_data: &CommonDownstreamData, - ) -> Result>, Error> { - let upstreams = self - .downstream_to_upstream_map - .get(downstream_mining_data) - .ok_or(Error::NoCompatibleUpstream(*downstream_mining_data))?; - // If we are here a list of possible upstreams has been already selected - // TODO the upstream selection logic should be specified by the caller - let upstream = - Self::select_upstreams(&mut upstreams.to_vec()).ok_or(Error::NoUpstreamsConnected)?; - let old_id = request.get_request_id_as_u32(); - let new_req_id = upstream - .safe_lock(|u| u.get_mapper().unwrap().on_open_channel(old_id)) - .map_err(|e| Error::PoisonLock(e.to_string()))?; - request.update_id(new_req_id); - self.on_open_standard_channel_request_header_only(downstream, request) - } -} - -/// Routing logic valid for a standard Sv2 proxy -#[derive(Debug)] -pub struct MiningProxyRoutingLogic< - Down: IsMiningDownstream + D, - Up: IsMiningUpstream + D, - Sel: DownstreamMiningSelector + D, -> { - pub upstream_selector: GeneralMiningSelector, - pub downstream_id_generator: Id, - pub downstream_to_upstream_map: HashMap>>>, - //pub upstream_startegy: MiningUpstreamSelectionStrategy, } +/// Selects the upstream with the lowest total hash rate. +/// +/// # Arguments +/// - `ups`: A mutable slice of upstream mining entities. +/// +/// # Returns +/// - `Arc>`: The upstream entity with the lowest total hash rate. +/// +/// # Panics +/// This function panics if the slice is empty, as it is internally guaranteed that this function +/// will only be called with non-empty vectors. 
fn minor_total_hr_upstream(ups: &mut [Arc>]) -> Arc> where Down: IsMiningDownstream + D, @@ -275,7 +371,7 @@ where { ups.iter_mut() .reduce(|acc, item| { - // Is fine to unwrap a safe_lock result + // Safely locks and compares the total hash rate of each upstream. if acc.safe_lock(|x| x.total_hash_rate()).unwrap() < item.safe_lock(|x| x.total_hash_rate()).unwrap() { @@ -284,12 +380,17 @@ where item } }) - // Internal private function we only call thi function with non void vectors so is safe to - // unwrap here .unwrap() - .clone() + .clone() // Unwrap is safe because the function only operates on non-empty vectors. } +/// Filters upstream entities that are not configured for header-only mining. +/// +/// # Arguments +/// - `ups`: A mutable slice of upstream mining entities. +/// +/// # Returns +/// - `Vec>>`: A vector of upstream entities that are not header-only. fn filter_header_only(ups: &mut [Arc>]) -> Vec>> where Down: IsMiningDownstream + D, @@ -307,9 +408,18 @@ where .collect() } -/// If only one upstream is avaiable return it. -/// Try to return an upstream that is not header only. -/// Return the upstream that has less hash rate from downstreams. +/// Selects the most appropriate upstream entity based on specific criteria. +/// +/// # Criteria +/// - If only one upstream is available, it is selected. +/// - If multiple upstreams exist, preference is given to those not configured as header-only. +/// - Among the remaining upstreams, the one with the lowest total hash rate is selected. +/// +/// # Arguments +/// - `ups`: A mutable slice of upstream mining entities. +/// +/// # Returns +/// - `Option>>`: The selected upstream entity, or `None` if none are available. 
fn select_upstream(ups: &mut [Arc>]) -> Option>> where Down: IsMiningDownstream + D, @@ -333,28 +443,32 @@ impl< Sel: DownstreamMiningSelector + D, > MiningProxyRoutingLogic { - /// TODO this should stay in a enum UpstreamSelectionLogic that get passed from the caller to - /// the several methods + /// Selects an upstream entity from a list of available upstreams. + /// + /// # Arguments + /// - `ups`: A mutable slice of upstream mining entities. + /// + /// # Returns + /// - `Option>>`: The selected upstream entity, or `None` if none are available. fn select_upstreams(ups: &mut [Arc>]) -> Option>> { select_upstream(ups) } - /// On setup connection the proxy finds all the upstreams that support the downstream - /// connection, creates a downstream message parser that points to all the possible - /// upstreams, and then responds with suppported flags. + /// Handles the `SetupConnection` process for header-only mining downstreams. /// - /// The upstream with min total_hash_rate is selected (TODO a method to let the caller which - /// upstream select from the possible ones should be added - /// on_setup_connection_mining_header_only_2 that return a Vec of possibe upstreams) + /// This method selects compatible upstreams, assigns connection flags, and maps the + /// downstream to the selected upstreams. /// - /// This function returns a downstream id that the new created downstream must return via the - /// trait function get_id and the flags of the paired upstream + /// # Arguments + /// - `pair_settings`: The pairing settings for the connection. + /// + /// # Returns + /// - `Result<(CommonDownstreamData, SetupConnectionSuccess), Error>`: The connection result. 
pub fn on_setup_connection_mining_header_only( &mut self, pair_settings: &PairSettings, ) -> Result<(CommonDownstreamData, SetupConnectionSuccess), Error> { let mut upstreams = self.upstream_selector.on_setup_connection(pair_settings)?; - // TODO the upstream selection logic should be specified by the caller let upstream = Self::select_upstreams(&mut upstreams.0).ok_or(Error::NoUpstreamsConnected)?; let downstream_data = CommonDownstreamData { @@ -373,21 +487,14 @@ impl< Ok((downstream_data, message)) } - /// On open standard channel request: - /// 1. an upstream must be selected between the possible upstreams for this downstream. If the - /// downstream* is header only, just one upstream will be there, so the choice is easy, if - /// not (TODO on_open_standard_channel_request_no_standard_job must be used) - /// 2. request_id from downstream is updated to a connection-wide unique request-id for - /// upstreams - /// - /// The selected upstream is returned + /// Handles a standard channel opening request for header-only mining downstreams. /// + /// # Arguments + /// - `downstream`: The downstream mining entity. + /// - `request`: The standard mining channel request message. /// - /// * The downstream that wants to open a channel already connected with the proxy so a - /// valid upstream has already been selected (otherwise downstream can not be connected). - /// If the downstream is header only, only one valid upstream has been selected (cause a - /// header only mining device can be connected only with one pool) - #[allow(clippy::result_unit_err)] + /// # Returns + /// - `Result>, Error>`: The selected upstream mining entity. 
pub fn on_open_standard_channel_request_header_only( &mut self, downstream: Arc>, @@ -396,7 +503,6 @@ impl< let downstream_mining_data = downstream .safe_lock(|d| d.get_downstream_mining_data()) .map_err(|e| crate::Error::PoisonLock(e.to_string()))?; - // header only downstream must map to only one upstream let upstream = self .downstream_to_upstream_map .get(&downstream_mining_data) From 57aa0ee6e35f238976ce2376683f9ea5eadf531c Mon Sep 17 00:00:00 2001 From: Gabriele Vernetti Date: Fri, 22 Nov 2024 20:50:16 +0100 Subject: [PATCH 09/18] selectors docs --- protocols/v2/roles-logic-sv2/src/selectors.rs | 256 ++++++++++++++++-- 1 file changed, 233 insertions(+), 23 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/selectors.rs b/protocols/v2/roles-logic-sv2/src/selectors.rs index b3750bdb2..fcfcffeb5 100644 --- a/protocols/v2/roles-logic-sv2/src/selectors.rs +++ b/protocols/v2/roles-logic-sv2/src/selectors.rs @@ -1,5 +1,39 @@ -//! Selectors are used from the routing logic in order to chose to which remote or set of remotes -//! a message should be ralyied, or to which remote or set of remotes a message should be sent. +//! This module provides selectors and routing logic for managing downstream and upstream nodes +//! in a mining proxy environment. Selectors help determine the appropriate remote(s) to relay or +//! send messages to. +//! +//! ## Components +//! +//! - **`ProxyDownstreamMiningSelector`**: A selector for managing downstream nodes in a mining +//! proxy, mapping requests and channel IDs to specific downstream nodes or groups. +//! - **`NullDownstreamMiningSelector`**: A no-op selector for cases where routing logic is +//! unnecessary, commonly used in test scenarios. +//! - **`GeneralMiningSelector`**: A flexible upstream selector that matches downstream nodes with +//! compatible upstream nodes based on pairing settings and flags. +//! +//! ## Traits +//! +//! - **`DownstreamSelector`**: Base trait for all downstream selectors. +//! 
- **`DownstreamMiningSelector`**: Specialized trait for selectors managing mining-specific +//! downstream nodes. +//! - **`UpstreamSelector`**: Base trait for upstream node selectors. +//! - **`UpstreamMiningSelctor`**: Specialized trait for selectors managing upstream mining nodes. +//! +//! ## Details +//! +//! ### ProxyDownstreamMiningSelector +//! - Manages mappings for request IDs, channel IDs, and downstream groups. +//! - Provides methods to handle standard channel operations, such as opening channels and +//! retrieving or removing downstream nodes associated with a channel. +//! +//! ### NullDownstreamMiningSelector +//! - Implements all required traits but panics if called. +//! - Useful for minimal setups or as a placeholder in tests. +//! +//! ### GeneralMiningSelector +//! - Matches downstream nodes to upstream nodes based on pairing compatibility. +//! - Tracks upstream nodes and their IDs for efficient lookups. + use crate::{ common_properties::{IsDownstream, IsMiningDownstream, IsMiningUpstream, PairSettings}, utils::Mutex, @@ -8,15 +42,24 @@ use crate::{ use nohash_hasher::BuildNoHashHasher; use std::{collections::HashMap, fmt::Debug as D, sync::Arc}; -/// A DownstreamMiningSelector useful for routing messages in a mining proxy +/// A selector used for routing messages to specific downstream mining nodes. +/// +/// This structure maintains mappings for request IDs, channel IDs, and downstream nodes +/// to facilitate efficient message routing. #[derive(Debug, Clone, Default)] pub struct ProxyDownstreamMiningSelector { + /// Maps request IDs to their corresponding downstream nodes. request_id_to_remotes: HashMap>, BuildNoHashHasher>, + /// Maps group channel IDs to a list of downstream nodes. channel_id_to_downstreams: HashMap>>, BuildNoHashHasher>, + /// Maps standard channel IDs to a single downstream node. 
channel_id_to_downstream: HashMap>, BuildNoHashHasher>, } impl ProxyDownstreamMiningSelector { + /// Creates a new `ProxyDownstreamMiningSelector`. + /// + /// This initializes the internal mappings with `nohash` hashers for performance. pub fn new() -> Self { Self { request_id_to_remotes: HashMap::with_hasher(BuildNoHashHasher::default()), @@ -24,6 +67,10 @@ impl ProxyDownstreamMiningSelector { channel_id_to_downstream: HashMap::with_hasher(BuildNoHashHasher::default()), } } + + /// Creates a new `ProxyDownstreamMiningSelector` wrapped in a mutex and an `Arc`. + /// + /// This is useful for concurrent environments where shared ownership is needed. pub fn new_as_mutex() -> Arc> where Self: Sized, @@ -33,6 +80,10 @@ impl ProxyDownstreamMiningSelector { } impl ProxyDownstreamMiningSelector { + /// Removes a downstream node from all mappings. + /// + /// # Arguments + /// - `d`: The downstream node to be removed. fn _remove_downstream(&mut self, d: &Arc>) { self.request_id_to_remotes.retain(|_, v| !Arc::ptr_eq(v, d)); self.channel_id_to_downstream @@ -43,10 +94,25 @@ impl ProxyDownstreamMiningSelector { impl DownstreamMiningSelector for ProxyDownstreamMiningSelector { + /// Registers a request ID and its associated downstream node. + /// + /// # Arguments + /// - `request_id`: The unique request ID. + /// - `downstream`: The downstream node associated with the request. fn on_open_standard_channel_request(&mut self, request_id: u32, downstream: Arc>) { self.request_id_to_remotes.insert(request_id, downstream); } + /// Finalizes the mapping of a standard channel to its downstream node. + /// + /// # Arguments + /// - `request_id`: The request ID used during the channel opening. + /// - `g_channel_id`: The group channel ID. + /// - `channel_id`: The specific standard channel ID. + /// + /// # Returns + /// - `Ok`: The downstream node associated with the request. + /// - `Err`: If the request ID is unknown. 
fn on_open_standard_channel_success( &mut self, request_id: u32, @@ -69,10 +135,25 @@ impl DownstreamMiningSelector Ok(downstream) } + /// Retrieves all downstream nodes associated with a standard/group channel ID. + /// + /// # Arguments + /// - `channel_id`: The standard/group channel ID. + /// + /// # Returns + /// - `Some`: A reference to the vector of downstream nodes. + /// - `None`: If no nodes are associated with the channel. fn get_downstreams_in_channel(&self, channel_id: u32) -> Option<&Vec>>> { self.channel_id_to_downstreams.get(&channel_id) } + /// Removes all downstream nodes associated with a standard/group channel ID. + /// + /// # Arguments + /// - `channel_id`: The standard/group channel ID. + /// + /// # Returns + /// A vector of the removed downstream nodes. fn remove_downstreams_in_channel(&mut self, channel_id: u32) -> Vec>> { let downs = self .channel_id_to_downstreams @@ -84,17 +165,34 @@ impl DownstreamMiningSelector downs } + /// Removes a specific downstream node from all mappings. + /// + /// # Arguments + /// - `d`: The downstream node to be removed. fn remove_downstream(&mut self, d: &Arc>) { for dws in self.channel_id_to_downstreams.values_mut() { - dws.retain(|d| !Arc::ptr_eq(d, d)); + dws.retain(|node| !Arc::ptr_eq(node, d)); } self._remove_downstream(d); } + /// Retrieves the downstream node associated with a specific standard channel ID. + /// + /// # Arguments + /// - `channel_id`: The standard channel ID. + /// + /// # Returns + /// - `Some`: The downstream node. + /// - `None`: If no node is associated with the channel. fn downstream_from_channel_id(&self, channel_id: u32) -> Option>> { self.channel_id_to_downstream.get(&channel_id).cloned() } + + /// Retrieves all downstream nodes currently managed by this selector. + /// + /// # Returns + /// A vector of downstream nodes. 
fn get_all_downstreams(&self) -> Vec>> { self.channel_id_to_downstream.values().cloned().collect() } @@ -103,16 +201,31 @@ impl DownstreamMiningSelector impl DownstreamSelector for ProxyDownstreamMiningSelector {} /// Implemented by a selector used by an upstream mining node or and upstream mining node -/// abstraction in order to find the right downstream to which a message should be sent or relayied +/// abstraction in order to find the right downstream to which a message should be sent or relayed. pub trait DownstreamMiningSelector: DownstreamSelector { + /// Handles a request to open a standard channel. + /// + /// # Arguments + /// - `request_id`: The ID of the request. + /// - `downstream`: A reference to the downstream requesting the channel. fn on_open_standard_channel_request( &mut self, request_id: u32, downstream: Arc>, ); + /// Handles a successful response to opening a standard channel. + /// + /// # Arguments + /// - `request_id`: The ID of the request. + /// - `g_channel_id`: The global channel ID. + /// - `channel_id`: The local channel ID. + /// + /// # Returns + /// - `Result>, Error>`: The downstream associated with the channel or an + /// error. fn on_open_standard_channel_success( &mut self, request_id: u32, @@ -120,32 +233,62 @@ pub trait DownstreamMiningSelector: channel_id: u32, ) -> Result>, Error>; - // group / standard naming is terrible channel_id in this case can be either the channel_id - // or the group_channel_id + /// Retrieves all downstreams associated with a channel ID. + /// + /// # Arguments + /// - `channel_id`: The channel ID to query. + /// + /// # Returns + /// - `Option<&Vec>>>`: The list of downstreams or `None`. fn get_downstreams_in_channel(&self, channel_id: u32) -> Option<&Vec>>>; + /// Removes all downstreams associated with a channel ID. + /// + /// # Arguments + /// - `channel_id`: The channel ID to remove downstreams from. + /// + /// # Returns + /// - `Vec>>`: The removed downstreams. 
fn remove_downstreams_in_channel(&mut self, channel_id: u32) -> Vec>>; + /// Removes a specific downstream. + /// + /// # Arguments + /// - `d`: A reference to the downstream to remove. fn remove_downstream(&mut self, d: &Arc>); - // only for standard + /// Retrieves a downstream by channel ID (only for standard channels). + /// + /// # Arguments + /// - `channel_id`: The channel ID to query. + /// + /// # Returns + /// - `Option>>`: The downstream or `None`. fn downstream_from_channel_id(&self, channel_id: u32) -> Option>>; + /// Retrieves all downstreams. + /// + /// # Returns + /// - `Vec>>`: All downstreams. fn get_all_downstreams(&self) -> Vec>>; } +/// A generic downstream selector. pub trait DownstreamSelector {} -/// A DownstreamMiningSelector that do nothing. Useful when ParseDownstreamCommonMessages or -/// ParseUpstreamCommonMessages must be implemented in very simple application (eg for test -/// puorposes) +/// A no-op implementation of `DownstreamMiningSelector`. +/// +/// This selector is primarily used for testing or minimal setups where routing logic is not needed. #[derive(Debug, Clone, Copy, Default)] pub struct NullDownstreamMiningSelector(); impl NullDownstreamMiningSelector { + /// Creates a new `NullDownstreamMiningSelector`. pub fn new() -> Self { NullDownstreamMiningSelector() } + + /// Creates a new `NullDownstreamMiningSelector` wrapped in a mutex and an `Arc`. pub fn new_as_mutex() -> Arc> where Self: Sized, @@ -155,6 +298,10 @@ impl NullDownstreamMiningSelector { } impl DownstreamMiningSelector for NullDownstreamMiningSelector { + /// Called when a standard channel open request is received. + /// + /// This method is unreachable in `NullDownstreamMiningSelector` since it is a no-op + /// implementation. 
fn on_open_standard_channel_request( &mut self, _request_id: u32, @@ -163,6 +310,9 @@ impl DownstreamMiningSelector for NullDownst unreachable!("on_open_standard_channel_request") } + /// Called when a standard channel open request is successful. + /// + /// This method is unreachable in `NullDownstreamMiningSelector`. fn on_open_standard_channel_success( &mut self, _request_id: u32, @@ -172,52 +322,92 @@ impl DownstreamMiningSelector for NullDownst unreachable!("on_open_standard_channel_success") } + /// Retrieves the downstreams in a specific channel. + /// + /// This method is unreachable in `NullDownstreamMiningSelector`. fn get_downstreams_in_channel(&self, _channel_id: u32) -> Option<&Vec>>> { unreachable!("get_downstreams_in_channel") } + + /// Removes downstreams in a specific channel. + /// + /// This method is unreachable in `NullDownstreamMiningSelector`. fn remove_downstreams_in_channel(&mut self, _channel_id: u32) -> Vec>> { unreachable!("remove_downstreams_in_channel") } + /// Removes a specific downstream node. + /// + /// This method is unreachable in `NullDownstreamMiningSelector`. + fn remove_downstream(&mut self, _d: &Arc>) { + unreachable!("remove_downstream") + } + + /// Retrieves the downstream associated with a specific channel ID. + /// + /// This method is unreachable in `NullDownstreamMiningSelector`. fn downstream_from_channel_id(&self, _channel_id: u32) -> Option>> { unreachable!("downstream_from_channel_id") } + + /// Retrieves all downstream nodes managed by this selector. + /// + /// This method is unreachable in `NullDownstreamMiningSelector`. fn get_all_downstreams(&self) -> Vec>> { unreachable!("get_all_downstreams") } - - fn remove_downstream(&mut self, _d: &Arc>) { - unreachable!("remove_downstream") - } } impl DownstreamSelector for NullDownstreamMiningSelector {} +/// Trait for selecting upstream nodes in a mining context. pub trait UpstreamSelector {} +/// Trait for selecting upstream mining nodes. 
+/// +/// This trait allows pairing downstream mining nodes with upstream nodes +/// based on their settings and capabilities. pub trait UpstreamMiningSelctor< Down: IsMiningDownstream, Up: IsMiningUpstream, Sel: DownstreamMiningSelector, >: UpstreamSelector { + /// Handles the `SetupConnection` process. + /// + /// # Arguments + /// - `pair_settings`: The settings for pairing downstream and upstream nodes. + /// + /// # Returns + /// - `Ok((Vec>>, u32))`: A vector of upstream nodes and their combined flags. + /// - `Err`: If no upstreams are pairable. #[allow(clippy::type_complexity)] fn on_setup_connection( &mut self, pair_settings: &PairSettings, ) -> Result<(Vec>>, u32), Error>; + + /// Retrieves an upstream node by its ID. + /// + /// # Arguments + /// - `upstream_id`: The unique ID of the upstream node. + /// + /// # Returns + /// - `Some`: The upstream node. + /// - `None`: If no upstream is found. fn get_upstream(&self, upstream_id: u32) -> Option>>; } -/// Upstream selector is used to chose between a set of known mining upstream nodes which one/ones -/// can accept messages from a specific mining downstream node +/// General implementation of an upstream mining selector. #[derive(Debug)] pub struct GeneralMiningSelector< Sel: DownstreamMiningSelector, Down: IsMiningDownstream, Up: IsMiningUpstream, > { + /// List of upstream nodes. pub upstreams: Vec>>, + /// Mapping of upstream IDs to their respective nodes. pub id_to_upstream: HashMap>, BuildNoHashHasher>, sel: std::marker::PhantomData, down: std::marker::PhantomData, @@ -229,10 +419,13 @@ impl< Down: IsMiningDownstream, > GeneralMiningSelector { + /// Creates a new `GeneralMiningSelector`. + /// + /// # Arguments + /// - `upstreams`: A vector of upstream nodes. 
pub fn new(upstreams: Vec>>) -> Self { let mut id_to_upstream = HashMap::with_hasher(BuildNoHashHasher::default()); for up in &upstreams { - // Is ok to unwrap safe_lock result id_to_upstream.insert(up.safe_lock(|u| u.get_id()).unwrap(), up.clone()); } Self { @@ -242,10 +435,16 @@ impl< down: std::marker::PhantomData, } } + + /// Updates the list of upstream nodes. + /// + /// # Arguments + /// - `upstreams`: The new list of upstream nodes. pub fn update_upstreams(&mut self, upstreams: Vec>>) { self.upstreams = upstreams; } } + impl< Sel: DownstreamMiningSelector, Down: IsMiningDownstream, @@ -260,9 +459,14 @@ impl< Up: IsMiningUpstream, > UpstreamMiningSelctor for GeneralMiningSelector { - /// Return the set of mining upstream nodes that can accept messages from a downstream with - /// the passed PairSettings and the sum of all the accepted flags - #[allow(clippy::type_complexity)] + /// Handles the `SetupConnection` process and determines the pairable upstream nodes. + /// + /// # Arguments + /// - `pair_settings`: The settings for pairing downstream and upstream nodes. + /// + /// # Returns + /// - `Ok((Vec>>, u32))`: Pairable upstream nodes and their combined flags. + /// - `Err`: If no upstreams are pairable. fn on_setup_connection( &mut self, pair_settings: &PairSettings, @@ -272,10 +476,8 @@ impl< for node in &self.upstreams { let is_pairable = node .safe_lock(|node| node.is_pairable(pair_settings)) - // Is ok to unwrap safe_lock result .unwrap(); if is_pairable { - // Is ok to unwrap safe_lock result supported_flags |= node.safe_lock(|n| n.get_flags()).unwrap(); supported_upstreams.push(node.clone()); } @@ -287,6 +489,14 @@ impl< Err(Error::NoPairableUpstream((2, 2, 0))) } + /// Retrieves an upstream node by its ID. + /// + /// # Arguments + /// - `upstream_id`: The unique ID of the upstream node. + /// + /// # Returns + /// - `Some`: The upstream node. + /// - `None`: If no upstream is found. 
fn get_upstream(&self, upstream_id: u32) -> Option>> { self.id_to_upstream.get(&upstream_id).cloned() } From 098a0569550f2ca7e9bd4cc3ca7a1c832294d83d Mon Sep 17 00:00:00 2001 From: GitGab19 Date: Mon, 2 Dec 2024 13:40:51 +0700 Subject: [PATCH 10/18] roles_logic_sv2 README creation --- protocols/v2/roles-logic-sv2/README.md | 34 ++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 protocols/v2/roles-logic-sv2/README.md diff --git a/protocols/v2/roles-logic-sv2/README.md b/protocols/v2/roles-logic-sv2/README.md new file mode 100644 index 000000000..0787863f9 --- /dev/null +++ b/protocols/v2/roles-logic-sv2/README.md @@ -0,0 +1,34 @@ +# `roles_logic_sv2` + +[![crates.io](https://img.shields.io/crates/v/roles_logic_sv2.svg)](https://crates.io/crates/roles_logic_sv2) +[![docs.rs](https://docs.rs/roles_logic_sv2/badge.svg)](https://docs.rs/roles_logic_sv2) +[![rustc+](https://img.shields.io/badge/rustc-1.75.0%2B-lightgrey.svg)](https://blog.rust-lang.org/2023/12/28/Rust-1.75.0.html) +[![license](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/stratum-mining/stratum/blob/main/LICENSE.md) +[![codecov](https://codecov.io/gh/stratum-mining/stratum/branch/main/graph/badge.svg?flag=roles_logic_sv2-coverage)](https://codecov.io/gh/stratum-mining/stratum) + +`roles_logic_sv2` provides the core logic and utilities for implementing roles in the Stratum V2 (Sv2) protocol, such as miners, pools, and proxies. It abstracts message handling, channel management, job creation, and routing logic, enabling efficient and secure communication across upstream and downstream connections. + +## Main Components + +- **Channel Logic**: Manages the lifecycle and settings of communication channels (standard, extended, and group ones) between roles. +- **Handlers**: Provides traits for handling logic of Sv2 protocol messages. +- **Job Management**: Facilitates the creation, validation, and dispatching of mining jobs. 
+- **Parsers**: Handles serialization and deserialization of Sv2 messages via [`binary_sv2`](https://docs.rs/binary_sv2/latest/binary_sv2/index.html).
+- **Routing Logic**: Implements message routing and downstream/upstream selector utilities. Useful for advanced proxy implementations with multiplexing of Standard Channels across different upstreams.
+- **Utilities**: Provides helpers for safe mutex locking, mining-specific calculations, and more.
+
+## Usage
+
+To include this crate in your project, run:
+
+```bash
+cargo add roles_logic_sv2
+```
+
+This crate can be built with the following feature flags:
+
+- `with_serde`: Enables serialization and deserialization support using the serde library. This feature flag also activates the with_serde feature for dependent crates such as `binary_sv2`, `common_messages_sv2`, `template_distribution_sv2`, `job_declaration_sv2`, and `mining_sv2`.
+  Note that this feature flag is only used for the Message Generator, and is deprecated
+  for any other kind of usage. It will likely be fully deprecated in the future.
+- `prop_test`: Enables property-based testing features for template distribution logic, leveraging dependencies' testing capabilities such as `template_distribution_sv2` crate.
+- `disable_nopanic`: Disables the nopanic logic in scenarios where code coverage tools might conflict with it.
\ No newline at end of file From fe0d40a31b4bb420a7d47b6b64528c4a3926580b Mon Sep 17 00:00:00 2001 From: bit-aloo Date: Wed, 4 Dec 2024 18:55:45 +0530 Subject: [PATCH 11/18] roles_logic doc nits --- .../src/channel_logic/channel_factory.rs | 18 +- .../src/channel_logic/proxy_group_channel.rs | 21 +- .../roles-logic-sv2/src/common_properties.rs | 14 +- protocols/v2/roles-logic-sv2/src/errors.rs | 17 +- .../src/handlers/job_declaration.rs | 2 + .../v2/roles-logic-sv2/src/handlers/mining.rs | 1 + .../src/handlers/template_distribution.rs | 1 + .../v2/roles-logic-sv2/src/job_creator.rs | 64 +++--- .../v2/roles-logic-sv2/src/job_dispatcher.rs | 4 +- protocols/v2/roles-logic-sv2/src/lib.rs | 4 +- .../v2/roles-logic-sv2/src/routing_logic.rs | 132 ++++++------ protocols/v2/roles-logic-sv2/src/selectors.rs | 198 +++++++++--------- protocols/v2/roles-logic-sv2/src/utils.rs | 14 +- 13 files changed, 249 insertions(+), 241 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs index cb74c4f76..040adb097 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs @@ -56,7 +56,7 @@ pub enum OnNewShare { /// Used when the received is malformed, is for an inexistent channel or do not meet downstream /// target. SendErrorDownstream(SubmitSharesError<'static>), - /// Used when an exteded channel in a proxy receive a share, and the share meet upstream + /// Used when an extended channel in a proxy receive a share, and the share meet upstream /// target, in this case a new share must be sent upstream. Also an optional template id is /// returned, when a job declarator want to send a valid share upstream could use the /// template for get the up job id. 
@@ -71,7 +71,7 @@ pub enum OnNewShare { /// (share, template id, coinbase,complete extranonce) ShareMeetBitcoinTarget((Share, Option, Vec, Vec)), /// Indicate that the share meet downstream target, in the case we could send a success - /// response dowmstream. + /// response downstream. ShareMeetDownstreamTarget, } @@ -266,7 +266,7 @@ impl ChannelFactory { let max_extranonce_size = self.extranonces.get_range2_len() as u16; if min_extranonce_size <= max_extranonce_size { // SECURITY is very unlikely to finish the ids btw this unwrap could be used by an - // attaccher that want to dirsrupt the service maybe we should have a method + // attacker that want to disrupt the service maybe we should have a method // to reuse ids that are no longer connected? let channel_id = self .ids @@ -404,7 +404,7 @@ impl ChannelFactory { } /// This function is called when downstream have a group channel - /// Shouldnt all standard channel's be non HOM?? + /// should not all standard channel's be non HOM?? fn new_standard_channel_for_non_hom_downstream( &mut self, request_id: u32, @@ -472,7 +472,7 @@ impl ChannelFactory { .get(&channel_id) .unwrap(); // OPTIMIZATION this could be memoized somewhere cause is very likely that we will receive a - // lot od OpenStandardMiningChannel requests consequtevely + // lot od OpenStandardMiningChannel requests consecutively let job_id = self.job_ids.next(); let future_jobs: Option>> = self .future_jobs @@ -568,11 +568,11 @@ impl ChannelFactory { } // When a new non HOM downstream opens a channel, we use this function to prepare all the - // extended jobs (future and non) and the prev hash that we need to send dowmstream + // extended jobs (future and non) and the prev hash that we need to send downstream fn prepare_jobs_and_p_hash(&mut self, result: &mut Vec, complete_id: u64) { // If group is 0 it means that we are preparing jobs and p hash for a non HOM downstream // that want to open a new extended channel in that case we want to use the channel 
id - // TODO verify that this is true also for the case where the channle factory is in a proxy + // TODO verify that this is true also for the case where the channel factory is in a proxy // and not in a pool. let group_id = match GroupId::into_group_id(complete_id) { 0 => GroupId::into_channel_id(complete_id), @@ -1178,7 +1178,7 @@ impl PoolChannelFactory { let target = self.job_creator.last_target(); // When downstream set a custom mining job we add the job to the negotiated job // hashmap, with the extended channel id as a key. Whenever the pool receive a share must - // first check if the channel have a negotiated job if so we can not retreive the template + // first check if the channel have a negotiated job if so we can not retrieve the template // via the job creator but we create a new one from the set custom job. if self.negotiated_jobs.contains_key(&m.channel_id) { let referenced_job = self.negotiated_jobs.get(&m.channel_id).unwrap(); @@ -1318,7 +1318,7 @@ impl PoolChannelFactory { } } -/// Used by proxies that want to open extended channls with upstream. If the proxy has job +/// Used by proxies that want to open extended channels with upstream. If the proxy has job /// declaration capabilities, we set the job creator and the coinbase outs. #[derive(Debug)] pub struct ProxyExtendedChannelFactory { diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs index 452c39fbb..73068fb8e 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs @@ -99,9 +99,9 @@ impl GroupChannel { last_received_job: None, } } - /// Called when a channel is successfully opened for header only mining on standard channels. 
- /// Here we store the new channel, and update state for jobs and return relevant SV2 messages - /// (NewMiningJob and SNPH) + // Called when a channel is successfully opened for header only mining(HOM) on standard + // channels. Here, we store the new channel, and update state for jobs and return relevant + // SV2 messages (NewMiningJob and SNPH) fn on_channel_success_for_hom_downtream( &mut self, m: OpenStandardMiningChannelSuccess, @@ -151,9 +151,10 @@ impl GroupChannel { Ok(res) } - /// If a matching job is already in the future job queue, - /// we set a new valid job, otherwise we clear the future jobs - /// queue and stage a prev hash to be used when the job arrives + + // If a matching job is already in the future job queue, + // we set a new valid job, otherwise we clear the future jobs + // queue and stage a prev hash to be used when the job arrives fn update_new_prev_hash(&mut self, m: &SetNewPrevHash) { while let Some(job) = self.future_jobs.pop() { if job.job_id == m.job_id { @@ -171,8 +172,9 @@ impl GroupChannel { }; self.last_prev_hash = Some(cloned.clone()); } - /// Pushes new job to future_job queue if it is future, - /// otherwise we set it as the valid job + + // Pushes new job to future_job queue if it is future, + // otherwise we set it as the valid job fn on_new_extended_mining_job(&mut self, m: NewExtendedMiningJob<'static>) { self.last_received_job = Some(m.clone()); if m.is_future() { @@ -181,7 +183,8 @@ impl GroupChannel { self.last_valid_job = Some(m) } } - /// Returns most recent job + + // Returns most recent job fn last_received_job_to_standard_job( &mut self, channel_id: u32, diff --git a/protocols/v2/roles-logic-sv2/src/common_properties.rs b/protocols/v2/roles-logic-sv2/src/common_properties.rs index ba7f57076..d36c901da 100644 --- a/protocols/v2/roles-logic-sv2/src/common_properties.rs +++ b/protocols/v2/roles-logic-sv2/src/common_properties.rs @@ -29,7 +29,7 @@ pub struct PairSettings { pub trait IsUpstream + ?Sized> { /// Used to 
bitcoin protocol version for the channel. fn get_version(&self) -> u16; - // Used to get flags for the defined sv2 message protocol + /// Used to get flags for the defined sv2 message protocol fn get_flags(&self) -> u32; /// Used to check if the upstream supports the protocol that the downstream wants to use fn get_supported_protocols(&self) -> Vec; @@ -55,7 +55,7 @@ pub trait IsUpstream + ?Sized> /// Channel to be opened with the upstream nodes. #[derive(Debug, Clone, Copy)] pub enum UpstreamChannel { - // nominal hash rate + /// nominal hash rate Standard(f32), Group, Extended, @@ -108,7 +108,7 @@ pub trait IsMiningDownstream: IsDownstream { } } -/// Implemented for the NullDownstreamMiningSelector +// Implemented for the NullDownstreamMiningSelector impl IsUpstream for () { fn get_version(&self) -> u16 { unreachable!("Null upstream do not have a version"); @@ -134,7 +134,7 @@ impl IsUpstream for } } -/// Implemented for the NullDownstreamMiningSelector +// Implemented for the NullDownstreamMiningSelector impl IsDownstream for () { fn get_downstream_mining_data(&self) -> CommonDownstreamData { unreachable!("Null downstream do not have mining data"); @@ -160,11 +160,11 @@ impl IsMiningUpstream downstream ids + // Mapping of upstream id -> downstream ids request_ids_map: HashMap>, next_id: u32, } @@ -188,7 +188,7 @@ impl RequestIdMapper { new_id } - /// Removes a upstream/downstream mapping from the `RequsetIdMapper`. + /// Removes a upstream/downstream mapping from the `RequestIdMapper`. 
pub fn remove(&mut self, upstream_id: u32) -> Option { self.request_ids_map.remove(&upstream_id) } diff --git a/protocols/v2/roles-logic-sv2/src/errors.rs b/protocols/v2/roles-logic-sv2/src/errors.rs index cc05fede7..47369a483 100644 --- a/protocols/v2/roles-logic-sv2/src/errors.rs +++ b/protocols/v2/roles-logic-sv2/src/errors.rs @@ -7,6 +7,7 @@ use crate::{ use binary_sv2::Error as BinarySv2Error; use std::fmt::{self, Display, Formatter}; +/// Error enum #[derive(Debug)] pub enum Error { /// Payload size is too big to fit into a frame @@ -29,7 +30,7 @@ pub enum Error { NoCompatibleUpstream(CommonDownstreamData), /// Error if the hashmap `future_jobs` field in the `GroupChannelJobDispatcher` is empty. NoFutureJobs, - /// No Downstreams connected + /// No Downstream's connected NoDownstreamsConnected, /// PrevHash requires non-existent Job Id PrevHashRequireNonExistentJobId(u32), @@ -61,7 +62,7 @@ pub enum Error { GroupIdNotFound, /// A share has been received but no job for it exist ShareDoNotMatchAnyJob, - /// A share has been recived but no channel for it exist + /// A share has been received but no channel for it exist ShareDoNotMatchAnyChannel, /// Coinbase prefix + extranonce + coinbase suffix is not a valid coinbase InvalidCoinbase, @@ -124,7 +125,7 @@ impl Display for Error { BadPayloadSize => write!(f, "Payload is too big to fit into the frame"), BinarySv2Error(v) => write!( f, - "BinarySv2Error: error in serializing/deserilizing binary format {:?}", + "BinarySv2Error: error in serializing/deserializing binary format {:?}", v ), DownstreamDown => { @@ -174,12 +175,12 @@ impl Display for Error { }, NoMoreExtranonces => write!(f, "No more extranonces"), JobIsNotFutureButPrevHashNotPresent => write!(f, "A non future job always expect a previous new prev hash"), - ChannelIsNeitherExtendedNeitherInAPool => write!(f, "If a channel is neither extended neither is part of a pool the only thing to do when a OpenStandardChannle is received is to relay it upstream with 
and updated request id"), - ExtranonceSpaceEnded => write!(f, "No more avaible extranonces for downstream"), + ChannelIsNeitherExtendedNeitherInAPool => write!(f, "If a channel is neither extended neither is part of a pool the only thing to do when a OpenStandardChannel is received is to relay it upstream with and updated request id"), + ExtranonceSpaceEnded => write!(f, "No more available extranonces for downstream"), ImpossibleToCalculateMerkleRoot => write!(f, "Impossible to calculate merkle root"), GroupIdNotFound => write!(f, "Group id not found"), - ShareDoNotMatchAnyJob => write!(f, "A share has been recived but no job for it exist"), - ShareDoNotMatchAnyChannel => write!(f, "A share has been recived but no channel for it exist"), + ShareDoNotMatchAnyJob => write!(f, "A share has been received but no job for it exist"), + ShareDoNotMatchAnyChannel => write!(f, "A share has been received but no channel for it exist"), InvalidCoinbase => write!(f, "Coinbase prefix + extranonce + coinbase suffix is not a valid coinbase"), ValueRemainingNotUpdated => write!(f, "Value remaining in coinbase output was not correctly updated (it's equal to 0)"), UnknownOutputScriptType => write!(f, "Unknown script type in config"), @@ -189,7 +190,7 @@ impl Display for Error { TxVersionTooBig => write!(f, "Tx version can not be greater than i32::MAX"), TxVersionTooLow => write!(f, "Tx version can not be lower than 1"), TxDecodingError(e) => write!(f, "Impossible to decode tx: {:?}", e), - NotFoundChannelId => write!(f, "No downstream has been registred for this channel id"), + NotFoundChannelId => write!(f, "No downstream has been registered for this channel id"), NoValidJob => write!(f, "Impossible to create a standard job for channelA cause no valid job has been received from upstream yet"), NoValidTranslatorJob => write!(f, "Impossible to create a extended job for channel cause no valid job has been received from upstream yet"), NoTemplateForId => write!(f, "Impossible to retrieve 
a template for the required job id"), diff --git a/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs b/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs index f0453fd8d..a2ec2aea1 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs @@ -41,6 +41,8 @@ use crate::{parsers::JobDeclaration, utils::Mutex}; use std::sync::Arc; + +/// see [`SendTo_`] pub type SendTo = SendTo_, ()>; use super::SendTo_; use crate::errors::Error; diff --git a/protocols/v2/roles-logic-sv2/src/handlers/mining.rs b/protocols/v2/roles-logic-sv2/src/handlers/mining.rs index 918a32405..8edb43976 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/mining.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/mining.rs @@ -60,6 +60,7 @@ use const_sv2::*; use std::{fmt::Debug as D, sync::Arc}; use tracing::{debug, error, info, trace}; +/// see [`SendTo_`] pub type SendTo = SendTo_, Remote>; /// Represents supported channel types in a mining connection. 
diff --git a/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs b/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs index a0a5b94ec..445a83922 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs @@ -42,6 +42,7 @@ use template_distribution_sv2::{ RequestTransactionDataSuccess, SetNewPrevHash, SubmitSolution, }; +/// see [`SendTo_`] pub type SendTo = SendTo_, ()>; use const_sv2::*; use core::convert::TryInto; diff --git a/protocols/v2/roles-logic-sv2/src/job_creator.rs b/protocols/v2/roles-logic-sv2/src/job_creator.rs index 999b8a2f1..ec2f1cda5 100644 --- a/protocols/v2/roles-logic-sv2/src/job_creator.rs +++ b/protocols/v2/roles-logic-sv2/src/job_creator.rs @@ -170,15 +170,15 @@ pub fn extended_job_from_custom_job( ) } -/// returns an extended job given the provided template from the Template Provider and other -/// Pool role related fields. -/// -/// Pool related arguments: -/// -/// * `coinbase_outputs`: coinbase output transactions specified by the pool. -/// * `job_id`: incremented job identifier specified by the pool. -/// * `version_rolling_allowed`: boolean specified by the channel. -/// * `extranonce_len`: extranonce length specified by the channel. +// returns an extended job given the provided template from the Template Provider and other +// Pool role related fields. +// +// Pool related arguments: +// +// * `coinbase_outputs`: coinbase output transactions specified by the pool. +// * `job_id`: incremented job identifier specified by the pool. +// * `version_rolling_allowed`: boolean specified by the channel. +// * `extranonce_len`: extranonce length specified by the channel. 
fn new_extended_job( new_template: &mut NewTemplate, coinbase_outputs: &mut [TxOut], @@ -235,8 +235,8 @@ fn new_extended_job( Ok(new_extended_mining_job) } -/// used to extract the coinbase transaction prefix for extended jobs -/// so the extranonce search space can be introduced +// used to extract the coinbase transaction prefix for extended jobs +// so the extranonce search space can be introduced fn coinbase_tx_prefix( coinbase: &Transaction, script_prefix_len: usize, @@ -259,15 +259,15 @@ fn coinbase_tx_prefix( r.try_into().map_err(Error::BinarySv2Error) } -/// used to extract the coinbase transaction suffix for extended jobs -/// so the extranonce search space can be introduced +// used to extract the coinbase transaction suffix for extended jobs +// so the extranonce search space can be introduced fn coinbase_tx_suffix( coinbase: &Transaction, extranonce_len: u8, script_prefix_len: usize, ) -> Result, Error> { let encoded = coinbase.serialize(); - // If script_prefix_len is not 0 we are not in a test enviornment and the coinbase have the 0 + // If script_prefix_len is not 0 we are not in a test environment and the coinbase have the 0 // witness let segwit_bytes = match script_prefix_len { 0 => 0, @@ -320,8 +320,8 @@ fn get_bip_34_bytes(new_template: &NewTemplate, tx_version: i32) -> Result, version: i32, @@ -331,7 +331,7 @@ fn coinbase( pool_signature: String, extranonce_len: u8, ) -> Transaction { - // If script_prefix_len is not 0 we are not in a test enviornment and the coinbase have the 0 + // If script_prefix_len is not 0 we are not in a test environment and the coinbase have the 0 // witness let witness = match bip34_bytes.len() { 0 => Witness::from_vec(vec![]), @@ -379,8 +379,8 @@ pub fn extended_job_to_non_segwit( coinbase_tx_suffix: stripped_tx.into_coinbase_tx_suffix()?, }) } -/// Helper type to strip a segwit data from the coinbase_tx_prefix and coinbase_tx_suffix -/// to ensure miners are hashing with the correct coinbase +// Helper type to strip 
a segwit data from the coinbase_tx_prefix and coinbase_tx_suffix +// to ensure miners are hashing with the correct coinbase struct StrippedCoinbaseTx { version: u32, inputs: Vec>, @@ -391,7 +391,7 @@ struct StrippedCoinbaseTx { } impl StrippedCoinbaseTx { - /// create + // create fn from_coinbase(tx: Transaction, full_extranonce_len: usize) -> Result { let bip141_bytes_len = tx .input @@ -421,13 +421,13 @@ impl StrippedCoinbaseTx { }) } - /// the coinbase tx prefix is the LE bytes concatenation of the tx version and all - /// of the tx inputs minus the 32 bytes after the bip34 bytes in the script - /// and the last input's sequence (used as the first entry in the coinbase tx suffix). - /// The last 32 bytes after the bip34 bytes in the script will be used to allow extranonce - /// space for the miner. We remove the bip141 marker and flag since it is only used for - /// computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root - // clippy allow because we dont want to consume self + // the coinbase tx prefix is the LE bytes concatenation of the tx version and all + // of the tx inputs minus the 32 bytes after the bip34 bytes in the script + // and the last input's sequence (used as the first entry in the coinbase tx suffix). + // The last 32 bytes after the bip34 bytes in the script will be used to allow extranonce + // space for the miner. We remove the bip141 marker and flag since it is only used for + // computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root + // clippy allow because we don't want to consume self #[allow(clippy::wrong_self_convention)] fn into_coinbase_tx_prefix(&self) -> Result, errors::Error> { let mut inputs = self.inputs.clone(); @@ -446,11 +446,11 @@ impl StrippedCoinbaseTx { prefix.try_into().map_err(Error::BinarySv2Error) } - /// This coinbase tx suffix is the sequence of the last tx input plus - /// the serialized tx outputs and the lock time. 
Note we do not use the witnesses - /// (placed between txouts and lock time) since it is only used for - /// computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root - // clippy allow because we dont want to consume self + // This coinbase tx suffix is the sequence of the last tx input plus + // the serialized tx outputs and the lock time. Note we do not use the witnesses + // (placed between txouts and lock time) since it is only used for + // computing the `wtxid` and the legacy `txid` is what is used for computing the merkle root + // clippy allow because we don't want to consume self #[allow(clippy::wrong_self_convention)] fn into_coinbase_tx_suffix(&self) -> Result, errors::Error> { let mut suffix: Vec = vec![]; diff --git a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs index 4eac84734..e0589c526 100644 --- a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs +++ b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs @@ -54,7 +54,7 @@ struct BlockHeader<'a> { } impl<'a> BlockHeader<'a> { - /// calculates the sha256 blockhash of the header + // calculates the sha256 blockhash of the header #[allow(dead_code)] pub fn hash(&self) -> Target { let mut engine = sha256d::Hash::engine(); @@ -98,7 +98,7 @@ pub struct GroupChannelJobDispatcher { /// Used to signal if submitted shares correlate to valid jobs pub enum SendSharesResponse { - //ValidAndMeetUpstreamTarget((SubmitSharesStandard,SubmitSharesSuccess)), + /// ValidAndMeetUpstreamTarget((SubmitSharesStandard,SubmitSharesSuccess)), Valid(SubmitSharesStandard), Invalid(SubmitSharesError<'static>), } diff --git a/protocols/v2/roles-logic-sv2/src/lib.rs b/protocols/v2/roles-logic-sv2/src/lib.rs index a38e0ad97..c6ffe67bc 100644 --- a/protocols/v2/roles-logic-sv2/src/lib.rs +++ b/protocols/v2/roles-logic-sv2/src/lib.rs @@ -22,13 +22,13 @@ //! handlers::common::ParseDownstreamCommonMessages + //! 
handlers::mining::ParseDownstreamMiningMessages + //! -//! ProxyDownstreamConnetion: +//! ProxyDownstreamConnection: //! common_properties::IsDownstream + //! common_properties::IsMiningDownstream + //! handlers::common::ParseDownstreamCommonMessages + //! handlers::mining::ParseDownstreamMiningMessages + //! -//! ProxyUpstreamConnetion: +//! ProxyUpstreamConnection: //! common_properties::IsUpstream + //! common_properties::IsMiningUpstream + //! handlers::common::ParseUpstreamCommonMessages + diff --git a/protocols/v2/roles-logic-sv2/src/routing_logic.rs b/protocols/v2/roles-logic-sv2/src/routing_logic.rs index a23e62e4b..37ddbb7fa 100644 --- a/protocols/v2/roles-logic-sv2/src/routing_logic.rs +++ b/protocols/v2/roles-logic-sv2/src/routing_logic.rs @@ -274,20 +274,20 @@ impl< Sel: DownstreamMiningSelector + D, > MiningRouter for MiningProxyRoutingLogic { - /// Handles the `OpenStandardMiningChannel` message. - /// - /// This method processes the request to open a standard mining channel. It selects a suitable - /// upstream, updates the request ID to ensure uniqueness, and then delegates to - /// `on_open_standard_channel_request_header_only` to finalize the process. - /// - /// # Arguments - /// - `downstream`: The downstream requesting the channel opening. - /// - `request`: A mutable reference to the `OpenStandardMiningChannel` message. - /// - `downstream_mining_data`: Common data about the downstream mining setup. - /// - /// # Returns - /// - `Result>, Error>`: Returns the selected upstream for the downstream or an - /// error. + // Handles the `OpenStandardMiningChannel` message. + // + // This method processes the request to open a standard mining channel. It selects a suitable + // upstream, updates the request ID to ensure uniqueness, and then delegates to + // `on_open_standard_channel_request_header_only` to finalize the process. + // + // # Arguments + // - `downstream`: The downstream requesting the channel opening. 
+ // - `request`: A mutable reference to the `OpenStandardMiningChannel` message. + // - `downstream_mining_data`: Common data about the downstream mining setup. + // + // # Returns + // - `Result>, Error>`: Returns the selected upstream for the downstream or an + // error. fn on_open_standard_channel( &mut self, downstream: Arc>, @@ -310,19 +310,19 @@ impl< self.on_open_standard_channel_request_header_only(downstream, request) } - /// Handles the `OpenStandardMiningChannelSuccess` message. - /// - /// This method processes the success message received from an upstream when a standard mining - /// channel is opened. It maps the request ID back to the original ID from the downstream and - /// updates the associated group and channel IDs in the upstream. - /// - /// # Arguments - /// - `upstream`: The upstream involved in the channel opening. - /// - `request`: A mutable reference to the `OpenStandardMiningChannelSuccess` message. - /// - /// # Returns - /// - `Result>, Error>`: Returns the downstream corresponding to the request or - /// an error. + // Handles the `OpenStandardMiningChannelSuccess` message. + // + // This method processes the success message received from an upstream when a standard mining + // channel is opened. It maps the request ID back to the original ID from the downstream and + // updates the associated group and channel IDs in the upstream. + // + // # Arguments + // - `upstream`: The upstream involved in the channel opening. + // - `request`: A mutable reference to the `OpenStandardMiningChannelSuccess` message. + // + // # Returns + // - `Result>, Error>`: Returns the downstream corresponding to the request or + // an error. fn on_open_standard_channel_success( &mut self, upstream: Arc>, @@ -352,17 +352,17 @@ impl< } } -/// Selects the upstream with the lowest total hash rate. -/// -/// # Arguments -/// - `ups`: A mutable slice of upstream mining entities. 
-/// -/// # Returns -/// - `Arc>`: The upstream entity with the lowest total hash rate. -/// -/// # Panics -/// This function panics if the slice is empty, as it is internally guaranteed that this function -/// will only be called with non-empty vectors. +// Selects the upstream with the lowest total hash rate. +// +// # Arguments +// - `ups`: A mutable slice of upstream mining entities. +// +// # Returns +// - `Arc>`: The upstream entity with the lowest total hash rate. +// +// # Panics +// This function panics if the slice is empty, as it is internally guaranteed that this function +// will only be called with non-empty vectors. fn minor_total_hr_upstream(ups: &mut [Arc>]) -> Arc> where Down: IsMiningDownstream + D, @@ -384,13 +384,13 @@ where .clone() // Unwrap is safe because the function only operates on non-empty vectors. } -/// Filters upstream entities that are not configured for header-only mining. -/// -/// # Arguments -/// - `ups`: A mutable slice of upstream mining entities. -/// -/// # Returns -/// - `Vec>>`: A vector of upstream entities that are not header-only. +// Filters upstream entities that are not configured for header-only mining. +// +// # Arguments +// - `ups`: A mutable slice of upstream mining entities. +// +// # Returns +// - `Vec>>`: A vector of upstream entities that are not header-only. fn filter_header_only(ups: &mut [Arc>]) -> Vec>> where Down: IsMiningDownstream + D, @@ -408,18 +408,18 @@ where .collect() } -/// Selects the most appropriate upstream entity based on specific criteria. -/// -/// # Criteria -/// - If only one upstream is available, it is selected. -/// - If multiple upstreams exist, preference is given to those not configured as header-only. -/// - Among the remaining upstreams, the one with the lowest total hash rate is selected. -/// -/// # Arguments -/// - `ups`: A mutable slice of upstream mining entities. -/// -/// # Returns -/// - `Option>>`: The selected upstream entity, or `None` if none are available. 
+// Selects the most appropriate upstream entity based on specific criteria. +// +// # Criteria +// - If only one upstream is available, it is selected. +// - If multiple upstreams exist, preference is given to those not configured as header-only. +// - Among the remaining upstreams, the one with the lowest total hash rate is selected. +// +// # Arguments +// - `ups`: A mutable slice of upstream mining entities. +// +// # Returns +// - `Option>>`: The selected upstream entity, or `None` if none are available. fn select_upstream(ups: &mut [Arc>]) -> Option>> where Down: IsMiningDownstream + D, @@ -443,18 +443,18 @@ impl< Sel: DownstreamMiningSelector + D, > MiningProxyRoutingLogic { - /// Selects an upstream entity from a list of available upstreams. - /// - /// # Arguments - /// - `ups`: A mutable slice of upstream mining entities. - /// - /// # Returns - /// - `Option>>`: The selected upstream entity, or `None` if none are available. + // Selects an upstream entity from a list of available upstreams. + // + // # Arguments + // - `ups`: A mutable slice of upstream mining entities. + // + // # Returns + // - `Option>>`: The selected upstream entity, or `None` if none are available. fn select_upstreams(ups: &mut [Arc>]) -> Option>> { select_upstream(ups) } - /// Handles the `SetupConnection` process for header-only mining downstreams. + /// Handles the `SetupConnection` process for header-only mining downstream's. /// /// This method selects compatible upstreams, assigns connection flags, and maps the /// downstream to the selected upstreams. @@ -487,7 +487,7 @@ impl< Ok((downstream_data, message)) } - /// Handles a standard channel opening request for header-only mining downstreams. + /// Handles a standard channel opening request for header-only mining downstream's. /// /// # Arguments /// - `downstream`: The downstream mining entity. 
diff --git a/protocols/v2/roles-logic-sv2/src/selectors.rs b/protocols/v2/roles-logic-sv2/src/selectors.rs index fcfcffeb5..ceb4dfed3 100644 --- a/protocols/v2/roles-logic-sv2/src/selectors.rs +++ b/protocols/v2/roles-logic-sv2/src/selectors.rs @@ -48,18 +48,18 @@ use std::{collections::HashMap, fmt::Debug as D, sync::Arc}; /// to facilitate efficient message routing. #[derive(Debug, Clone, Default)] pub struct ProxyDownstreamMiningSelector { - /// Maps request IDs to their corresponding downstream nodes. + // Maps request IDs to their corresponding downstream nodes. request_id_to_remotes: HashMap>, BuildNoHashHasher>, - /// Maps group channel IDs to a list of downstream nodes. + // Maps group channel IDs to a list of downstream nodes. channel_id_to_downstreams: HashMap>>, BuildNoHashHasher>, - /// Maps standard channel IDs to a single downstream node. + // Maps standard channel IDs to a single downstream node. channel_id_to_downstream: HashMap>, BuildNoHashHasher>, } impl ProxyDownstreamMiningSelector { /// Creates a new `ProxyDownstreamMiningSelector`. /// - /// This initializes the internal mappings with `nohash` hashers for performance. + /// This initializes the internal mappings with `nohash` hasher for performance. pub fn new() -> Self { Self { request_id_to_remotes: HashMap::with_hasher(BuildNoHashHasher::default()), @@ -80,10 +80,10 @@ impl ProxyDownstreamMiningSelector { } impl ProxyDownstreamMiningSelector { - /// Removes a downstream node from all mappings. - /// - /// # Arguments - /// - `d`: The downstream node to be removed. + // Removes a downstream node from all mappings. + // + // # Arguments + // - `d`: The downstream node to be removed. 
fn _remove_downstream(&mut self, d: &Arc>) { self.request_id_to_remotes.retain(|_, v| !Arc::ptr_eq(v, d)); self.channel_id_to_downstream @@ -94,25 +94,25 @@ impl ProxyDownstreamMiningSelector { impl DownstreamMiningSelector for ProxyDownstreamMiningSelector { - /// Registers a request ID and its associated downstream node. - /// - /// # Arguments - /// - `request_id`: The unique request ID. - /// - `downstream`: The downstream node associated with the request. + // Registers a request ID and its associated downstream node. + // + // # Arguments + // - `request_id`: The unique request ID. + // - `downstream`: The downstream node associated with the request. fn on_open_standard_channel_request(&mut self, request_id: u32, downstream: Arc>) { self.request_id_to_remotes.insert(request_id, downstream); } - /// Finalizes the mapping of a standard channel to its downstream node. - /// - /// # Arguments - /// - `request_id`: The request ID used during the channel opening. - /// - `g_channel_id`: The group channel ID. - /// - `channel_id`: The specific standard channel ID. - /// - /// # Returns - /// - `Ok`: The downstream node associated with the request. - /// - `Err`: If the request ID is unknown. + // Finalizes the mapping of a standard channel to its downstream node. + // + // # Arguments + // - `request_id`: The request ID used during the channel opening. + // - `g_channel_id`: The group channel ID. + // - `channel_id`: The specific standard channel ID. + // + // # Returns + // - `Ok`: The downstream node associated with the request. + // - `Err`: If the request ID is unknown. fn on_open_standard_channel_success( &mut self, request_id: u32, @@ -135,25 +135,25 @@ impl DownstreamMiningSelector Ok(downstream) } - /// Retrieves all downstream nodes associated with a standard/group channel ID. - /// - /// # Arguments - /// - `channel_id`: The standard/group channel ID. - /// - /// # Returns - /// - `Some`: A reference to the vector of downstream nodes. 
- /// - `None`: If no nodes are associated with the channel. + // Retrieves all downstream nodes associated with a standard/group channel ID. + // + // # Arguments + // - `channel_id`: The standard/group channel ID. + // + // # Returns + // - `Some`: A reference to the vector of downstream nodes. + // - `None`: If no nodes are associated with the channel. fn get_downstreams_in_channel(&self, channel_id: u32) -> Option<&Vec>>> { self.channel_id_to_downstreams.get(&channel_id) } - /// Removes all downstream nodes associated with a standard/group channel ID. - /// - /// # Arguments - /// - `channel_id`: The standard/group channel ID. - /// - /// # Returns - /// A vector of the removed downstream nodes. + // Removes all downstream nodes associated with a standard/group channel ID. + // + // # Arguments + // - `channel_id`: The standard/group channel ID. + // + // # Returns + // A vector of the removed downstream nodes. fn remove_downstreams_in_channel(&mut self, channel_id: u32) -> Vec>> { let downs = self .channel_id_to_downstreams @@ -165,10 +165,10 @@ impl DownstreamMiningSelector downs } - /// Removes a specific downstream node from all mappings. - /// - /// # Arguments - /// - `d`: The downstream node to be removed. + // Removes a specific downstream node from all mappings. + // + // # Arguments + // - `d`: The downstream node to be removed. fn remove_downstream(&mut self, d: &Arc>) { for dws in self.channel_id_to_downstreams.values_mut() { dws.retain(|node| !Arc::ptr_eq(node, d)); @@ -177,22 +177,22 @@ impl DownstreamMiningSelector self._remove_downstream(d); } - /// Retrieves the downstream node associated with a specific standard channel ID. - /// - /// # Arguments - /// - `channel_id`: The standard channel ID. - /// - /// # Returns - /// - `Some`: The downstream node. - /// - `None`: If no node is associated with the channel. + // Retrieves the downstream node associated with a specific standard channel ID. 
+ // + // # Arguments + // - `channel_id`: The standard channel ID. + // + // # Returns + // - `Some`: The downstream node. + // - `None`: If no node is associated with the channel. fn downstream_from_channel_id(&self, channel_id: u32) -> Option>> { self.channel_id_to_downstream.get(&channel_id).cloned() } - /// Retrieves all downstream nodes currently managed by this selector. - /// - /// # Returns - /// A vector of downstream nodes. + // Retrieves all downstream nodes currently managed by this selector. + // + // # Returns + // A vector of downstream nodes. fn get_all_downstreams(&self) -> Vec>> { self.channel_id_to_downstream.values().cloned().collect() } @@ -233,22 +233,22 @@ pub trait DownstreamMiningSelector: channel_id: u32, ) -> Result>, Error>; - /// Retrieves all downstreams associated with a channel ID. + /// Retrieves all downstream's associated with a channel ID. /// /// # Arguments /// - `channel_id`: The channel ID to query. /// /// # Returns - /// - `Option<&Vec>>>`: The list of downstreams or `None`. + /// - `Option<&Vec>>>`: The list of downstream's or `None`. fn get_downstreams_in_channel(&self, channel_id: u32) -> Option<&Vec>>>; - /// Removes all downstreams associated with a channel ID. + /// Removes all downstream's associated with a channel ID. /// /// # Arguments - /// - `channel_id`: The channel ID to remove downstreams from. + /// - `channel_id`: The channel ID to remove downstream's from. /// /// # Returns - /// - `Vec>>`: The removed downstreams. + /// - `Vec>>`: The removed downstream's. fn remove_downstreams_in_channel(&mut self, channel_id: u32) -> Vec>>; /// Removes a specific downstream. @@ -266,10 +266,10 @@ pub trait DownstreamMiningSelector: /// - `Option>>`: The downstream or `None`. fn downstream_from_channel_id(&self, channel_id: u32) -> Option>>; - /// Retrieves all downstreams. + /// Retrieves all downstream's. /// /// # Returns - /// - `Vec>>`: All downstreams. + /// - `Vec>>`: All downstream's. 
fn get_all_downstreams(&self) -> Vec>>; } @@ -298,10 +298,10 @@ impl NullDownstreamMiningSelector { } impl DownstreamMiningSelector for NullDownstreamMiningSelector { - /// Called when a standard channel open request is received. - /// - /// This method is unreachable in `NullDownstreamMiningSelector` since it is a no-op - /// implementation. + // Called when a standard channel open request is received. + // + // This method is unreachable in `NullDownstreamMiningSelector` since it is a no-op + // implementation. fn on_open_standard_channel_request( &mut self, _request_id: u32, @@ -310,9 +310,9 @@ impl DownstreamMiningSelector for NullDownst unreachable!("on_open_standard_channel_request") } - /// Called when a standard channel open request is successful. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Called when a standard channel open request is successful. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn on_open_standard_channel_success( &mut self, _request_id: u32, @@ -322,37 +322,37 @@ impl DownstreamMiningSelector for NullDownst unreachable!("on_open_standard_channel_success") } - /// Retrieves the downstreams in a specific channel. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Retrieves the downstream'ss in a specific channel. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn get_downstreams_in_channel(&self, _channel_id: u32) -> Option<&Vec>>> { unreachable!("get_downstreams_in_channel") } - /// Removes downstreams in a specific channel. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Removes downstream's in a specific channel. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn remove_downstreams_in_channel(&mut self, _channel_id: u32) -> Vec>> { unreachable!("remove_downstreams_in_channel") } - /// Removes a specific downstream node. 
- /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Removes a specific downstream node. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn remove_downstream(&mut self, _d: &Arc>) { unreachable!("remove_downstream") } - /// Retrieves the downstream associated with a specific channel ID. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Retrieves the downstream associated with a specific channel ID. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn downstream_from_channel_id(&self, _channel_id: u32) -> Option>> { unreachable!("downstream_from_channel_id") } - /// Retrieves all downstream nodes managed by this selector. - /// - /// This method is unreachable in `NullDownstreamMiningSelector`. + // Retrieves all downstream nodes managed by this selector. + // + // This method is unreachable in `NullDownstreamMiningSelector`. fn get_all_downstreams(&self) -> Vec>> { unreachable!("get_all_downstreams") } @@ -459,14 +459,14 @@ impl< Up: IsMiningUpstream, > UpstreamMiningSelctor for GeneralMiningSelector { - /// Handles the `SetupConnection` process and determines the pairable upstream nodes. - /// - /// # Arguments - /// - `pair_settings`: The settings for pairing downstream and upstream nodes. - /// - /// # Returns - /// - `Ok((Vec>>, u32))`: Pairable upstream nodes and their combined flags. - /// - `Err`: If no upstreams are pairable. + // Handles the `SetupConnection` process and determines the pairable upstream nodes. + // + // # Arguments + // - `pair_settings`: The settings for pairing downstream and upstream nodes. + // + // # Returns + // - `Ok((Vec>>, u32))`: Pairable upstream nodes and their combined flags. + // - `Err`: If no upstreams are pairable. fn on_setup_connection( &mut self, pair_settings: &PairSettings, @@ -489,14 +489,14 @@ impl< Err(Error::NoPairableUpstream((2, 2, 0))) } - /// Retrieves an upstream node by its ID. 
- /// - /// # Arguments - /// - `upstream_id`: The unique ID of the upstream node. - /// - /// # Returns - /// - `Some`: The upstream node. - /// - `None`: If no upstream is found. + // Retrieves an upstream node by its ID. + // + // # Arguments + // - `upstream_id`: The unique ID of the upstream node. + // + // # Returns + // - `Some`: The upstream node. + // - `None`: If no upstream is found. fn get_upstream(&self, upstream_id: u32) -> Option>> { self.id_to_upstream.get(&upstream_id).cloned() } diff --git a/protocols/v2/roles-logic-sv2/src/utils.rs b/protocols/v2/roles-logic-sv2/src/utils.rs index f818eba1f..149958fcf 100644 --- a/protocols/v2/roles-logic-sv2/src/utils.rs +++ b/protocols/v2/roles-logic-sv2/src/utils.rs @@ -282,17 +282,17 @@ pub enum InputError { /// how do we set the adequate target? /// /// According to \[1] and \[2], it is possible to model the probability of finding a block with -/// a random variable X whose distribution is negtive hypergeometric \[3]. +/// a random variable X whose distribution is negative hypergeometric \[3]. /// Such a variable is characterized as follows. Say that there are n (2^256) elements (possible /// hash values), of which t (values <= target) are defined as success and the remaining as -/// failures. The variable X has codomain the positive integers, and X=k is the event where element +/// failures. The variable X has co-domain the positive integers, and X=k is the event where element /// are drawn one after the other, without replacement, and only the k-th element is successful. /// The expected value of this variable is (n-t)/(t+1). /// So, on average, a miner has to perform (2^256-t)/(t+1) hashes before finding hash whose value /// is below the target t. If the pool wants, on average, a share every s seconds, then, on /// average, the miner has to perform h*s hashes before finding one that is smaller than the /// target, where h is the miner's hashrate. Therefore, s*h= (2^256-t)/(t+1). 
If we consider h the -/// global bitcoin's hashrate, s = 600 seconds and t the bicoin global target, then, for all the +/// global bitcoin's hashrate, s = 600 seconds and t the bitcoin global target, then, for all the /// blocks we tried, the two members of the equations have the same order of magnitude and, most /// of the cases, they coincide with the first two digits. We take this as evidence of the /// correctness of our calculations. Thus, if the pool wants on average a share every s @@ -372,7 +372,7 @@ pub fn hash_rate_from_target(target: U256<'static>, share_per_min: f64) -> Resul let max_target = Uint256::from_be_bytes(max_target); let numerator = max_target - (target - Uint256::one()); - // now we calcualte the denominator s(t+1) + // now we calculate the denominator s(t+1) // *100 here to move the fractional bit up so we can make this an int later let shares_occurrency_frequence = 60_f64 / (share_per_min) * 100.0; // note that t+1 cannot be zero because t unsigned. Therefore the denominator is zero if and @@ -416,7 +416,7 @@ pub struct GroupId { } impl GroupId { - /// New GroupId it starts with groups 0, since 0 is reserved for hom downstreams + /// New GroupId it starts with groups 0, since 0 is reserved for hom downstreams pub fn new() -> Self { Self { group_ids: Id::new(), @@ -429,8 +429,8 @@ impl GroupId { self.group_ids.next() } - /// Create a channel for a paricular group and return the channel id - /// _group_id is left for a future use of this API where we have an hirearchy of ids so that we + /// Create a channel for a particular group and return the channel id + /// _group_id is left for a future use of this API where we have a hierarchy of ids so that we /// don't break old versions pub fn new_channel_id(&mut self, _group_id: u32) -> u32 { self.channel_ids.next()
protocols/v2/roles-logic-sv2/src/job_creator.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/protocols/v2/roles-logic-sv2/src/job_creator.rs b/protocols/v2/roles-logic-sv2/src/job_creator.rs index ec2f1cda5..cc3f0fe7b 100644 --- a/protocols/v2/roles-logic-sv2/src/job_creator.rs +++ b/protocols/v2/roles-logic-sv2/src/job_creator.rs @@ -50,6 +50,7 @@ pub fn tx_outputs_to_costum_scripts(tx_outputs: &[u8]) -> Vec { } impl JobsCreators { + /// constructor pub fn new(extranonce_len: u8) -> Self { Self { lasts_new_template: Vec::new(), @@ -62,6 +63,7 @@ impl JobsCreators { } } + /// get template id from job pub fn get_template_id_from_job(&self, job_id: u32) -> Option { self.job_to_template_id.get(&job_id).map(|x| x - 1) } @@ -139,6 +141,7 @@ impl JobsCreators { } } +/// convert custom job into extended job pub fn extended_job_from_custom_job( referenced_job: &mining_sv2::SetCustomMiningJob, pool_signature: String, From d3f4520e1caf588931115f24f01cc136fafd0736 Mon Sep 17 00:00:00 2001 From: plebhash Date: Fri, 20 Dec 2024 10:27:51 +0700 Subject: [PATCH 13/18] fix framing_sv2 with_serde feature on roles_logic_sv2 --- protocols/v2/roles-logic-sv2/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/protocols/v2/roles-logic-sv2/Cargo.toml b/protocols/v2/roles-logic-sv2/Cargo.toml index 2aa4a07a4..ecb17fe5d 100644 --- a/protocols/v2/roles-logic-sv2/Cargo.toml +++ b/protocols/v2/roles-logic-sv2/Cargo.toml @@ -35,6 +35,7 @@ toml = {git = "https://github.com/diondokter/toml-rs", default-features = false [features] with_serde = [ "serde", +"framing_sv2/with_serde", "binary_sv2/with_serde", "common_messages_sv2/with_serde", "template_distribution_sv2/with_serde", From 1e94ae8824313bac36a1c79362438068e9e31695 Mon Sep 17 00:00:00 2001 From: RJ Rybarczyk Date: Sat, 21 Dec 2024 18:48:59 -0500 Subject: [PATCH 14/18] Common properties doc additions --- .../roles-logic-sv2/src/common_properties.rs | 205 ++++++++++++++---- 1 file changed, 163 insertions(+), 42 deletions(-) 
diff --git a/protocols/v2/roles-logic-sv2/src/common_properties.rs b/protocols/v2/roles-logic-sv2/src/common_properties.rs index d36c901da..9198fcdec 100644 --- a/protocols/v2/roles-logic-sv2/src/common_properties.rs +++ b/protocols/v2/roles-logic-sv2/src/common_properties.rs @@ -1,4 +1,9 @@ -//! This module defines traits for properties that every SRI-based application should implement +//! # Common Properties for Stratum V2 (Sv2) Roles +//! +//! Defines common properties, traits, and utilities for implementing upstream and downstream +//! nodes. It provides abstractions for features like connection pairing, mining job distribution, +//! and channel management. These definitions form the foundation for consistent communication and +//! behavior across Sv2 roles/applications. use crate::selectors::{ DownstreamMiningSelector, DownstreamSelector, NullDownstreamMiningSelector, @@ -8,32 +13,82 @@ use mining_sv2::{Extranonce, Target}; use nohash_hasher::BuildNoHashHasher; use std::{collections::HashMap, fmt::Debug as D}; -/// Defines a mining downstream node at the most basic level +/// Defines a mining downstream node at the most basic level. #[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)] pub struct CommonDownstreamData { + /// Header-only mining flag. + /// + /// Enables the processing of block headers only, leaving transaction management and template + /// construction to the upstream node. It reduces the amount of data processed and transmitted + /// by the downstream node, making it useful when bandwidth is limited and transmitting full + /// block templates is costly. + /// + /// - `true`: The downstream node only processes block headers, relying on the upstream for + /// transaction management. + /// - `false`: The downstream node handles full blocks. pub header_only: bool, + + /// Work selection flag. + /// + /// Enables the selection of which transactions or templates the node will work on. 
It allows + /// for more autonomy for downstream nodes to define specific version bits to roll, adjust the + /// difficulty target, apply `timestamp` range constraints, etc. + /// + /// - `true`: The downstream node can modify or customize the mining job, such as choosing + /// specific transactions to include in a block template. + /// - `false`: The downstream node strictly follows the work provided by the upstream, such as + /// pre-constructed templates from a pool. pub work_selection: bool, + + /// Version rolling flag. + /// + /// Enables rolling the block header version bits which allows for more unique hash generation + /// on the same block template by expanding the nonce-space. Used when other fields (e.g. + /// `nonce` or `extranonce`) are fully exhausted. + /// + /// - `true`: The downstream supports version rolling and can modify specific bits in the + /// version field. This is useful for increasing hash rate efficiency by exploring a larger + /// solution space. + /// - `false`: The downstream does not support version rolling and relies on the upstream to + /// provide jobs with adjusted version fields. pub version_rolling: bool, } -/// SetupConnection sugared +/// Encapsulates settings for pairing upstream and downstream nodes. +/// +/// Simplifies the [`SetupConnection`] configuration process by bundling the protocol, version +/// range, and flag settings. #[derive(Debug, Copy, Clone)] pub struct PairSettings { + /// Role the settings apply to. pub protocol: Protocol, + + /// Minimum protocol version the node supports. pub min_v: u16, + + /// Maximum protocol version the node supports. pub max_v: u16, + + /// Flags indicating optional protocol features the node supports (e.g. header-only mining, + /// Noise protocol support, job configuration parameters, etc.). Each protocol field has its own + /// flags. pub flags: u32, } -/// General properties that every Sv2 compatible upstream node must implement.
+/// Properties defining behaviors common to all Sv2 upstream nodes. pub trait IsUpstream + ?Sized> { - /// Used to bitcoin protocol version for the channel. + /// Returns the protocol version used by the upstream node. fn get_version(&self) -> u16; - /// Used to get flags for the defined sv2 message protocol + + /// Returns the flags indicating the upstream node's protocol capabilities. fn get_flags(&self) -> u32; - /// Used to check if the upstream supports the protocol that the downstream wants to use + + /// Lists the protocols supported by the upstream node. + /// + /// Used to check if the upstream supports the protocol that the downstream wants to use. fn get_supported_protocols(&self) -> Vec; - /// Checking if the upstream supports the protocol that the downstream wants to use. + + /// Verifies if the upstream can pair with the given downstream settings. fn is_pairable(&self, pair_settings: &PairSettings) -> bool { let protocol = pair_settings.protocol; let min_v = pair_settings.min_v; @@ -44,71 +99,117 @@ pub trait IsUpstream + ?Sized> let check_flags = SetupConnection::check_flags(protocol, self.get_flags(), flags); check_version && check_flags } - /// Should return the channel id + + /// Returns the channel ID managed by the upstream node. fn get_id(&self) -> u32; - /// Should return a request id mapper for viewing and handling request ids. + + /// Provides a request ID mapper for viewing and managing upstream-downstream communication. fn get_mapper(&mut self) -> Option<&mut RequestIdMapper>; - /// Should return the selector of the Downstream node. See [`crate::selectors`]. + + /// Returns the selector ([`crate::selectors`]) for managing downstream nodes. fn get_remote_selector(&mut self) -> &mut Sel; } -/// Channel to be opened with the upstream nodes. +/// The types of channels that can be opened with upstream nodes. #[derive(Debug, Clone, Copy)] pub enum UpstreamChannel { - /// nominal hash rate + /// A standard channel with a nominal hash rate.
+ /// + /// Typically used for mining devices with a direct connection to an upstream node (e.g. a pool + /// or proxy). The hashrate is specified as a `f32` value, representing the expected + /// computational capacity of the miner. Standard(f32), + + /// A grouped channel for aggregated mining. + /// + /// Aggregates mining work for multiple devices or clients under a single channel, enabling the + /// upstream to manage work distribution and result aggregation for an entire group of miners. + /// + /// Typically used by a mining proxy managing multiple downstream miners. Group, + + /// An extended channel for additional features. + /// + /// Provides additional features or capabilities beyond standard and group channels, + /// accommodating features like custom job templates or experimental protocol extensions. Extended, } +/// Properties of a standard mining channel. +/// +/// Standard channels are intended to be used by end mining devices with a nominal hashrate, where +/// each device operates on an independent connection to its upstream. #[derive(Debug, Clone)] -/// Standard channels are intended to be used by end mining devices. pub struct StandardChannel { - /// Newly assigned identifier of the channel, stable for the whole lifetime of the connection. - /// e.g. it is used for broadcasting new jobs by `NewExtendedMiningJob` + /// Identifies a specific channel, unique to each mining connection. + /// + /// Dynamically assigned when a mining connection is established (as part of the negotiation + /// process) to avoid conflicts with other connections (e.g. other mining devices) managed by + /// the same upstream node. The identifier remains stable for the whole lifetime of the + /// connection. + /// + /// Used for broadcasting new jobs by [`mining_sv2::NewExtendedMiningJob`]. pub channel_id: u32, - /// Identifier of the group where the standard channel belongs + + /// Identifies a specific group in which the standard channel belongs. 
pub group_id: u32, - /// Initial target for the mining channel + + /// Initial difficulty target assigned to the mining channel. pub target: Target, - /// Extranonce bytes which need to be added to the coinbase to form a fully valid submission: - /// (full coinbase = coinbase_tx_prefix + extranonce_prefix + extranonce + coinbase_tx_suffix). + + /// Additional nonce value used to differentiate shares within the same channel. + /// + /// Helps to avoid nonce collisions when multiple mining devices are working on the same job. + /// + /// The extranonce bytes are added to the coinbase to form a fully valid submission: + /// `full coinbase = coinbase_tx_prefix + extranonce_prefix + extranonce + coinbase_tx_suffix` pub extranonce: Extranonce, } -/// General properties that every Sv2 compatible mining upstream node must implement. +/// Properties of a Sv2-compatible mining upstream node. +/// +/// This trait extends [`IsUpstream`] with additional functionality specific to mining, such as +/// hashrate management and channel updates. pub trait IsMiningUpstream + ?Sized>: IsUpstream { - /// should return total hash rate local to the node + /// Returns the total hashrate managed by the upstream node. fn total_hash_rate(&self) -> u64; - /// add hashrate to the node + + /// Adds hash rate to the upstream node. fn add_hash_rate(&mut self, to_add: u64); - /// get open channels on the node + + /// Returns the list of open channels on the upstream node. fn get_opened_channels(&mut self) -> &mut Vec; - /// update channels + + /// Updates the list of channels managed by the upstream node. fn update_channels(&mut self, c: UpstreamChannel); - /// check if node is limited to hom + + /// Checks if the upstream node supports header-only mining. fn is_header_only(&self) -> bool { has_requires_std_job(self.get_flags()) } } -/// General properties that every Sv2 compatible downstream node must implement. +/// Properties defining behaviors common to all Sv2 downstream nodes.
pub trait IsDownstream { - /// get downstream mining data + /// Returns the common properties of the downstream node (e.g. support for header-only mining, + /// work selection, and version rolling). fn get_downstream_mining_data(&self) -> CommonDownstreamData; } -/// General properties that every Sv2 compatible mining downstream node must implement. +/// Properties of a Sv2-compatible mining downstream node. +/// +/// This trait extends [`IsDownstream`] with additional functionality specific to mining, such as +/// header-only mining checks. pub trait IsMiningDownstream: IsDownstream { - /// check if node is doing hom + /// Checks if the downstream node supports header-only mining. fn is_header_only(&self) -> bool { self.get_downstream_mining_data().header_only } } -// Implemented for the NullDownstreamMiningSelector +// Implemented for the `NullDownstreamMiningSelector`. impl IsUpstream for () { fn get_version(&self) -> u16 { unreachable!("Null upstream do not have a version"); @@ -134,7 +235,7 @@ impl IsUpstream for } } -// Implemented for the NullDownstreamMiningSelector +// Implemented for the `NullDownstreamMiningSelector`. impl IsDownstream for () { fn get_downstream_mining_data(&self) -> CommonDownstreamData { unreachable!("Null downstream do not have mining data"); @@ -160,18 +261,35 @@ impl IsMiningUpstream downstream ids + // A mapping between upstream-assigned request IDs and the original downstream IDs. + // + // In the hashmap, the key is the upstream request ID and the value is the corresponding + // downstream request ID. `BuildNoHashHasher` is an optimization to bypass the hashing step for + // integer keys. request_ids_map: HashMap>, + + // A counter for assigning unique request IDs to upstream nodes, incrementing after every + // assignment. next_id: u32, } impl RequestIdMapper { - /// Builds a new `RequestIdMapper` initialized with an empty hashmap and initializes `next_id` - /// to `0`. + /// Creates a new [`RequestIdMapper`] instance.
pub fn new() -> Self { Self { request_ids_map: HashMap::with_hasher(BuildNoHashHasher::default()), @@ -179,16 +297,19 @@ impl RequestIdMapper { } } - /// Updates the `RequestIdMapper` with a new upstream/downstream mapping. + /// Assigns a new upstream request ID for a request sent by a downstream node. + /// + /// Ensures every request forwarded to the upstream node has a unique ID while retaining + /// traceability to the original downstream request. pub fn on_open_channel(&mut self, id: u32) -> u32 { - let new_id = self.next_id; - self.next_id += 1; + let new_id = self.next_id; // Assign new upstream ID + self.next_id += 1; // Increment next_id for future requests - self.request_ids_map.insert(new_id, id); + self.request_ids_map.insert(new_id, id); // Map new upstream ID to downstream ID new_id } - /// Removes a upstream/downstream mapping from the `RequestIdMapper`. + /// Removes the mapping for a request ID once the response has been processed. pub fn remove(&mut self, upstream_id: u32) -> Option { self.request_ids_map.remove(&upstream_id) } From 088748552c31549a8a8c653dc40980bbe95e3804 Mon Sep 17 00:00:00 2001 From: RJ Rybarczyk Date: Mon, 23 Dec 2024 17:11:59 -0500 Subject: [PATCH 15/18] Selectors doc additions --- protocols/v2/roles-logic-sv2/src/selectors.rs | 264 ++++-------------- 1 file changed, 58 insertions(+), 206 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/selectors.rs b/protocols/v2/roles-logic-sv2/src/selectors.rs index ceb4dfed3..a189a3054 100644 --- a/protocols/v2/roles-logic-sv2/src/selectors.rs +++ b/protocols/v2/roles-logic-sv2/src/selectors.rs @@ -1,38 +1,8 @@ +//! # Selectors and Message Routing +//! //! This module provides selectors and routing logic for managing downstream and upstream nodes //! in a mining proxy environment. Selectors help determine the appropriate remote(s) to relay or //! send messages to. -//! -//! ## Components -//! -//! 
- **`ProxyDownstreamMiningSelector`**: A selector for managing downstream nodes in a mining -//! proxy, mapping requests and channel IDs to specific downstream nodes or groups. -//! - **`NullDownstreamMiningSelector`**: A no-op selector for cases where routing logic is -//! unnecessary, commonly used in test scenarios. -//! - **`GeneralMiningSelector`**: A flexible upstream selector that matches downstream nodes with -//! compatible upstream nodes based on pairing settings and flags. -//! -//! ## Traits -//! -//! - **`DownstreamSelector`**: Base trait for all downstream selectors. -//! - **`DownstreamMiningSelector`**: Specialized trait for selectors managing mining-specific -//! downstream nodes. -//! - **`UpstreamSelector`**: Base trait for upstream node selectors. -//! - **`UpstreamMiningSelctor`**: Specialized trait for selectors managing upstream mining nodes. -//! -//! ## Details -//! -//! ### ProxyDownstreamMiningSelector -//! - Manages mappings for request IDs, channel IDs, and downstream groups. -//! - Provides methods to handle standard channel operations, such as opening channels and -//! retrieving or removing downstream nodes associated with a channel. -//! -//! ### NullDownstreamMiningSelector -//! - Implements all required traits but panics if called. -//! - Useful for minimal setups or as a placeholder in tests. -//! -//! ### GeneralMiningSelector -//! - Matches downstream nodes to upstream nodes based on pairing compatibility. -//! - Tracks upstream nodes and their IDs for efficient lookups. use crate::{ common_properties::{IsDownstream, IsMiningDownstream, IsMiningUpstream, PairSettings}, @@ -42,25 +12,26 @@ use crate::{ use nohash_hasher::BuildNoHashHasher; use std::{collections::HashMap, fmt::Debug as D, sync::Arc}; -/// A selector used for routing messages to specific downstream mining nodes. +/// Proxy selector for routing messages to downstream mining nodes. 
/// -/// This structure maintains mappings for request IDs, channel IDs, and downstream nodes -/// to facilitate efficient message routing. +/// Maintains mappings for request IDs, channel IDs, and downstream nodes to facilitate message +/// routing. #[derive(Debug, Clone, Default)] pub struct ProxyDownstreamMiningSelector { // Maps request IDs to their corresponding downstream nodes. request_id_to_remotes: HashMap>, BuildNoHashHasher>, + // Maps group channel IDs to a list of downstream nodes. channel_id_to_downstreams: HashMap>>, BuildNoHashHasher>, + // Maps standard channel IDs to a single downstream node. channel_id_to_downstream: HashMap>, BuildNoHashHasher>, } impl ProxyDownstreamMiningSelector { - /// Creates a new `ProxyDownstreamMiningSelector`. - /// - /// This initializes the internal mappings with `nohash` hasher for performance. + /// Creates a new [`ProxyDownstreamMiningSelector`] instance. pub fn new() -> Self { + // `BuildNoHashHasher` is an optimization to bypass the hashing step for integer keys Self { request_id_to_remotes: HashMap::with_hasher(BuildNoHashHasher::default()), channel_id_to_downstreams: HashMap::with_hasher(BuildNoHashHasher::default()), @@ -68,9 +39,7 @@ impl ProxyDownstreamMiningSelector { } } - /// Creates a new `ProxyDownstreamMiningSelector` wrapped in a mutex and an `Arc`. - /// - /// This is useful for concurrent environments where shared ownership is needed. + /// Creates a new [`ProxyDownstreamMiningSelector`] instance wrapped in an `Arc`. pub fn new_as_mutex() -> Arc> where Self: Sized, @@ -80,10 +49,7 @@ impl ProxyDownstreamMiningSelector { } impl ProxyDownstreamMiningSelector { - // Removes a downstream node from all mappings. - // - // # Arguments - // - `d`: The downstream node to be removed. + // Removes a specific downstream node from all mappings. 
fn _remove_downstream(&mut self, d: &Arc>) { self.request_id_to_remotes.retain(|_, v| !Arc::ptr_eq(v, d)); self.channel_id_to_downstream @@ -94,25 +60,11 @@ impl ProxyDownstreamMiningSelector { impl DownstreamMiningSelector for ProxyDownstreamMiningSelector { - // Registers a request ID and its associated downstream node. - // - // # Arguments - // - `request_id`: The unique request ID. - // - `downstream`: The downstream node associated with the request. + /// Records a request to open a standard channel with an associated downstream node. fn on_open_standard_channel_request(&mut self, request_id: u32, downstream: Arc>) { self.request_id_to_remotes.insert(request_id, downstream); } - // Finalizes the mapping of a standard channel to its downstream node. - // - // # Arguments - // - `request_id`: The request ID used during the channel opening. - // - `g_channel_id`: The group channel ID. - // - `channel_id`: The specific standard channel ID. - // - // # Returns - // - `Ok`: The downstream node associated with the request. - // - `Err`: If the request ID is unknown. fn on_open_standard_channel_success( &mut self, request_id: u32, @@ -136,24 +88,10 @@ impl DownstreamMiningSelector } // Retrieves all downstream nodes associated with a standard/group channel ID. - // - // # Arguments - // - `channel_id`: The standard/group channel ID. - // - // # Returns - // - `Some`: A reference to the vector of downstream nodes. - // - `None`: If no nodes are associated with the channel. fn get_downstreams_in_channel(&self, channel_id: u32) -> Option<&Vec>>> { self.channel_id_to_downstreams.get(&channel_id) } - // Removes all downstream nodes associated with a standard/group channel ID. - // - // # Arguments - // - `channel_id`: The standard/group channel ID. - // - // # Returns - // A vector of the removed downstream nodes. 
fn remove_downstreams_in_channel(&mut self, channel_id: u32) -> Vec>> { let downs = self .channel_id_to_downstreams @@ -165,10 +103,6 @@ impl DownstreamMiningSelector downs } - // Removes a specific downstream node from all mappings. - // - // # Arguments - // - `d`: The downstream node to be removed. fn remove_downstream(&mut self, d: &Arc>) { for dws in self.channel_id_to_downstreams.values_mut() { dws.retain(|node| !Arc::ptr_eq(node, d)); @@ -177,22 +111,10 @@ impl DownstreamMiningSelector self._remove_downstream(d); } - // Retrieves the downstream node associated with a specific standard channel ID. - // - // # Arguments - // - `channel_id`: The standard channel ID. - // - // # Returns - // - `Some`: The downstream node. - // - `None`: If no node is associated with the channel. fn downstream_from_channel_id(&self, channel_id: u32) -> Option>> { self.channel_id_to_downstream.get(&channel_id).cloned() } - // Retrieves all downstream nodes currently managed by this selector. - // - // # Returns - // A vector of downstream nodes. fn get_all_downstreams(&self) -> Vec>> { self.channel_id_to_downstream.values().cloned().collect() } @@ -200,32 +122,22 @@ impl DownstreamMiningSelector impl DownstreamSelector for ProxyDownstreamMiningSelector {} -/// Implemented by a selector used by an upstream mining node or and upstream mining node -/// abstraction in order to find the right downstream to which a message should be sent or relayed. +/// Specialized trait for selectors managing downstream mining nodes. +/// +/// Logic for an upstream mining node to locate the correct downstream node to which a message +/// should be sent or relayed. pub trait DownstreamMiningSelector: DownstreamSelector { - /// Handles a request to open a standard channel. - /// - /// # Arguments - /// - `request_id`: The ID of the request. - /// - `downstream`: A reference to the downstream requesting the channel. + /// Handles a downstream node's request to open a standard channel. 
fn on_open_standard_channel_request( &mut self, request_id: u32, downstream: Arc>, ); - /// Handles a successful response to opening a standard channel. - /// - /// # Arguments - /// - `request_id`: The ID of the request. - /// - `g_channel_id`: The global channel ID. - /// - `channel_id`: The local channel ID. - /// - /// # Returns - /// - `Result>, Error>`: The downstream associated with the channel or an - /// error. + /// Handles the successful opening of a standard channel with a downstream node. Returns an + /// error if the request ID is unknown. fn on_open_standard_channel_success( &mut self, request_id: u32, @@ -233,62 +145,42 @@ pub trait DownstreamMiningSelector: channel_id: u32, ) -> Result>, Error>; - /// Retrieves all downstream's associated with a channel ID. - /// - /// # Arguments - /// - `channel_id`: The channel ID to query. - /// - /// # Returns - /// - `Option<&Vec>>>`: The list of downstream's or `None`. + /// Retrieves all downstream nodes associated with a channel ID. fn get_downstreams_in_channel(&self, channel_id: u32) -> Option<&Vec>>>; - /// Removes all downstream's associated with a channel ID. - /// - /// # Arguments - /// - `channel_id`: The channel ID to remove downstream's from. - /// - /// # Returns - /// - `Vec>>`: The removed downstream's. + /// Removes all downstream nodes associated with a channel, returning all removed downstream + /// nodes. fn remove_downstreams_in_channel(&mut self, channel_id: u32) -> Vec>>; /// Removes a specific downstream. - /// - /// # Arguments - /// - `d`: A reference to the downstream to remove. fn remove_downstream(&mut self, d: &Arc>); - /// Retrieves a downstream by channel ID (only for standard channels). - /// - /// # Arguments - /// - `channel_id`: The channel ID to query. - /// - /// # Returns - /// - `Option>>`: The downstream or `None`. + /// Retrieves the downstream node associated with a specific standard channel ID. + /// + /// Only for standard channels.
fn downstream_from_channel_id(&self, channel_id: u32) -> Option>>; - /// Retrieves all downstream's. - /// - /// # Returns - /// - `Vec>>`: All downstream's. + /// Retrieves all downstream nodes managed by the selector. fn get_all_downstreams(&self) -> Vec>>; } -/// A generic downstream selector. +/// Base trait for selectors managing downstream nodes. pub trait DownstreamSelector {} -/// A no-op implementation of `DownstreamMiningSelector`. +/// No-op selector for cases where routing logic is unnecessary. /// -/// This selector is primarily used for testing or minimal setups where routing logic is not needed. +/// Primarily used for testing, it implements all required traits, but panics with an +/// [`unreachable`] if called. #[derive(Debug, Clone, Copy, Default)] pub struct NullDownstreamMiningSelector(); impl NullDownstreamMiningSelector { - /// Creates a new `NullDownstreamMiningSelector`. + /// Creates a new [`NullDownstreamMiningSelector`] instance. pub fn new() -> Self { NullDownstreamMiningSelector() } - /// Creates a new `NullDownstreamMiningSelector` wrapped in a mutex and an `Arc`. + /// Creates a new [`NullDownstreamMiningSelector`] instance wrapped in an `Arc`. pub fn new_as_mutex() -> Arc> where Self: Sized, @@ -298,10 +190,7 @@ impl NullDownstreamMiningSelector { } impl DownstreamMiningSelector for NullDownstreamMiningSelector { - // Called when a standard channel open request is received. - // - // This method is unreachable in `NullDownstreamMiningSelector` since it is a no-op - // implementation. + /// [`unreachable`] in this no-op implementation. fn on_open_standard_channel_request( &mut self, _request_id: u32, @@ -310,9 +199,7 @@ impl DownstreamMiningSelector for NullDownst unreachable!("on_open_standard_channel_request") } - // Called when a standard channel open request is successful. - // - // This method is unreachable in `NullDownstreamMiningSelector`. + /// [`unreachable`] in this no-op implementation. 
fn on_open_standard_channel_success( &mut self, _request_id: u32, @@ -322,37 +209,27 @@ impl DownstreamMiningSelector for NullDownst unreachable!("on_open_standard_channel_success") } - // Retrieves the downstream'ss in a specific channel. - // - // This method is unreachable in `NullDownstreamMiningSelector`. + /// [`unreachable`] in this no-op implementation. fn get_downstreams_in_channel(&self, _channel_id: u32) -> Option<&Vec>>> { unreachable!("get_downstreams_in_channel") } - // Removes downstream's in a specific channel. - // - // This method is unreachable in `NullDownstreamMiningSelector`. + /// [`unreachable`] in this no-op implementation. fn remove_downstreams_in_channel(&mut self, _channel_id: u32) -> Vec>> { unreachable!("remove_downstreams_in_channel") } - // Removes a specific downstream node. - // - // This method is unreachable in `NullDownstreamMiningSelector`. + /// [`unreachable`] in this no-op implementation. fn remove_downstream(&mut self, _d: &Arc>) { unreachable!("remove_downstream") } - // Retrieves the downstream associated with a specific channel ID. - // - // This method is unreachable in `NullDownstreamMiningSelector`. + /// [`unreachable`] in this no-op implementation. fn downstream_from_channel_id(&self, _channel_id: u32) -> Option>> { unreachable!("downstream_from_channel_id") } - // Retrieves all downstream nodes managed by this selector. - // - // This method is unreachable in `NullDownstreamMiningSelector`. + /// [`unreachable`] in this no-op implementation. fn get_all_downstreams(&self) -> Vec>> { unreachable!("get_all_downstreams") } @@ -360,45 +237,39 @@ impl DownstreamMiningSelector for NullDownst impl DownstreamSelector for NullDownstreamMiningSelector {} -/// Trait for selecting upstream nodes in a mining context. +/// Base trait for selectors managing upstream nodes. pub trait UpstreamSelector {} -/// Trait for selecting upstream mining nodes. +/// Specialized trait for selectors managing upstream mining nodes. 
+/// +/// This trait is implemented by roles with multiple upstream connections, such as proxies or +/// pools. It provides logic to route messages received by the implementing role (e.g., from mining +/// devices or downstream proxies) to the appropriate upstream nodes. /// -/// This trait allows pairing downstream mining nodes with upstream nodes -/// based on their settings and capabilities. +/// For example, a mining proxy with multiple upstream pools would implement this trait to handle +/// upstream connection setup, failover, and routing logic. pub trait UpstreamMiningSelctor< Down: IsMiningDownstream, Up: IsMiningUpstream, Sel: DownstreamMiningSelector, >: UpstreamSelector { - /// Handles the `SetupConnection` process. + /// Handles the setup of connections to upstream nodes with the given [`PairSettings`]. /// - /// # Arguments - /// - `pair_settings`: The settings for pairing downstream and upstream nodes. - /// - /// # Returns - /// - `Ok((Vec>>, u32))`: A vector of upstream nodes and their combined flags. - /// - `Err`: If no upstreams are pairable. + /// Returns an error if the upstream and downstream node(s) are unpairable. #[allow(clippy::type_complexity)] fn on_setup_connection( &mut self, pair_settings: &PairSettings, ) -> Result<(Vec>>, u32), Error>; - /// Retrieves an upstream node by its ID. - /// - /// # Arguments - /// - `upstream_id`: The unique ID of the upstream node. - /// - /// # Returns - /// - `Some`: The upstream node. - /// - `None`: If no upstream is found. + /// Retrieves an upstream node by its ID, if exists. fn get_upstream(&self, upstream_id: u32) -> Option>>; } -/// General implementation of an upstream mining selector. +/// Selector for routing messages to downstream nodes based on pair settings and flags. +/// +/// Tracks upstream nodes and their IDs for efficient lookups. 
#[derive(Debug)] pub struct GeneralMiningSelector< Sel: DownstreamMiningSelector, @@ -407,9 +278,12 @@ pub struct GeneralMiningSelector< > { /// List of upstream nodes. pub upstreams: Vec>>, - /// Mapping of upstream IDs to their respective nodes. + + /// Mapping of upstream IDs to their corresponding upstream nodes. pub id_to_upstream: HashMap>, BuildNoHashHasher>, + sel: std::marker::PhantomData, + down: std::marker::PhantomData, } @@ -419,10 +293,7 @@ impl< Down: IsMiningDownstream, > GeneralMiningSelector { - /// Creates a new `GeneralMiningSelector`. - /// - /// # Arguments - /// - `upstreams`: A vector of upstream nodes. + /// Creates a new [`GeneralMiningSelector`] instance with the given upstream nodes. pub fn new(upstreams: Vec>>) -> Self { let mut id_to_upstream = HashMap::with_hasher(BuildNoHashHasher::default()); for up in &upstreams { @@ -437,9 +308,6 @@ impl< } /// Updates the list of upstream nodes. - /// - /// # Arguments - /// - `upstreams`: The new list of upstream nodes. pub fn update_upstreams(&mut self, upstreams: Vec>>) { self.upstreams = upstreams; } @@ -459,14 +327,6 @@ impl< Up: IsMiningUpstream, > UpstreamMiningSelctor for GeneralMiningSelector { - // Handles the `SetupConnection` process and determines the pairable upstream nodes. - // - // # Arguments - // - `pair_settings`: The settings for pairing downstream and upstream nodes. - // - // # Returns - // - `Ok((Vec>>, u32))`: Pairable upstream nodes and their combined flags. - // - `Err`: If no upstreams are pairable. fn on_setup_connection( &mut self, pair_settings: &PairSettings, @@ -489,14 +349,6 @@ impl< Err(Error::NoPairableUpstream((2, 2, 0))) } - // Retrieves an upstream node by its ID. - // - // # Arguments - // - `upstream_id`: The unique ID of the upstream node. - // - // # Returns - // - `Some`: The upstream node. - // - `None`: If no upstream is found. 
fn get_upstream(&self, upstream_id: u32) -> Option>> { self.id_to_upstream.get(&upstream_id).cloned() } From 6af742513daf640ade3306cff6e3d0031e9f07d1 Mon Sep 17 00:00:00 2001 From: RJ Rybarczyk Date: Mon, 23 Dec 2024 21:59:27 -0500 Subject: [PATCH 16/18] Utils doc additions --- protocols/v2/roles-logic-sv2/src/utils.rs | 335 ++++++++++++++++------ 1 file changed, 242 insertions(+), 93 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/utils.rs b/protocols/v2/roles-logic-sv2/src/utils.rs index 149958fcf..42e3053e2 100644 --- a/protocols/v2/roles-logic-sv2/src/utils.rs +++ b/protocols/v2/roles-logic-sv2/src/utils.rs @@ -1,4 +1,9 @@ -//! A collection of helper primitives +//! # Collection of Helper Primitives +//! +//! Provides a collection of utilities and helper structures used throughout the Stratum V2 +//! protocol implementation. These utilities simplify common tasks, such as ID generation and +//! management, mutex management, difficulty target calculations, merkle root calculations, and +//! more. use std::{ convert::{TryFrom, TryInto}, @@ -30,7 +35,8 @@ use tracing::error; use crate::errors::Error; -/// Generator of unique ids. +/// Generator of unique IDs for channels and groups. +/// /// It keeps an internal counter, which is incremented every time a new unique id is requested. #[derive(Debug, PartialEq, Eq, Clone)] pub struct Id { @@ -38,11 +44,12 @@ pub struct Id { } impl Id { - /// constructor + /// Creates a new [`Id`] instance initialized to `0`. pub fn new() -> Self { Self { state: 0 } } - /// return current state and increment + + /// Increments then returns the internal state on a new ID. #[allow(clippy::should_implement_trait)] pub fn next(&mut self) -> u32 { self.state += 1; @@ -56,36 +63,37 @@ impl Default for Id { } } -/// A custom `Mutex` implementation that provides enhanced safety and ergonomics over -/// the standard `std::sync::Mutex`. +/// Custom synchronization primitive for managing shared mutable state. 
+///
+/// This custom mutex implementation builds on [`std::sync::Mutex`] to enhance usability and safety
+/// in concurrent environments. It provides ergonomic methods to safely access and modify inner
+/// values while reducing the risk of deadlocks and panics. It is used throughout SRI applications
+/// to manage shared state across multiple threads, such as tracking active mining sessions,
+/// routing jobs, and managing connections safely and efficiently.
 ///
-/// This `Mutex` offers the following features:
+/// ## Advantages
 /// - **Closure-Based Locking:** The `safe_lock` method encapsulates the locking process, ensuring
 ///   the lock is automatically released after the closure completes.
-/// - **Error Handling:** `safe_lock` enforces explicit handling of potential `PoisonError`
+/// - **Error Handling:** `safe_lock` enforces explicit handling of potential [`PoisonError`]
 ///   conditions, reducing the risk of panics caused by poisoned locks.
 /// - **Panic-Safe Option:** The `super_safe_lock` method provides an alternative that unwraps the
 ///   result of `safe_lock`, with optional runtime safeguards against panics.
 /// - **Extensibility:** Includes feature-gated functionality to customize behavior, such as
-///   stricter runtime checks using external tools like `no-panic`.
-///
-/// This design minimizes the risk of common concurrency pitfalls and promotes safer
-/// handling of shared mutable state.
+///   stricter runtime checks using external tools like
+///   [`no-panic`](https://github.com/dtolnay/no-panic).
 #[derive(Debug)]
 pub struct Mutex(Mutex_);
 
 impl Mutex {
-    /// `safe_lock` takes a closure that takes a mutable reference to the inner value, and returns a
-    /// result that either contains the return value of the closure, or a `PoisonError` that
-    /// contains a `MutexGuard` to the inner value. This is used to ensure no async executions
-    /// while locked. To prevent `PoisonLock` errors, unwraps should never be used within the
-    /// closure. 
Always return the result and handle outside of the safe lock.
+    /// Mutex safe lock.
     ///
-    /// Arguments:
+    /// Safely locks the `Mutex` and executes a closure (`thunk`) with a mutable reference to the
+    /// inner value. This ensures that the lock is automatically released after the closure
+    /// completes, preventing deadlocks. It explicitly returns a [`PoisonError`] containing a
+    /// [`MutexGuard`] to the inner value in cases where the lock is poisoned.
     ///
-    /// * `thunk`: A closure that takes a mutable reference to the value inside the Mutex and
-    ///   returns a
-    /// value of type Ret.
+    /// To prevent poison lock errors, unwraps should never be used within the closure. The result
+    /// should always be returned and handled outside of the safe lock.
     pub fn safe_lock(&self, thunk: F) -> Result>>
     where
         F: FnOnce(&mut T) -> Ret,
@@ -96,7 +104,13 @@ impl Mutex {
         Ok(return_value)
     }
 
-    /// Mutex super safe lock
+    /// Mutex super safe lock.
+    ///
+    /// Locks the `Mutex` and executes a closure (`thunk`) with a mutable reference to the inner
+    /// value, panicking if the lock is poisoned.
+    ///
+    /// This is a convenience wrapper around `safe_lock` for cases where explicit error handling is
+    /// unnecessary or undesirable. Use with caution in production code.
     pub fn super_safe_lock(&self, thunk: F) -> Ret
     where
         F: FnOnce(&mut T) -> Ret,
@@ -129,34 +143,34 @@ impl Mutex {
         //}
     }
 
-    /// Mutex constructor
+    /// Creates a new [`Mutex`] instance, storing the initial value inside.
    pub fn new(v: T) -> Self {
         Mutex(Mutex_::new(v))
     }
 
-    /// remove from Mutex
+    /// Removes lock for direct access.
+    ///
+    /// Acquires a lock on the [`Mutex`] and returns a [`MutexGuard`] for direct access to the
+    /// inner value. Allows for manual lock handling and is useful in scenarios where closures are
+    /// not convenient.
pub fn to_remove(&self) -> Result, PoisonError>> {
         self.0.lock()
     }
 }
 
-/// It takes a coinbase transaction, a list of transactions, and a list of indices, and returns the
-/// merkle root of the transactions at the given indices
-///
-/// Arguments:
-///
-/// * `coinbase_tx_prefix`: the first part of the coinbase transaction, before the extranonce.
-/// This should be converted from [`binary_sv2::B064K`]
-/// * `coinbase_tx_suffix`: the coinbase transaction suffix, which is the part of the coinbase
-/// transaction after the extranonce. This should be converted from [`binary_sv2::B064K`]
-/// * `extranonce`: the extranonce that the miner is using this value should be converted from
-/// This should be converted from [`binary_sv2::B032`] and padded with zeros if not 32 bytes long
-/// * `path`: a list of transaction hashes that are used to calculate the merkle root.
-/// This should be converted from [`binary_sv2::U256`]
+/// Computes the Merkle root from coinbase transaction components and a path of transaction hashes.
 ///
-/// Returns:
+/// Validates and deserializes a coinbase transaction before building the 32-byte Merkle root.
+/// Returns [`None`] if the arguments are invalid.
 ///
-/// A 32 byte merkle root as a vector if successful and None if the arguments are invalid.
+/// ## Components
+/// * `coinbase_tx_prefix`: First part of the coinbase transaction (the part before the extranonce).
+///   Should be converted from [`binary_sv2::B064K`].
+/// * `coinbase_tx_suffix`: Coinbase transaction suffix (the part after the extranonce). Should be
+///   converted from [`binary_sv2::B064K`].
+/// * `extranonce`: Extra nonce space. Should be converted from [`binary_sv2::B032`] and padded with
+///   zeros if not `32` bytes long.
+/// * `path`: List of transaction hashes. Should be converted from [`binary_sv2::U256`].
pub fn merkle_root_from_path>( coinbase_tx_prefix: &[u8], coinbase_tx_suffix: &[u8], @@ -183,13 +197,25 @@ pub fn merkle_root_from_path>( Some(merkle_root_from_path_(coinbase_id, path).to_vec()) } -/// calculate merkle root from path +/// Computes the Merkle root from a validated coinbase transaction and a path of transaction +/// hashes. +/// +/// If the `path` is empty, the coinbase transaction hash (`coinbase_id`) is returned as the root. +/// +/// ## Components +/// * `coinbase_id`: Coinbase transaction hash. +/// * `path`: List of transaction hashes. Should be converted from [`binary_sv2::U256`]. pub fn merkle_root_from_path_>(coinbase_id: [u8; 32], path: &[T]) -> [u8; 32] { match path.len() { 0 => coinbase_id, _ => reduce_path(coinbase_id, path), } } + +// Computes the Merkle root by iteratively combines the coinbase transaction hash with each +// transaction hash in the `path`. +// +// Handles the core logic of combining hashes using the Bitcoin double-SHA256 hashing algorithm. fn reduce_path>(coinbase_id: [u8; 32], path: &[T]) -> [u8; 32] { let mut root = coinbase_id; for node in path { @@ -202,10 +228,31 @@ fn reduce_path>(coinbase_id: [u8; 32], path: &[T]) -> [u8; 32] { root } -/// Coinbase output construction utils +/// Coinbase output transaction. +/// +/// Typically used for parsing coinbase outputs defined in SRI role configuration files. #[derive(Debug, Clone)] pub struct CoinbaseOutput { + /// Specifies type of the script used in the output. + /// + /// Supported values include: + /// - `"P2PK"`: Pay-to-Public-Key + /// - `"P2PKH"`: Pay-to-Public-Key-Hash + /// - `"P2SH"`: Pay-to-Script-Hash + /// - `"P2WPKH"`: Pay-to-Witness-Public-Key-Hash + /// - `"P2WSH"`: Pay-to-Witness-Script-Hash + /// - `"P2TR"`: Pay-to-Taproot pub output_script_type: String, + + /// Value associated with the script, typically a public key or script hash. + /// + /// This field's interpretation depends on the `output_script_type`: + /// - For `"P2PK"`: The raw public key. 
+ /// - For `"P2PKH"`: A public key hash. + /// - For `"P2WPKH"`: A witness public key hash. + /// - For `"P2SH"`: A script hash. + /// - For `"P2WSH"`: A witness script hash. + /// - For `"P2TR"`: An x-only public key. pub output_script_value: String, } @@ -276,27 +323,49 @@ pub enum InputError { DivisionByZero, } -/// Calculates the target (in big endian) given some hashrate and share frequency (per minute). +/// Calculates the mining target threshold for a mining device based on its hashrate (H/s) and +/// desired share frequency (shares/min). +/// +/// Determines the maximum hash value (target), in big endian, that a mining device can produce to +/// find a valid share. The target is derived from the miner's hashrate and the expected number of +/// shares per minute, aligning the miner's workload with the upstream's (e.g. pool's) share +/// frequency requirements. +/// +/// Typically used during connection setup to assign a starting target based on the mining device's +/// reported hashrate and to recalculate during runtime when a mining device's hashrate changes, +/// ensuring they submit shares at the desired rate. /// -/// If we want a specific number of shares per minute from a miner of known hashrate, -/// how do we set the adequate target? +/// ## Formula +/// ```text +/// t = (2^256 - sh) / (sh + 1) +/// ``` +/// +/// Where: +/// - `h`: Mining device hashrate (H/s). +/// - `s`: Shares per second `60 / shares/min` (s). +/// - `sh`: `h * s`, the mining device's work over `s` seconds. /// /// According to \[1] and \[2], it is possible to model the probability of finding a block with -/// a random variable X whose distribution is negative hypergeometric \[3]. -/// Such a variable is characterized as follows. Say that there are n (2^256) elements (possible -/// hash values), of which t (values <= target) are defined as success and the remaining as -/// failures. 
The variable X has co-domain the positive integers, and X=k is the event where element -/// are drawn one after the other, without replacement, and only the k-th element is successful. -/// The expected value of this variable is (n-t)/(t+1). -/// So, on average, a miner has to perform (2^256-t)/(t+1) hashes before finding hash whose value -/// is below the target t. If the pool wants, on average, a share every s seconds, then, on -/// average, the miner has to perform h*s hashes before finding one that is smaller than the -/// target, where h is the miner's hashrate. Therefore, s*h= (2^256-t)/(t+1). If we consider h the -/// global bitcoin's hashrate, s = 600 seconds and t the bitcoin global target, then, for all the -/// blocks we tried, the two members of the equations have the same order of magnitude and, most -/// of the cases, they coincide with the first two digits. We take this as evidence of the -/// correctness of our calculations. Thus, if the pool wants on average a share every s -/// seconds from a miner with hashrate h, then the target t for the miner is t = (2^256-sh)/(sh+1). +/// a random variable X whose distribution is negative hypergeometric \[3]. Such a variable is +/// characterized as follows: +/// +/// Say that there are `n` (`2^256`) elements (possible hash values), of which `t` (values <= +/// target) are defined as success and the remaining as failures. The variable `X` has co-domain +/// the positive integers, and `X=k` is the event where element are drawn one after the other, +/// without replacement, and only the `k`th element is successful. The expected value of this +/// variable is `(n-t)/(t+1)`. So, on average, a miner has to perform `(2^256-t)/(t+1)` hashes +/// before finding hash whose value is below the target `t`. 
+///
+/// If the pool wants, on average, a share every `s` seconds, then, on average, the miner has to
+/// perform `h*s` hashes before finding one that is smaller than the target, where `h` is the
+/// miner's hashrate. Therefore, `s*h= (2^256-t)/(t+1)`. If we consider `h` the global Bitcoin's
+/// hashrate, `s = 600` seconds and `t` the Bitcoin global target, then, for all the blocks we
+/// tried, the two members of the equations have the same order of magnitude and, most of the
+/// cases, they coincide with the first two digits.
+///
+/// We take this as evidence of the correctness of our calculations. Thus, if the pool wants on
+/// average a share every `s` seconds from a miner with hashrate `h`, then the target `t` for the
+/// miner is `t = (2^256-sh)/(sh+1)`.
 ///
 /// \[1] [https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3399742](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3399742)
 ///
@@ -349,9 +418,25 @@ pub fn hash_rate_to_target(
     Ok(U256::<'static>::from(target))
 }
 
-/// this function utilizes the equation used in [`hash_rate_to_target`], but
-/// translated to solve for hash_rate given a target: h = (2^256-t)/s(t+1)
-/// where s is seconds_between_two_consecutive_shares and t is target
+/// Calculates the hashrate (H/s) required to produce a specific number of shares per minute for a
+/// given mining target (big endian).
+///
+/// It is the inverse of [`hash_rate_to_target`], enabling backward calculations to estimate a
+/// mining device's performance from its submitted shares.
+///
+/// Typically used to calculate the mining device's effective hashrate during runtime based on the
+/// submitted shares and the assigned target. It also helps detect changes in miner performance and
+/// recalibrate the target (using [`hash_rate_to_target`]) if necessary.
+///
+/// ## Formula
+/// ```text
+/// h = (2^256 - t) / (s * (t + 1))
+/// ```
+///
+/// Where:
+/// - `h`: Mining device hashrate (H/s).
+/// - `t`: Target threshold.
+/// - `s`: Shares per second `60 / shares/min`. pub fn hash_rate_from_target(target: U256<'static>, share_per_min: f64) -> Result { // checks that we are not dividing by zero if share_per_min == 0.0 { @@ -393,12 +478,13 @@ pub fn hash_rate_from_target(target: U256<'static>, share_per_min: f64) -> Resul Ok(result as f64) } +// Converts a [`Uint128`] to a `u128`. fn from_uint128_to_u128(input: Uint128) -> u128 { let input = input.to_be_bytes(); u128::from_be_bytes(input) } -/// helper converter u128 to uint256 +/// Converts a `u128` to a [`Uint256`]. pub fn from_u128_to_uint256(input: u128) -> Uint256 { let input: [u8; 16] = input.to_be_bytes(); let mut be_bytes = [0_u8; 32]; @@ -408,7 +494,23 @@ pub fn from_u128_to_uint256(input: u128) -> Uint256 { Uint256::from_be_bytes(be_bytes) } -/// Used to package multiple SV2 channels into a single group. +/// Generates and manages unique IDs for groups and channels. +/// +/// [`GroupId`] allows combining the group and channel [`Id`]s into a single 64-bit value, enabling +/// efficient tracking and referencing of group-channel relationships. +/// +/// This is specifically used for packaging multiple channels into a single group, such that +/// multiple mining or communication channels can be managed as a cohesive unit. This is +/// particularly useful in scenarios where multiple downstreams share common properties or need to +/// be treated collectively for routing or load balancing. +/// +/// A group acts as a container for multiple channels. Each channel represents a distinct +/// communication pathway between a downstream (e.g. a mining device) and an upstream (e.g. a proxy +/// or pool). Channels within a group might share common configurations, such as difficulty +/// settings or work templates. Operations like broadcasting job updates or handling difficulty +/// adjustments can be efficiently applied to all channels in a group. 
By treating a group as a +/// single entity, the protocol reduces overhead of managing individual channels, especially in +/// large mining farms. #[derive(Debug, Default)] pub struct GroupId { group_ids: Id, @@ -416,7 +518,9 @@ pub struct GroupId { } impl GroupId { - /// New GroupId it starts with groups 0, since 0 is reserved for hom downstream's + /// Creates a new [`GroupId`] instance. + /// + /// New GroupId it starts with groups 0, since 0 is reserved for hom downstream's. pub fn new() -> Self { Self { group_ids: Id::new(), @@ -424,19 +528,28 @@ impl GroupId { } } - /// Create a group and return the id + /// Generates a new unique group ID. + /// + /// Increments the internal group ID counter and returns the next available group ID. pub fn new_group_id(&mut self) -> u32 { self.group_ids.next() } - /// Create a channel for a particular group and return the channel id - /// _group_id is left for a future use of this API where we have an hierarchy of ids so that we - /// don't break old versions + /// Generates a new unique channel ID for a given group. + /// + /// Increments the internal channel ID counter and returns the next available channel ID. + /// + /// **Note**: The `_group_id` parameter is reserved for future use to create a hierarchical + /// structure of IDs without breaking compatibility with older versions. pub fn new_channel_id(&mut self, _group_id: u32) -> u32 { self.channel_ids.next() } - /// Concatenate a group and a channel id into a complete id + /// Combines a group ID and channel ID into a single 64-bit unique ID. + /// + /// Concatenates the group ID and channel ID, storing the group ID in the higher 32 bits and + /// the channel ID in the lower 32 bits. This combined identifier is useful for efficiently + /// tracking and referencing unique group-channel pairs. 
pub fn into_complete_id(group_id: u32, channel_id: u32) -> u64 { let part_1 = channel_id.to_le_bytes(); let part_2 = group_id.to_le_bytes(); @@ -445,13 +558,17 @@ impl GroupId { ]) } - /// Get the group part from a complete id + /// Extracts the group ID from a complete group-channel 64-bit unique ID. + /// + /// The group ID is the higher 32 bits. pub fn into_group_id(complete_id: u64) -> u32 { let complete = complete_id.to_le_bytes(); u32::from_le_bytes([complete[4], complete[5], complete[6], complete[7]]) } - /// Get the channel part from a complete id + /// Extracts the channel ID from a complete group-channel 64-bit unique ID. + /// + /// The channel ID is the lower 32 bits. pub fn into_channel_id(complete_id: u64) -> u32 { let complete = complete_id.to_le_bytes(); u32::from_le_bytes([complete[0], complete[1], complete[2], complete[3]]) @@ -591,21 +708,22 @@ fn test_merkle_root_from_path() { ); } -/// Converts a u256 to a BlockHash type +/// Converts a `u256` to a [`BlockHash`] type. pub fn u256_to_block_hash(v: U256<'static>) -> BlockHash { let hash: [u8; 32] = v.to_vec().try_into().unwrap(); let hash = Hash::from_inner(hash); BlockHash::from_hash(hash) } -/// Returns a new `BlockHeader`. -/// Expected endianness inputs: -/// version LE -/// prev_hash BE -/// merkle_root BE -/// time BE -/// bits BE -/// nonce BE +// Returns a new `BlockHeader`. +// +// Expected endianness inputs: +// `version` LE +// `prev_hash` BE +// `merkle_root` BE +// `time` BE +// `bits` BE +// `nonce` BE #[allow(dead_code)] pub(crate) fn new_header( version: i32, @@ -639,14 +757,15 @@ pub(crate) fn new_header( }) } -/// Returns hash of the `BlockHeader`. -/// Endianness reference for the correct hash: -/// version LE -/// prev_hash BE -/// merkle_root BE -/// time BE -/// bits BE -/// nonce BE +// Returns hash of the `BlockHeader`. 
+// +// Endianness reference for the correct hash: +// `version` LE +// `prev_hash` BE +// `merkle_root` BE +// `time` BE +// `bits` BE +// `nonce` BE #[allow(dead_code)] pub(crate) fn new_header_hash<'decoder>(header: BlockHeader) -> U256<'decoder> { let hash = header.block_hash().to_vec(); @@ -662,6 +781,8 @@ fn u128_as_u256(v: u128) -> Uint256 { Uint256::from_be_slice(&u256).unwrap() } +/// TODO: Not used, to be removed. +/// /// target = u256_max * (shar_per_min / 60) * (2^32 / hash_per_second) /// target = u128_max * ((shar_per_min / 60) * (2^32 / hash_per_second) * u128_max) pub fn target_from_hash_rate(hash_per_second: f32, share_per_min: f32) -> U256<'static> { @@ -675,7 +796,7 @@ pub fn target_from_hash_rate(hash_per_second: f32, share_per_min: f32) -> U256<' target.into() } -/// todo: remove this, not used anywhere +/// TODO: Not used, to be removed. #[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))] pub fn get_target( nonce: u32, @@ -716,7 +837,15 @@ pub fn get_target( hash } -/// Returns a tuple with a list of transaction short hashes and the nonce used to generate them +/// Generates a list of transaction short hashes and a hash of the full transaction list. +/// +/// This function computes a tuple containing: +/// 1. A list of short transaction hashes, calculated using SipHash 24 ([`SipHasher24`]). +/// 2. A combined hash of the full list of transaction IDs. +/// +/// Typically used when a compact representation of transaction IDs is needed or when a combined +/// hash of the full transaction list is required for validation or lookup, like when the Job +/// Declarator client declares a new mining job. pub fn hash_lists_tuple( tx_data: Vec, tx_short_hash_nonce: u64, @@ -735,6 +864,14 @@ pub fn hash_lists_tuple( } /// Computes SipHash 24 of some transaction id (short hash) +/// +/// Computes a short transaction hash using SipHash 24. 
+/// +/// This function uses [`SipHasher24`] to compute a compact, deterministic hash of a transaction ID +/// ([`bitcoin::Txid`]), leveraging a nonce for uniqueness. +/// +/// Typically used to generate short hashes for efficient transaction identification and comparison +/// in contexts where full transaction IDs are unnecessary. pub fn get_short_hash(txid: bitcoin::Txid, tx_short_hash_nonce: u64) -> ShortTxId<'static> { // hash the short hash nonce let nonce_hash = sha256::Hash::hash(&tx_short_hash_nonce.to_le_bytes()); @@ -749,6 +886,10 @@ pub fn get_short_hash(txid: bitcoin::Txid, tx_short_hash_nonce: u64) -> ShortTxI short_tx_id } +/// Computes a combined hash of a list of transaction IDs. +/// +/// Concatenates all transaction IDs ([`bitcoin::Txid`]) into a single byte array and computes a +/// SHA256 hash of the resulting data. fn tx_hash_list_hash_builder(txid_list: Vec) -> U256<'static> { // TODO: understand if this field is redunant and to be deleted since // the full coinbase is known @@ -761,7 +902,15 @@ fn tx_hash_list_hash_builder(txid_list: Vec) -> U256<'static> { hash.to_vec().try_into().unwrap() } -/// Creates a block from a solution submission +/// Creates a block from a solution submission. +/// +/// Facilitates the creation of valid Bitcoin blocks by combining a declared mining job, a list of +/// transactions, and a solution message from the mining device. It encapsulates the necessary data +/// (the coinbase, a list of transactions, and a miner-provided solution) to assemble a complete +/// and valid block that can be submitted to the Bitcoin network. +/// +/// It is used in the Job Declarator server to handle the final step in processing the mining job +/// solutions. pub struct BlockCreator<'a> { last_declare: DeclareMiningJob<'a>, tx_list: Vec, @@ -769,7 +918,7 @@ pub struct BlockCreator<'a> { } impl<'a> BlockCreator<'a> { - /// Constructor + /// Creates a new [`BlockCreator`] instance. 
pub fn new( last_declare: DeclareMiningJob<'a>, tx_list: Vec, From ae93264da2a350ee4d1ed4305672e90b550242a1 Mon Sep 17 00:00:00 2001 From: RJ Rybarczyk Date: Tue, 24 Dec 2024 12:33:19 -0500 Subject: [PATCH 17/18] Parsers doc additions --- protocols/v2/roles-logic-sv2/src/parsers.rs | 59 +++++++++++++++++++-- 1 file changed, 54 insertions(+), 5 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/parsers.rs b/protocols/v2/roles-logic-sv2/src/parsers.rs index bdf9e8640..54e7ef0fa 100644 --- a/protocols/v2/roles-logic-sv2/src/parsers.rs +++ b/protocols/v2/roles-logic-sv2/src/parsers.rs @@ -1,7 +1,23 @@ -//! The parsers module provides logic to convert raw Sv2 message data into rust types, -//! as well as logic to handle conversions among Sv2 rust types +//! # Parsing, Serializing, and Message Type Identification //! -//! Most of the logic on this module is tightly coupled with the binary_sv2 crate. +//! Provides logic to convert raw Stratum V2 (Sv2) message data into Rust types, as well as logic +//! to handle conversions among Sv2 rust types. +//! +//! Most of the logic on this module is tightly coupled with the [`binary_sv2`] crate. +//! +//! ## Responsibilities +//! - **Parsing**: Converts raw Sv2 message bytes into Rust enums ([`CommonMessages`], [`Mining`], +//! etc.). +//! - **Serialization**: Converts Rust enums back into binary format for transmission. +//! - **Protocol Abstraction**: Separates logic for different Sv2 subprotocols, ensuring modular and +//! extensible design. +//! - **Message Metadata**: Identifies message types and channel bits for routing and processing. +//! +//! ## Supported Subprotocols +//! - **Common Messages**: Shared across all Sv2 roles. +//! - **Template Distribution**: Handles block templates and transaction data. +//! - **Job Declaration**: Manages mining job assignments and solutions. +//! - **Mining Protocol**: Manages standard mining communication (e.g., job dispatch, shares). 
use crate::Error; @@ -88,20 +104,31 @@ use mining_sv2::{ use core::convert::{TryFrom, TryInto}; use tracing::error; -// todo: fix this, PoolMessages shouldn't be a generic parser +// TODO: Fix this, PoolMessages shouldn't be a generic parser. /// An alias to a generic parser pub type AnyMessage<'a> = PoolMessages<'a>; +/// Common Sv2 protocol messages used across all subprotocols. +/// +/// These messages are essential +/// for initializing connections and managing endpoints. /// A parser of messages that are common to all Sv2 subprotocols, to be used for parsing raw /// messages #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum CommonMessages<'a> { + /// Notifies about changes in channel endpoint configuration. ChannelEndpointChanged(ChannelEndpointChanged), + + /// Initiates a connection between a client and server. #[cfg_attr(feature = "with_serde", serde(borrow))] SetupConnection(SetupConnection<'a>), + + /// Indicates an error during connection setup. #[cfg_attr(feature = "with_serde", serde(borrow))] SetupConnectionError(SetupConnectionError<'a>), + + /// Acknowledges successful connection setup. SetupConnectionSuccess(SetupConnectionSuccess), } @@ -148,7 +175,29 @@ pub enum JobDeclaration<'a> { SubmitSolution(SubmitSolutionJd<'a>), } -/// A parser of messages of Mining subprotocol, to be used for parsing raw messages +/// Mining subprotocol messages: categorization, encapsulation, and parsing. +/// +/// Encapsulates mining-related Sv2 protocol messages, providing both a structured representation +/// of parsed messages and an abstraction for communication between mining-related roles. These +/// messages are essential for managing mining channels, distributing jobs, and processing shares. +/// +/// ## Purpose +/// - **Parsing Raw Messages**: +/// - Converts raw binary Sv2 mining subprotocol messages into strongly-typed Rust +/// representations. 
+/// - Simplifies deserialization by mapping raw data directly to the appropriate enum variant. +/// - Once parsed, the [`Mining`] enum provides a structured interface that can be passed through +/// routing and processing layers in roles like proxies or pools. +/// - **Encapsulation**: +/// - Groups mining-related messages into a unified type, abstracting away low-level subprotocol +/// details and making it easier to interact with Sv2 protocol messages. +/// - **Facilitating Modular Handling**: +/// - Categorizes mining messages under a single enum, enabling roles (e.g., proxies or pools) to +/// route and process messages more efficiently using pattern matching and centralized logic. +/// - **Bridging Parsed Messages and Role Logic**: +/// - Acts as a bridge between parsed subprotocol messages and role-specific logic, providing a +/// unified interface for handling mining-related communication. This reduces complexity and +/// ensures consistency across roles. #[derive(Clone, Debug)] #[cfg_attr(feature = "with_serde", derive(Serialize, Deserialize))] pub enum Mining<'a> { From db12082c2eba44bbb786318c90c9ec2e0fae1ae9 Mon Sep 17 00:00:00 2001 From: GitGab19 Date: Thu, 9 Jan 2025 19:46:41 +0100 Subject: [PATCH 18/18] docs enhancement --- .../src/channel_logic/channel_factory.rs | 52 ++--- .../roles-logic-sv2/src/channel_logic/mod.rs | 4 +- .../src/channel_logic/proxy_group_channel.rs | 10 +- .../roles-logic-sv2/src/common_properties.rs | 39 ++-- protocols/v2/roles-logic-sv2/src/errors.rs | 5 +- .../v2/roles-logic-sv2/src/handlers/common.rs | 44 ---- .../src/handlers/job_declaration.rs | 109 ---------- .../v2/roles-logic-sv2/src/handlers/mining.rs | 205 ------------------ .../v2/roles-logic-sv2/src/handlers/mod.rs | 11 +- .../src/handlers/template_distribution.rs | 88 -------- .../v2/roles-logic-sv2/src/job_creator.rs | 26 ++- .../v2/roles-logic-sv2/src/job_dispatcher.rs | 4 +- protocols/v2/roles-logic-sv2/src/lib.rs | 50 ++--- 
protocols/v2/roles-logic-sv2/src/parsers.rs | 7 +- .../v2/roles-logic-sv2/src/routing_logic.rs | 113 +--------- protocols/v2/roles-logic-sv2/src/utils.rs | 8 +- 16 files changed, 111 insertions(+), 664 deletions(-) diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs index 040adb097..d3abdae1e 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs @@ -1,4 +1,6 @@ -//! Contains logic for creating channels. +//! # Channel Factory +//! +//! This module contains logic for creating and managing channels. use super::extended_to_standard_job; use crate::{ @@ -76,7 +78,7 @@ pub enum OnNewShare { } impl OnNewShare { - /// Convert standard share into extended share + /// Converts standard share into extended share pub fn into_extended(&mut self, extranonce: Vec, up_id: u32) { match self { OnNewShare::SendErrorDownstream(_) => (), @@ -139,7 +141,7 @@ pub struct StagedPhash { } impl StagedPhash { - /// converts a Staged PrevHash into a SetNewPrevHash message + /// Converts a Staged PrevHash into a SetNewPrevHash message pub fn into_set_p_hash( &self, channel_id: u32, @@ -156,7 +158,7 @@ impl StagedPhash { } impl Share { - /// get share sequence number + /// Get share sequence number pub fn get_sequence_number(&self) -> u32 { match self { Share::Extended(s) => s.sequence_number, @@ -164,7 +166,7 @@ impl Share { } } - /// get share channel id + /// Get share channel id pub fn get_channel_id(&self) -> u32 { match self { Share::Extended(s) => s.channel_id, @@ -172,7 +174,7 @@ impl Share { } } - /// get share timestamp + /// Get share timestamp pub fn get_n_time(&self) -> u32 { match self { Share::Extended(s) => s.ntime, @@ -180,7 +182,7 @@ impl Share { } } - /// get share nonce + /// Get share nonce pub fn get_nonce(&self) -> u32 { match self { Share::Extended(s) => s.nonce, @@ -188,7 +190,7 @@ 
impl Share { } } - /// get share job id + /// Get share job id pub fn get_job_id(&self) -> u32 { match self { Share::Extended(s) => s.job_id, @@ -196,7 +198,7 @@ impl Share { } } - /// get share version + /// Get share version pub fn get_version(&self) -> u32 { match self { Share::Extended(s) => s.version, @@ -976,7 +978,7 @@ impl ChannelFactory { }, } } - /// updates the downstream target for the given channel_id + /// Updates the downstream target for the given channel_id fn update_target_for_channel(&mut self, channel_id: u32, new_target: Target) -> Option { let channel = self.extended_channels.get_mut(&channel_id)?; channel.target = new_target.into(); @@ -1292,12 +1294,12 @@ impl PoolChannelFactory { true } - /// get extended channel ids + /// Get extended channel ids pub fn get_extended_channels_ids(&self) -> Vec { self.inner.extended_channels.keys().copied().collect() } - /// update coinbase outputs + /// Update coinbase outputs pub fn update_pool_outputs(&mut self, outs: Vec) { self.pool_coinbase_outputs = outs; } @@ -1331,7 +1333,7 @@ pub struct ProxyExtendedChannelFactory { } impl ProxyExtendedChannelFactory { - /// constructor + /// Constructor #[allow(clippy::too_many_arguments)] pub fn new( ids: Arc>, @@ -1520,7 +1522,7 @@ impl ProxyExtendedChannelFactory { /// Called when a `SubmitSharesStandard` message is received from the downstream. 
We check the /// shares against the channel's respective target and return `OnNewShare` to let us know if - /// and where the the shares should be relayed + /// and where the shares should be relayed pub fn on_submit_shares_extended( &mut self, m: SubmitSharesExtended<'static>, @@ -1723,12 +1725,12 @@ impl ProxyExtendedChannelFactory { self.inner.on_new_extended_mining_job(m) } - /// set new target + /// Set new target pub fn set_target(&mut self, new_target: &mut Target) { self.inner.kind.set_target(new_target); } - /// get last valid job version + /// Get last valid job version pub fn last_valid_job_version(&self) -> Option { self.inner.last_valid_job.as_ref().map(|j| j.0.version) } @@ -1752,44 +1754,44 @@ impl ProxyExtendedChannelFactory { .map(|f| f.0.prev_hash.clone()) } - /// last min ntime + /// Get last min ntime pub fn last_min_ntime(&self) -> Option { self.inner.last_prev_hash.as_ref().map(|f| f.0.min_ntime) } - /// last nbits + /// Get last nbits pub fn last_nbits(&self) -> Option { self.inner.last_prev_hash.as_ref().map(|f| f.0.nbits) } - /// extranonce_size + /// Get extranonce_size pub fn extranonce_size(&self) -> usize { self.inner.extranonces.get_len() } - /// extranonce_2 size + /// Get extranonce_2 size pub fn channel_extranonce2_size(&self) -> usize { self.inner.extranonces.get_len() - self.inner.extranonces.get_range0_len() } // Only used when the proxy is using Job Declaration - /// update pool outputs + /// Updates pool outputs pub fn update_pool_outputs(&mut self, outs: Vec) { self.pool_coinbase_outputs = Some(outs); } - /// get this channel id + /// Get this channel id pub fn get_this_channel_id(&self) -> u32 { self.extended_channel_id } - /// returns the extranonce1 len of the upstream. For a proxy, this would + /// Returns the extranonce1 len of the upstream. 
For a proxy, this would /// be the extranonce_prefix len pub fn get_upstream_extranonce1_len(&self) -> usize { self.inner.extranonces.get_range0_len() } - /// calls [`ChannelFactory::update_target_for_channel`] + /// Calls [`ChannelFactory::update_target_for_channel`] pub fn update_target_for_channel( &mut self, channel_id: u32, @@ -1807,7 +1809,7 @@ pub enum ExtendedChannelKind { Pool, } impl ExtendedChannelKind { - /// set target + /// Set target pub fn set_target(&mut self, new_target: &mut Target) { match self { ExtendedChannelKind::Proxy { upstream_target } diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs index 05dbb3d12..18653124a 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs @@ -1,3 +1,5 @@ +//! # Channel Logic +//! //! A module for managing channels on applications. //! //! Divided in two submodules: @@ -10,7 +12,7 @@ pub mod proxy_group_channel; use mining_sv2::{NewExtendedMiningJob, NewMiningJob}; use std::convert::TryInto; -/// convert extended to standard job by calculating the merkle root +/// Convert extended to standard job by calculating the merkle root pub fn extended_to_standard_job<'a>( extended: &NewExtendedMiningJob, coinbase_script: &[u8], diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs index 73068fb8e..50a59d175 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs @@ -1,4 +1,6 @@ -//! Contains logic for managing Standard Channels via Group Channels. +//! # Proxy Group Channel +//! +//! This module contains logic for managing Standard Channels via Group Channels. 
use crate::{common_properties::StandardChannel, parsers::Mining, Error}; @@ -10,13 +12,13 @@ use super::extended_to_standard_job; use nohash_hasher::BuildNoHashHasher; use std::collections::HashMap; -/// wrapper around `GroupChannel` for managing multiple group channels +/// Wrapper around `GroupChannel` for managing multiple group channels #[derive(Debug, Clone, Default)] pub struct GroupChannels { channels: HashMap, } impl GroupChannels { - /// constructor + /// Constructor pub fn new() -> Self { Self { channels: HashMap::new(), @@ -74,7 +76,7 @@ impl GroupChannels { } } - /// get group channel ids + /// Get group channel ids pub fn ids(&self) -> Vec { self.channels.keys().copied().collect() } diff --git a/protocols/v2/roles-logic-sv2/src/common_properties.rs b/protocols/v2/roles-logic-sv2/src/common_properties.rs index 9198fcdec..047aa8ca9 100644 --- a/protocols/v2/roles-logic-sv2/src/common_properties.rs +++ b/protocols/v2/roles-logic-sv2/src/common_properties.rs @@ -18,32 +18,28 @@ use std::{collections::HashMap, fmt::Debug as D}; pub struct CommonDownstreamData { /// Header-only mining flag. /// - /// Enables the processing of block headers only, leaving transaction management and template - /// construction to the upstream node. It reduces the amount of data processed and transmitted - /// by the downstream node, making it useful when bandwidth is limited and transmitting full - /// block templates is costly. + /// Enables the processing of standard jobs only, leaving merkle root manipulation to the + /// upstream node. /// - /// - `true`: The downstream node only processes block headers, relying on the upstream for - /// transaction management. - /// - `false`: The downstream node handles full blocks. + /// - `true`: The downstream node only processes standard jobs, relying on the upstream for + /// merkle root manipulation. + /// - `false`: The downstream node handles extended jobs and merkle root manipulation. 
pub header_only: bool, /// Work selection flag. /// - /// Enables the selection of which transactions or templates the node will work on. It allows - /// for more autonomy for downstream nodes to define specific version bits to roll, adjust the - /// difficulty target, apply `timestamp` range constraints, etc. + /// Enables the selection of which transactions or templates the node will work on. /// - /// - `true`: The downstream node can modify or customize the mining job, such as choosing - /// specific transactions to include in a block template. - /// - `false`: The downstream node strictly follows the work provided by the upstream, such as - /// pre-constructed templates from a pool. + /// - `true`: The downstream node works on a custom block template, using the Job Declaration + /// Protocol. + /// - `false`: The downstream node strictly follows the work provided by the upstream, based on + /// pre-constructed templates from the upstream (e.g. Pool). pub work_selection: bool, /// Version rolling flag. /// /// Enables rolling the block header version bits which allows for more unique hash generation - /// on the same block template by expanding the nonce-space. Used when other fields (e.g. + /// on the same mining job by expanding the nonce-space. Used when other fields (e.g. /// `nonce` or `extranonce`) are fully exhausted. /// /// - `true`: The downstream supports version rolling and can modify specific bits in the @@ -60,7 +56,7 @@ pub struct CommonDownstreamData { /// range, and flag settings. #[derive(Debug, Copy, Clone)] pub struct PairSettings { - /// Role the settings apply to. + /// Protocol the settings apply to. pub protocol: Protocol, /// Minimum protocol version the node supports. @@ -70,7 +66,7 @@ pub struct PairSettings { pub max_v: u16, /// Flags indicating optional protocol features the node supports (e.g. header-only mining, - /// Noise protocol support, job configuration parameters, etc.). 
Each protocol field as its own + /// work selection, version-rolling, etc.). Each protocol field as its own /// flags. pub flags: u32, } @@ -122,8 +118,9 @@ pub enum UpstreamChannel { /// A grouped channel for aggregated mining. /// - /// Aggregates mining work for multiple devices or clients under a single channel, enabling the - /// upstream to manage work distribution and result aggregation for an entire group of miners. + /// Aggregates mining work for multiple standard channels under a single group channel, + /// enabling the upstream to manage work distribution and result aggregation for an entire + /// group of channels. /// /// Typically used by a mining proxy managing multiple downstream miners. Group, @@ -138,7 +135,7 @@ pub enum UpstreamChannel { /// Properties of a standard mining channel. /// /// Standard channels are intended to be used by end mining devices with a nominal hashrate, where -/// each device operates on an independent connection to its upstream. +/// each device operates on an independent channel to its upstream. #[derive(Debug, Clone)] pub struct StandardChannel { /// Identifies a specific channel, unique to each mining connection. @@ -148,7 +145,7 @@ pub struct StandardChannel { /// the same upstream node. The identifier remains stable for the whole lifetime of the /// connection. /// - /// Used for broadcasting new jobs by [`mining_sv2::NewExtendedMiningJob`]. + /// Used for broadcasting new jobs by [`mining_sv2::NewMiningJob`]. pub channel_id: u32, /// Identifies a specific group in which the standard channel belongs. diff --git a/protocols/v2/roles-logic-sv2/src/errors.rs b/protocols/v2/roles-logic-sv2/src/errors.rs index 47369a483..bd18eebc2 100644 --- a/protocols/v2/roles-logic-sv2/src/errors.rs +++ b/protocols/v2/roles-logic-sv2/src/errors.rs @@ -1,4 +1,7 @@ -//! Errors specific to this crate +//! # Error Handling +//! +//! This module defines error types and utilities for handling errors in the `roles_logic_sv2` +//! module. 
It includes the [`Error`] enum for representing various errors. use crate::{ common_properties::CommonDownstreamData, parsers::PoolMessages as AllMessages, diff --git a/protocols/v2/roles-logic-sv2/src/handlers/common.rs b/protocols/v2/roles-logic-sv2/src/handlers/common.rs index a520b8ca7..c36c3a9a1 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/common.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/common.rs @@ -3,13 +3,6 @@ //! This module defines traits and implementations for handling common Stratum V2 messages exchanged //! between upstream and downstream nodes. //! -//! ## Core Traits -//! -//! - `ParseUpstreamCommonMessages`: Implemented by downstream nodes to handle common messages -//! received from upstream nodes, such as setup connection results or channel endpoint changes. -//! - `ParseDownstreamCommonMessages`: Implemented by upstream nodes to process setup connection -//! messages received from downstream nodes. -//! //! ## Message Handling //! //! Handlers in this module are responsible for: @@ -58,10 +51,6 @@ where { /// Takes a message type and a payload, and if the message type is a /// [`crate::parsers::CommonMessages`], it calls the appropriate handler function - /// - /// Arguments: - /// - /// * `message_type`: See [`const_sv2`]. fn handle_message_common( self_: Arc>, message_type: u8, @@ -76,10 +65,6 @@ where } /// Takes a message and it calls the appropriate handler function - /// - /// Arguments: - /// - /// * `message_type`: See [`const_sv2`]. fn handle_message_common_deserilized( self_: Arc>, message: Result, Error>, @@ -124,12 +109,6 @@ where /// /// This method processes a `SetupConnectionSuccess` message and handles it /// by delegating to the appropriate handler. - /// - /// # Arguments - /// - `message`: The `SetupConnectionSuccess` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. 
fn handle_setup_connection_success( &mut self, m: SetupConnectionSuccess, @@ -139,24 +118,12 @@ where /// /// This method processes a `SetupConnectionError` message and handles it /// by delegating to the appropriate handler. - /// - /// # Arguments - /// - `message`: The `SetupConnectionError` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_setup_connection_error(&mut self, m: SetupConnectionError) -> Result; /// Handles a `ChannelEndpointChanged` message. /// /// This method processes a `ChannelEndpointChanged` message and handles it /// by delegating to the appropriate handler. - /// - /// # Arguments - /// - `message`: The `ChannelEndpointChanged` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_channel_endpoint_changed( &mut self, m: ChannelEndpointChanged, @@ -191,10 +158,6 @@ where /// It takes a message type and a payload, and if the message is a serialized setup connection /// message, it calls the `on_setup_connection` function on the routing logic, and then calls /// the `handle_setup_connection` function on the router - /// - /// Arguments: - /// - /// * `message_type`: See [`const_sv2`]. fn handle_message_common( self_: Arc>, message_type: u8, @@ -255,13 +218,6 @@ where /// /// This method processes a `SetupConnection` message and handles it /// by delegating to the appropriate handler in the routing logic. - /// - /// # Arguments - /// - `message`: The `SetupConnection` message. - /// - `result`: The result of the `on_setup_connection` call, if available. - /// - /// # Returns - /// - `Result`: The result of processing the message. 
fn handle_setup_connection( &mut self, m: SetupConnection, diff --git a/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs b/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs index a2ec2aea1..1a6d56e42 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/job_declaration.rs @@ -4,17 +4,6 @@ //! Stratum V2 protocol. The job declaration process is integral to managing mining tasks and //! transactions between server and client components. //! -//! ## Core Traits -//! -//! - `ParseServerJobDeclarationMessages`: This trait is implemented by downstream nodes to process -//! job declaration messages received from upstream nodes. The trait includes methods for handling -//! job-related events such as mining job token allocation, job declaration successes or errors, -//! and transaction identification or provisioning. -//! - `ParseClientJobDeclarationMessages`: This trait is implemented by upstream nodes to manage job -//! declaration messages received from downstream nodes. It facilitates the handling of job -//! declarations, mining job token allocation, and transaction solutions submitted by downstream -//! nodes. -//! //! ## Message Handling //! //! The handlers are responsible for the following tasks: @@ -60,15 +49,6 @@ where Self: Sized, { /// Routes an incoming job declaration message to the appropriate handler function. - /// - /// # Parameters - /// - `self_`: An `Arc>` instance to ensure thread-safe access to the implementor. - /// - `message_type`: The type identifier of the incoming message. - /// - `payload`: A mutable slice containing the message payload. - /// - /// # Returns - /// - `Ok(SendTo)`: Indicates the message was successfully handled. - /// - `Err(Error)`: Indicates an error occurred during message parsing or handling. 
fn handle_message_job_declaration( self_: Arc>, message_type: u8, @@ -78,15 +58,6 @@ where } /// Routes a deserialized job declaration message to the appropriate handler function. - /// - /// # Parameters - /// - `self_`: An `Arc>` instance to ensure thread-safe access to the implementor. - /// - `message`: A `Result` containing either the parsed message or an - /// error. - /// - /// # Returns - /// - `Ok(SendTo)`: Indicates the message was successfully handled. - /// - `Err(Error)`: Indicates an error occurred during message parsing or handling. fn handle_message_job_declaration_deserialized( self_: Arc>, message: Result, Error>, @@ -151,12 +122,6 @@ where /// Handles an `AllocateMiningJobTokenSuccess` message. /// /// This method processes a message indicating a successful job token allocation. - /// - /// # Arguments - /// - `message`: The `AllocateMiningJobTokenSuccess` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_allocate_mining_job_token_success( &mut self, message: AllocateMiningJobTokenSuccess, @@ -165,12 +130,6 @@ where /// Handles a `DeclareMiningJobSuccess` message. /// /// This method processes a message indicating a successful mining job declaration. - /// - /// # Arguments - /// - `message`: The `DeclareMiningJobSuccess` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_declare_mining_job_success( &mut self, message: DeclareMiningJobSuccess, @@ -179,12 +138,6 @@ where /// Handles a `DeclareMiningJobError` message. /// /// This method processes a message indicating an error in the mining job declaration process. - /// - /// # Arguments - /// - `message`: The `DeclareMiningJobError` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_declare_mining_job_error( &mut self, message: DeclareMiningJobError, @@ -193,12 +146,6 @@ where /// Handles an `IdentifyTransactions` message. 
/// /// This method processes a message that provides transaction identification data. - /// - /// # Arguments - /// - `message`: The `IdentifyTransactions` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_identify_transactions( &mut self, message: IdentifyTransactions, @@ -207,12 +154,6 @@ where /// Handles a `ProvideMissingTransactions` message. /// /// This method processes a message that supplies missing transaction data. - /// - /// # Arguments - /// - `message`: The `ProvideMissingTransactions` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_provide_missing_transactions( &mut self, message: ProvideMissingTransactions, @@ -227,15 +168,6 @@ where Self: Sized, { /// Routes an incoming job declaration message to the appropriate handler function. - /// - /// # Parameters - /// - `self_`: An `Arc>` instance to ensure thread-safe access to the implementor. - /// - `message_type`: The type identifier of the incoming message. - /// - `payload`: A mutable slice containing the message payload. - /// - /// # Returns - /// - `Ok(SendTo)`: Indicates the message was successfully handled. - /// - `Err(Error)`: Indicates an error occurred during message parsing or handling. fn handle_message_job_declaration( self_: Arc>, message_type: u8, @@ -245,15 +177,6 @@ where } /// Routes a deserialized job declaration message to the appropriate handler function. - /// - /// # Parameters - /// - `self_`: An `Arc>` instance to ensure thread-safe access to the implementor. - /// - `message`: A `Result` containing either the parsed message or an - /// error. - /// - /// # Returns - /// - `Ok(SendTo)`: Indicates the message was successfully handled. - /// - `Err(Error)`: Indicates an error occurred during message parsing or handling. fn handle_message_job_declaration_deserialized( self_: Arc>, message: Result, Error>, @@ -309,14 +232,6 @@ where } /// Handles an `AllocateMiningJobToken` message. 
- /// - /// This method processes a message that allocates a mining job token. - /// - /// # Arguments - /// - `message`: The `AllocateMiningJobToken` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_allocate_mining_job_token( &mut self, message: AllocateMiningJobToken, @@ -325,23 +240,11 @@ where /// Handles a `DeclareMiningJob` message. /// /// This method processes a message that declares a new mining job. - /// - /// # Arguments - /// - `message`: The `DeclareMiningJob` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_declare_mining_job(&mut self, message: DeclareMiningJob) -> Result; /// Handles an `IdentifyTransactionsSuccess` message. /// /// This method processes a message that confirms the identification of transactions. - /// - /// # Arguments - /// - `message`: The `IdentifyTransactionsSuccess` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_identify_transactions_success( &mut self, message: IdentifyTransactionsSuccess, @@ -350,12 +253,6 @@ where /// Handles a `ProvideMissingTransactionsSuccess` message. /// /// This method processes a message that confirms the receipt of missing transactions. - /// - /// # Arguments - /// - `message`: The `ProvideMissingTransactionsSuccess` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_provide_missing_transactions_success( &mut self, message: ProvideMissingTransactionsSuccess, @@ -364,11 +261,5 @@ where /// Handles a `SubmitSolution` message. /// /// This method processes a message that submits a solution for the mining job. - /// - /// # Arguments - /// - `message`: The `SubmitSolutionJd` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. 
fn handle_submit_solution(&mut self, message: SubmitSolutionJd) -> Result; } diff --git a/protocols/v2/roles-logic-sv2/src/handlers/mining.rs b/protocols/v2/roles-logic-sv2/src/handlers/mining.rs index 8edb43976..63d0a52e4 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/mining.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/mining.rs @@ -3,15 +3,6 @@ //! This module defines traits and functions for handling mining-related messages within the Stratum //! V2 protocol. //! -//! ## Core Traits -//! -//! - `ParseUpstreamMiningMessages`: Implemented by downstream nodes to process mining messages -//! received from upstream nodes. This trait provides methods for handling mining events like new -//! mining jobs, share submissions, extranonce prefix updates, and channel status updates. -//! - `ParseDownstreamMiningMessages`: Implemented by upstream nodes to manage mining messages -//! received from downstream nodes. This trait includes methods for managing tasks such as -//! submitting shares, opening mining channels, and handling mining job responses. -//! //! ## Message Handling //! //! Handlers in this module are responsible for: @@ -88,15 +79,6 @@ pub trait ParseDownstreamMiningMessages< fn get_channel_type(&self) -> SupportedChannelTypes; /// Handles a mining message from the downstream, given its type and payload. - /// - /// # Arguments - /// - `self_mutex`: The `Arc>` representing the downstream entity. - /// - `message_type`: The type of the mining message. - /// - `payload`: The raw payload of the message. - /// - `routing_logic`: The logic for routing the message to the appropriate upstream entity. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_message_mining( self_mutex: Arc>, message_type: u8, @@ -117,14 +99,6 @@ pub trait ParseDownstreamMiningMessages< } /// Deserializes and processes a mining message from the downstream. 
- /// - /// # Arguments - /// - `self_mutex`: The `Arc>` representing the downstream entity. - /// - `message`: The mining message to be processed. - /// - `routing_logic`: The logic for routing the message to the appropriate upstream entity. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_message_mining_deserialized( self_mutex: Arc>, message: Result, Error>, @@ -326,19 +300,9 @@ pub trait ParseDownstreamMiningMessages< } /// Checks if work selection is enabled for the downstream connection. - /// - /// # Returns - /// - `bool`: `true` if work selection is enabled, `false` otherwise. fn is_work_selection_enabled(&self) -> bool; /// Checks if the downstream user is authorized. - /// - /// # Arguments - /// - `_self_mutex`: The `Arc>` representing the downstream entity. - /// - `_user_identity`: The user's identity to be checked. - /// - /// # Returns - /// - `Result`: `true` if the user is authorized, `false` otherwise. fn is_downstream_authorized( _self_mutex: Arc>, _user_identity: &binary_sv2::Str0255, @@ -347,16 +311,6 @@ pub trait ParseDownstreamMiningMessages< } /// Handles an `OpenStandardMiningChannel` message. - /// - /// This method processes an `OpenStandardMiningChannel` message and initiates the - /// appropriate response. - /// - /// # Arguments - /// - `m`: The `OpenStandardMiningChannel` message. - /// - `up`: An optional upstream entity to which the message is forwarded. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_open_standard_mining_channel( &mut self, m: OpenStandardMiningChannel, @@ -364,15 +318,6 @@ pub trait ParseDownstreamMiningMessages< ) -> Result, Error>; /// Handles an `OpenExtendedMiningChannel` message. - /// - /// This method processes an `OpenExtendedMiningChannel` message and initiates the - /// appropriate response. - /// - /// # Arguments - /// - `m`: The `OpenExtendedMiningChannel` message. 
- /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_open_extended_mining_channel( &mut self, m: OpenExtendedMiningChannel, @@ -381,23 +326,11 @@ pub trait ParseDownstreamMiningMessages< /// Handles an `UpdateChannel` message. /// /// This method processes an `UpdateChannel` message and updates the channel settings. - /// - /// # Arguments - /// - `m`: The `UpdateChannel` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_update_channel(&mut self, m: UpdateChannel) -> Result, Error>; /// Handles a `SubmitSharesStandard` message. /// /// This method processes a `SubmitSharesStandard` message and validates the submitted shares. - /// - /// # Arguments - /// - `m`: The `SubmitSharesStandard` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_submit_shares_standard( &mut self, m: SubmitSharesStandard, @@ -406,12 +339,6 @@ pub trait ParseDownstreamMiningMessages< /// Handles a `SubmitSharesExtended` message. /// /// This method processes a `SubmitSharesExtended` message and validates the submitted shares. - /// - /// # Arguments - /// - `m`: The `SubmitSharesExtended` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_submit_shares_extended( &mut self, m: SubmitSharesExtended, @@ -421,12 +348,6 @@ pub trait ParseDownstreamMiningMessages< /// /// This method processes a `SetCustomMiningJob` message and applies the custom mining job /// settings. - /// - /// # Arguments - /// - `m`: The `SetCustomMiningJob` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_set_custom_mining_job(&mut self, m: SetCustomMiningJob) -> Result, Error>; } @@ -442,16 +363,9 @@ pub trait ParseUpstreamMiningMessages< Self: IsMiningUpstream + Sized + D, { /// Retrieves the type of the channel supported by this upstream parser. 
- /// - /// # Returns - /// - `SupportedChannelTypes`: The supported channel type for this upstream. fn get_channel_type(&self) -> SupportedChannelTypes; /// Retrieves an optional RequestIdMapper, used to manage request IDs across connections. - /// - /// # Returns - /// - `Option>>`: An optional RequestIdMapper for request ID - /// modification. fn get_request_id_mapper(&mut self) -> Option>> { None } @@ -460,16 +374,6 @@ pub trait ParseUpstreamMiningMessages< /// payload. The implementor of DownstreamMining needs to pass a RequestIdMapper if changing /// the request ID. Proxies typically need this to ensure the request ID is unique across /// the connection. - /// - /// # Arguments - /// - `self_mutex`: The `Arc>` representing the downstream entity. - /// - `message_type`: The type of the incoming message. - /// - `payload`: The payload containing the message data. - /// - `routing_logic`: The logic to handle the routing of the message based on the type. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message, either sending a - /// response or an error. fn handle_message_mining( self_mutex: Arc>, message_type: u8, @@ -488,15 +392,6 @@ pub trait ParseUpstreamMiningMessages< /// Handles the deserialized mining message from the upstream, processing it according to the /// routing logic. - /// - /// # Arguments - /// - `self_mutex`: The `Arc>` representing the downstream entity. - /// - `message`: The deserialized mining message, wrapped in a Result for error handling. - /// - `routing_logic`: The logic used to route the message based on the type. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message, either sending a - /// response or an error. fn handle_message_mining_deserialized( self_mutex: Arc>, message: Result, @@ -837,19 +732,9 @@ pub trait ParseUpstreamMiningMessages< } /// Determines whether work selection is enabled for this upstream. 
- /// - /// # Returns - /// - `bool`: A boolean indicating if work selection is enabled. fn is_work_selection_enabled(&self) -> bool; /// Handles a successful response for opening a standard mining channel. - /// - /// # Arguments - /// - `m`: The `OpenStandardMiningChannelSuccess` message. - /// - `remote`: An optional reference to the downstream, wrapped in an `Arc`. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_open_standard_mining_channel_success( &mut self, m: OpenStandardMiningChannelSuccess, @@ -857,160 +742,70 @@ pub trait ParseUpstreamMiningMessages< ) -> Result, Error>; /// Handles a successful response for opening an extended mining channel. - /// - /// # Arguments - /// - `m`: The `OpenExtendedMiningChannelSuccess` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_open_extended_mining_channel_success( &mut self, m: OpenExtendedMiningChannelSuccess, ) -> Result, Error>; /// Handles an error when opening a mining channel. - /// - /// # Arguments - /// - `m`: The `OpenMiningChannelError` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the error. fn handle_open_mining_channel_error( &mut self, m: OpenMiningChannelError, ) -> Result, Error>; /// Handles an error when updating a mining channel. - /// - /// # Arguments - /// - `m`: The `UpdateChannelError` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the error. fn handle_update_channel_error(&mut self, m: UpdateChannelError) -> Result, Error>; /// Handles a request to close a mining channel. - /// - /// # Arguments - /// - `m`: The `CloseChannel` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_close_channel(&mut self, m: CloseChannel) -> Result, Error>; /// Handles a request to set the extranonce prefix for mining. 
- /// - /// # Arguments - /// - `m`: The `SetExtranoncePrefix` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_set_extranonce_prefix( &mut self, m: SetExtranoncePrefix, ) -> Result, Error>; /// Handles a successful submission of shares. - /// - /// # Arguments - /// - `m`: The `SubmitSharesSuccess` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_submit_shares_success( &mut self, m: SubmitSharesSuccess, ) -> Result, Error>; /// Handles an error when submitting shares. - /// - /// # Arguments - /// - `m`: The `SubmitSharesError` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the error. fn handle_submit_shares_error(&mut self, m: SubmitSharesError) -> Result, Error>; /// Handles a new mining job. - /// - /// # Arguments - /// - `m`: The `NewMiningJob` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_new_mining_job(&mut self, m: NewMiningJob) -> Result, Error>; /// Handles a new extended mining job. - /// - /// # Arguments - /// - `m`: The `NewExtendedMiningJob` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_new_extended_mining_job( &mut self, m: NewExtendedMiningJob, ) -> Result, Error>; /// Handles a request to set the new previous hash. - /// - /// # Arguments - /// - `m`: The `SetNewPrevHash` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_set_new_prev_hash(&mut self, m: SetNewPrevHash) -> Result, Error>; /// Handles a successful response for setting a custom mining job. - /// - /// # Arguments - /// - `m`: The `SetCustomMiningJobSuccess` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. 
fn handle_set_custom_mining_job_success( &mut self, m: SetCustomMiningJobSuccess, ) -> Result, Error>; /// Handles an error when setting a custom mining job. - /// - /// # Arguments - /// - `m`: The `SetCustomMiningJobError` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the error. fn handle_set_custom_mining_job_error( &mut self, m: SetCustomMiningJobError, ) -> Result, Error>; /// Handles a request to set the target for mining. - /// - /// # Arguments - /// - `m`: The `SetTarget` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_set_target(&mut self, m: SetTarget) -> Result, Error>; /// Handles a request to reconnect the mining connection. - /// - /// # Arguments - /// - `m`: The `Reconnect` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_reconnect(&mut self, m: Reconnect) -> Result, Error>; /// Handles a request to set the group channel for mining. - /// - /// # Arguments - /// - `_m`: The `SetGroupChannel` message. - /// - /// # Returns - /// - `Result, Error>`: The result of processing the message. fn handle_set_group_channel(&mut self, _m: SetGroupChannel) -> Result, Error> { Ok(SendTo::None(None)) } diff --git a/protocols/v2/roles-logic-sv2/src/handlers/mod.rs b/protocols/v2/roles-logic-sv2/src/handlers/mod.rs index b717eb34a..a00eac900 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/mod.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/mod.rs @@ -1,4 +1,4 @@ -//! # Handlers Overview +//! # Handlers //! //! This module centralizes the logic for processing and routing Stratum V2 protocol messages, //! defining traits and utilities to handle messages for both Downstream and Upstream roles. @@ -16,10 +16,11 @@ //! - `ParseUpstream[Protocol]`: Handles messages from Upstream nodes. //! //! Supported subprotocols include: -//! - `common`: Shared messages across protocols. -//! 
- `job_declaration`: Job-related messages. -//! - `mining`: Mining-specific messages. -//! - `template_distribution`: Template distribution messages. +//! - `common`: Shared messages across all Sv2 roles. +//! - `job_declaration`: Manages custom mining job declarations, transactions, and solutions. +//! - `mining`: Manages standard mining communication (e.g., job dispatch, shares submission). +//! - `template_distribution`: Handles block templates updates and transaction data. +//! //! - **Common Messages**: Shared across all Sv2 roles. //! //! ## Return Values //! diff --git a/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs b/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs index 445a83922..307a0ed0e 100644 --- a/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs +++ b/protocols/v2/roles-logic-sv2/src/handlers/template_distribution.rs @@ -3,16 +3,6 @@ //! This module defines traits and functions for handling template distribution messages within the //! Stratum V2 protocol. //! -//! ## Core Traits -//! -//! - `ParseServerTemplateDistributionMessages`: Implemented by downstream nodes to process template -//! distribution messages received from upstream nodes. This trait includes methods for handling -//! template-related events like new templates, previous hash updates, and transaction data -//! requests. -//! - `ParseClientTemplateDistributionMessages`: Implemented by upstream nodes to manage template -//! distribution messages received from downstream nodes. This trait handles coinbase output size, -//! transaction data requests, and solution submissions. -//! //! ## Message Handling //! //! Handlers are responsible for: @@ -61,16 +51,6 @@ where /// This function is responsible for parsing and dispatching the appropriate handler based on /// the message type. It first deserializes the payload and then routes it to the /// corresponding handler function. 
- /// - /// # Arguments - /// - `self_`: An `Arc>` representing the instance of the object implementing this - /// trait. - /// - `message_type`: The type of the incoming message. - /// - `payload`: The raw payload data of the message. - /// - /// # Returns - /// - `Result`: The result of processing the message, where `SendTo` indicates - /// the next step in message handling. fn handle_message_template_distribution( self_: Arc>, message_type: u8, @@ -86,14 +66,6 @@ where /// /// This function takes the deserialized message and processes it according to the specific /// message type, invoking the appropriate handler function. - /// - /// # Arguments - /// - `self_`: An `Arc>` representing the instance of the object implementing this - /// trait. - /// - `message`: The deserialized `TemplateDistribution` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_message_template_distribution_desrialized( self_: Arc>, message: Result, Error>, @@ -154,35 +126,17 @@ where /// /// This method processes the `NewTemplate` message, which contains information about a newly /// generated template. - /// - /// # Arguments - /// - `m`: The `NewTemplate` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_new_template(&mut self, m: NewTemplate) -> Result; /// Handles a `SetNewPrevHash` message. /// /// This method processes the `SetNewPrevHash` message, which updates the previous hash for a /// template. - /// - /// # Arguments - /// - `m`: The `SetNewPrevHash` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_set_new_prev_hash(&mut self, m: SetNewPrevHash) -> Result; /// Handles a `RequestTransactionDataSuccess` message. /// /// This method processes the success response for a requested transaction data message. - /// - /// # Arguments - /// - `m`: The `RequestTransactionDataSuccess` message. 
- /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_request_tx_data_success( &mut self, m: RequestTransactionDataSuccess, @@ -191,12 +145,6 @@ where /// Handles a `RequestTransactionDataError` message. /// /// This method processes an error response for a requested transaction data message. - /// - /// # Arguments - /// - `m`: The `RequestTransactionDataError` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_request_tx_data_error( &mut self, m: RequestTransactionDataError, @@ -215,16 +163,6 @@ where /// This function is responsible for parsing and dispatching the appropriate handler based on /// the message type. It first deserializes the payload and then routes it to the /// corresponding handler function. - /// - /// # Arguments - /// - `self_`: An `Arc>` representing the instance of the object implementing this - /// trait. - /// - `message_type`: The type of the incoming message. - /// - `payload`: The raw payload data of the message. - /// - /// # Returns - /// - `Result`: The result of processing the message, where `SendTo` indicates - /// the next step in message handling. fn handle_message_template_distribution( self_: Arc>, message_type: u8, @@ -240,14 +178,6 @@ where /// /// This function takes the deserialized message and processes it according to the specific /// message type, invoking the appropriate handler function. - /// - /// # Arguments - /// - `self_`: An `Arc>` representing the instance of the object implementing this - /// trait. - /// - `message`: The deserialized `TemplateDistribution` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_message_template_distribution_desrialized( self_: Arc>, message: Result, Error>, @@ -282,34 +212,16 @@ where /// Handles a `CoinbaseOutputDataSize` message. /// /// This method processes a message that includes the coinbase output data size. 
- /// - /// # Arguments - /// - `m`: The `CoinbaseOutputDataSize` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_coinbase_out_data_size(&mut self, m: CoinbaseOutputDataSize) -> Result; /// Handles a `RequestTransactionData` message. /// /// This method processes a message requesting transaction data. - /// - /// # Arguments - /// - `m`: The `RequestTransactionData` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_request_tx_data(&mut self, m: RequestTransactionData) -> Result; /// Handles a `SubmitSolution` message. /// /// This method processes a solution submission message. - /// - /// # Arguments - /// - `m`: The `SubmitSolution` message. - /// - /// # Returns - /// - `Result`: The result of processing the message. fn handle_request_submit_solution(&mut self, m: SubmitSolution) -> Result; } diff --git a/protocols/v2/roles-logic-sv2/src/job_creator.rs b/protocols/v2/roles-logic-sv2/src/job_creator.rs index cc3f0fe7b..9f0e789f1 100644 --- a/protocols/v2/roles-logic-sv2/src/job_creator.rs +++ b/protocols/v2/roles-logic-sv2/src/job_creator.rs @@ -1,5 +1,7 @@ -//! The job creator module provides logic to create extended mining jobs given a template from -//! a template provider as well as logic to clean up old templates when new blocks are mined +//! # Job Creator +//! +//! This module provides logic to create extended mining jobs given a template from +//! a template provider as well as logic to clean up old templates when new blocks are mined. 
use crate::{errors, utils::Id, Error}; use binary_sv2::B064K; use mining_sv2::NewExtendedMiningJob; @@ -31,7 +33,7 @@ pub struct JobsCreators { extranonce_len: u8, } -/// Transform the byte array `coinbase_outputs` in a vector of TxOut +/// Transforms the byte array `coinbase_outputs` in a vector of TxOut /// It assumes the data to be valid data and does not do any kind of check pub fn tx_outputs_to_costum_scripts(tx_outputs: &[u8]) -> Vec { let mut txs = vec![]; @@ -50,7 +52,7 @@ pub fn tx_outputs_to_costum_scripts(tx_outputs: &[u8]) -> Vec { } impl JobsCreators { - /// constructor + /// Constructor pub fn new(extranonce_len: u8) -> Self { Self { lasts_new_template: Vec::new(), @@ -63,12 +65,12 @@ impl JobsCreators { } } - /// get template id from job + /// Get template id from job pub fn get_template_id_from_job(&self, job_id: u32) -> Option { self.job_to_template_id.get(&job_id).map(|x| x - 1) } - /// used to create new jobs when a new template arrives + /// Used to create new jobs when a new template arrives pub fn on_new_template( &mut self, template: &mut NewTemplate, @@ -135,13 +137,13 @@ impl JobsCreators { } } - /// returns the latest mining target + /// Returns the latest mining target pub fn last_target(&self) -> mining_sv2::Target { self.last_target.clone() } } -/// convert custom job into extended job +/// Converts custom job into extended job pub fn extended_job_from_custom_job( referenced_job: &mining_sv2::SetCustomMiningJob, pool_signature: String, @@ -173,7 +175,7 @@ pub fn extended_job_from_custom_job( ) } -// returns an extended job given the provided template from the Template Provider and other +// Returns an extended job given the provided template from the Template Provider and other // Pool role related fields. 
// // Pool related arguments: @@ -238,7 +240,7 @@ fn new_extended_job( Ok(new_extended_mining_job) } -// used to extract the coinbase transaction prefix for extended jobs +// Used to extract the coinbase transaction prefix for extended jobs // so the extranonce search space can be introduced fn coinbase_tx_prefix( coinbase: &Transaction, @@ -262,7 +264,7 @@ fn coinbase_tx_prefix( r.try_into().map_err(Error::BinarySv2Error) } -// used to extract the coinbase transaction suffix for extended jobs +// Used to extract the coinbase transaction suffix for extended jobs // so the extranonce search space can be introduced fn coinbase_tx_suffix( coinbase: &Transaction, @@ -424,7 +426,7 @@ impl StrippedCoinbaseTx { }) } - // the coinbase tx prefix is the LE bytes concatenation of the tx version and all + // The coinbase tx prefix is the LE bytes concatenation of the tx version and all // of the tx inputs minus the 32 bytes after the bip34 bytes in the script // and the last input's sequence (used as the first entry in the coinbase tx suffix). // The last 32 bytes after the bip34 bytes in the script will be used to allow extranonce diff --git a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs index e0589c526..c9a0a9f9e 100644 --- a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs +++ b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs @@ -1,4 +1,6 @@ -//! The Job Dispatcher contains relevant logic to maintain group channels in proxy roles such as: +//! Job Dispatcher +//! +//! This module contains relevant logic to maintain group channels in proxy roles such as: //! - converting extended jobs to standard jobs //! - handling updates to jobs when new templates and prev hashes arrive, as well as cleaning up old //! 
jobs diff --git a/protocols/v2/roles-logic-sv2/src/lib.rs b/protocols/v2/roles-logic-sv2/src/lib.rs index c6ffe67bc..d560f01b0 100644 --- a/protocols/v2/roles-logic-sv2/src/lib.rs +++ b/protocols/v2/roles-logic-sv2/src/lib.rs @@ -1,39 +1,27 @@ -//! Provides all relevant types, traits and functions to implement a valid SV2 role. +//! # Stratum V2 Roles-Logic Library //! -//! - For channel and job management, see [`channel_logic`], which utilizes [`job_creator`] and -//! [`job_dispatcher`] -//! - For message handling, the traits in [`handlers`] should be implemented -//! - For basic traits every implementation should use, see [`common_properties`] -//! - Routers in [`routing_logic`] are used by the traits in `handlers` to decide which -//! downstream/upstream to relay/send by using [`selectors`] -//! - For serializing/deserializing messages, see [`parsers`] -//! - see [`utils`] for helpers such as safe locking, target and merkle root calculations +//! roles_logic_sv2 provides the core logic and utilities for implementing roles in the Stratum V2 +//! (Sv2) protocol, such as miners, pools, and proxies. It abstracts message handling, channel +//! management, job creation, and routing logic, enabling efficient and secure communication across +//! upstream and downstream connections. //! -//!```txt -//! MiningDevice: -//! common_properties::IsUpstream + -//! common_properties::IsMiningUpstream + -//! handlers::common::ParseUpstreamCommonMessages + -//! handlers::mining::ParseUpstreamMiningMessages + +//! ## Usage //! -//! Pool: -//! common_properties::IsDownstream + -//! common_properties::IsMiningDownstream + -//! handlers::common::ParseDownstreamCommonMessages + -//! handlers::mining::ParseDownstreamMiningMessages + +//! To include this crate in your project, run: +//! ```bash +//! $ cargo add roles_logic_sv2 +//! ``` //! -//! ProxyDownstreamConnection: -//! common_properties::IsDownstream + -//! common_properties::IsMiningDownstream + -//! 
handlers::common::ParseDownstreamCommonMessages + -//! handlers::mining::ParseDownstreamMiningMessages + +//! ## Build Options //! -//! ProxyUpstreamConnection: -//! common_properties::IsUpstream + -//! common_properties::IsMiningUpstream + -//! handlers::common::ParseUpstreamCommonMessages + -//! handlers::mining::ParseUpstreamMiningMessages + -//! ``` +//! This crate can be built with the following features: +//! +//! - `with_serde`: builds [`framing_sv2`], [`binary_sv2`], [`common_messages_sv2`], +//! [`template_distribution_sv2`], [`job_declaration_sv2`], and [`mining_sv2`] crates with +//! `serde`-based encoding and decoding. Note that this feature flag is only used for the Message +//! Generator, and deprecated for any other kind of usage. It will likely be fully deprecated in +//! the future. +//! - `prop_test`: Enables support for property testing in [`template_distribution_sv2`] crate. pub mod channel_logic; pub mod common_properties; pub mod errors; diff --git a/protocols/v2/roles-logic-sv2/src/parsers.rs b/protocols/v2/roles-logic-sv2/src/parsers.rs index 54e7ef0fa..aac402293 100644 --- a/protocols/v2/roles-logic-sv2/src/parsers.rs +++ b/protocols/v2/roles-logic-sv2/src/parsers.rs @@ -15,9 +15,10 @@ //! //! ## Supported Subprotocols //! - **Common Messages**: Shared across all Sv2 roles. -//! - **Template Distribution**: Handles block templates and transaction data. -//! - **Job Declaration**: Manages mining job assignments and solutions. -//! - **Mining Protocol**: Manages standard mining communication (e.g., job dispatch, shares). +//! - **Template Distribution**: Handles block templates updates and transaction data. +//! - **Job Declaration**: Manages custom mining job declarations, transactions, and solutions. +//! - **Mining Protocol**: Manages standard mining communication (e.g., job dispatch, shares +//! submission). 
use crate::Error; diff --git a/protocols/v2/roles-logic-sv2/src/routing_logic.rs b/protocols/v2/roles-logic-sv2/src/routing_logic.rs index 37ddbb7fa..4da60bf74 100644 --- a/protocols/v2/roles-logic-sv2/src/routing_logic.rs +++ b/protocols/v2/roles-logic-sv2/src/routing_logic.rs @@ -1,8 +1,8 @@ +//! # Routing Logic +//! //! This module contains the routing logic used by handlers to determine where a message should be //! relayed or responded to. //! -//! ## Overview -//! //! The routing logic defines a set of traits and structures to manage message routing in Stratum //! V2. The following components are included: //! @@ -17,29 +17,6 @@ //! - **`MiningProxyRoutingLogic`**: Routing logic valid for a standard Sv2 mining proxy, //! implementing both `CommonRouter` and `MiningRouter`. //! -//! ## Details -//! -//! ### Traits -//! -//! - **`CommonRouter`**: Defines routing behavior for common protocol messages. -//! - **`MiningRouter`**: Defines routing behavior for mining protocol messages. Requires -//! `DownstreamMiningSelector` for downstream selection. -//! -//! ### Enums -//! -//! - **`CommonRoutingLogic`**: Represents possible routing logics for the common protocol, such as -//! proxy-based routing or no routing. -//! - **`MiningRoutingLogic`**: Represents possible routing logics for the mining protocol, -//! supporting additional parameters such as selectors and marker traits. -//! -//! ### Structures -//! -//! - **`NoRouting`**: A minimal implementation of `CommonRouter` and `MiningRouter` that panics -//! when used. Its primary purpose is to serve as a placeholder for cases where no routing logic -//! is applied. -//! - **`MiningProxyRoutingLogic`**: Implements routing logic for a standard Sv2 proxy, including -//! upstream selection and message transformation. -//! //! ## Future Work //! //! - Consider hiding all traits from the public API and exporting only marker traits. 
@@ -67,12 +44,6 @@ use std::{collections::HashMap, fmt::Debug as D, marker::PhantomData, sync::Arc} /// protocol routing. pub trait CommonRouter: std::fmt::Debug { /// Handles a `SetupConnection` message for the common protocol. - /// - /// # Arguments - /// - `message`: The `SetupConnection` message received. - /// - /// # Returns - /// - `Result<(CommonDownstreamData, SetupConnectionSuccess), Error>`: The routing result. fn on_setup_connection( &mut self, message: &SetupConnection, @@ -91,14 +62,6 @@ pub trait MiningRouter< >: CommonRouter { /// Handles an `OpenStandardMiningChannel` message from a downstream. - /// - /// # Arguments - /// - `downstream`: The downstream mining entity. - /// - `request`: The mining channel request message. - /// - `downstream_mining_data`: Associated downstream mining data. - /// - /// # Returns - /// - `Result>, Error>`: The upstream mining entity. fn on_open_standard_channel( &mut self, downstream: Arc>, @@ -107,13 +70,6 @@ pub trait MiningRouter< ) -> Result>, Error>; /// Handles an `OpenStandardMiningChannelSuccess` message from an upstream. - /// - /// # Arguments - /// - `upstream`: The upstream mining entity. - /// - `request`: The successful channel opening message. - /// - /// # Returns - /// - `Result>, Error>`: The downstream mining entity. fn on_open_standard_channel_success( &mut self, upstream: Arc>, @@ -235,14 +191,6 @@ impl< /// /// This method initializes the connection between a downstream and an upstream by determining /// the appropriate upstream based on the provided protocol, versions, and flags. - /// - /// # Arguments - /// - `message`: A reference to the `SetupConnection` message containing the connection details. - /// - /// # Returns - /// - `Result<(CommonDownstreamData, SetupConnectionSuccess), Error>`: On success, returns the - /// downstream connection data and the corresponding setup success message. Returns an error - /// otherwise. 
fn on_setup_connection( &mut self, message: &SetupConnection, @@ -279,15 +227,6 @@ impl< // This method processes the request to open a standard mining channel. It selects a suitable // upstream, updates the request ID to ensure uniqueness, and then delegates to // `on_open_standard_channel_request_header_only` to finalize the process. - // - // # Arguments - // - `downstream`: The downstream requesting the channel opening. - // - `request`: A mutable reference to the `OpenStandardMiningChannel` message. - // - `downstream_mining_data`: Common data about the downstream mining setup. - // - // # Returns - // - `Result>, Error>`: Returns the selected upstream for the downstream or an - // error. fn on_open_standard_channel( &mut self, downstream: Arc>, @@ -315,14 +254,6 @@ impl< // This method processes the success message received from an upstream when a standard mining // channel is opened. It maps the request ID back to the original ID from the downstream and // updates the associated group and channel IDs in the upstream. - // - // # Arguments - // - `upstream`: The upstream involved in the channel opening. - // - `request`: A mutable reference to the `OpenStandardMiningChannelSuccess` message. - // - // # Returns - // - `Result>, Error>`: Returns the downstream corresponding to the request or - // an error. fn on_open_standard_channel_success( &mut self, upstream: Arc>, @@ -353,13 +284,6 @@ impl< } // Selects the upstream with the lowest total hash rate. -// -// # Arguments -// - `ups`: A mutable slice of upstream mining entities. -// -// # Returns -// - `Arc>`: The upstream entity with the lowest total hash rate. -// // # Panics // This function panics if the slice is empty, as it is internally guaranteed that this function // will only be called with non-empty vectors. @@ -385,12 +309,6 @@ where } // Filters upstream entities that are not configured for header-only mining. -// -// # Arguments -// - `ups`: A mutable slice of upstream mining entities. 
-// -// # Returns -// - `Vec>>`: A vector of upstream entities that are not header-only. fn filter_header_only(ups: &mut [Arc>]) -> Vec>> where Down: IsMiningDownstream + D, @@ -414,12 +332,6 @@ where // - If only one upstream is available, it is selected. // - If multiple upstreams exist, preference is given to those not configured as header-only. // - Among the remaining upstreams, the one with the lowest total hash rate is selected. -// -// # Arguments -// - `ups`: A mutable slice of upstream mining entities. -// -// # Returns -// - `Option>>`: The selected upstream entity, or `None` if none are available. fn select_upstream(ups: &mut [Arc>]) -> Option>> where Down: IsMiningDownstream + D, @@ -444,12 +356,6 @@ impl< > MiningProxyRoutingLogic { // Selects an upstream entity from a list of available upstreams. - // - // # Arguments - // - `ups`: A mutable slice of upstream mining entities. - // - // # Returns - // - `Option>>`: The selected upstream entity, or `None` if none are available. fn select_upstreams(ups: &mut [Arc>]) -> Option>> { select_upstream(ups) } @@ -458,12 +364,6 @@ impl< /// /// This method selects compatible upstreams, assigns connection flags, and maps the /// downstream to the selected upstreams. - /// - /// # Arguments - /// - `pair_settings`: The pairing settings for the connection. - /// - /// # Returns - /// - `Result<(CommonDownstreamData, SetupConnectionSuccess), Error>`: The connection result. pub fn on_setup_connection_mining_header_only( &mut self, pair_settings: &PairSettings, @@ -487,14 +387,7 @@ impl< Ok((downstream_data, message)) } - /// Handles a standard channel opening request for header-only mining downstream's. - /// - /// # Arguments - /// - `downstream`: The downstream mining entity. - /// - `request`: The standard mining channel request message. - /// - /// # Returns - /// - `Result>, Error>`: The selected upstream mining entity. + /// Handles a standard channel opening request for header-only mining downstreams. 
pub fn on_open_standard_channel_request_header_only( &mut self, downstream: Arc>, diff --git a/protocols/v2/roles-logic-sv2/src/utils.rs b/protocols/v2/roles-logic-sv2/src/utils.rs index 42e3053e2..467979d0a 100644 --- a/protocols/v2/roles-logic-sv2/src/utils.rs +++ b/protocols/v2/roles-logic-sv2/src/utils.rs @@ -87,7 +87,7 @@ pub struct Mutex(Mutex_); impl Mutex { /// Mutex safe lock. /// - /// Safely locks the `Mutex` and executes a closer (`thunk`) with a mutable reference to to the + /// Safely locks the `Mutex` and executes a closure (`thunk`) with a mutable reference to the /// inner value. This ensures that the lock is automatically released after the closure /// completes, preventing deadlocks. It explicitly returns a [`PoisonError`] containing a /// [`MutexGuard`] to the inner value in cases where the lock is poisoned. @@ -212,7 +212,7 @@ pub fn merkle_root_from_path_>(coinbase_id: [u8; 32], path: &[T]) } } -// Computes the Merkle root by iteratively combines the coinbase transaction hash with each +// Computes the Merkle root by iteratively combining the coinbase transaction hash with each // transaction hash in the `path`. // // Handles the core logic of combining hashes using the Bitcoin double-SHA256 hashing algorithm. @@ -430,13 +430,13 @@ pub fn hash_rate_to_target( /// /// ## Formula /// ```text -/// h = (2^256 - target) / (s * (t + 1)) +/// h = (2^256 - t) / (s * (t + 1)) /// ``` /// /// Where: /// - `h`: Mining device hashrate (H/s). /// - `t`: Target threshold. -/// - `s`: Shares per second `60 / shares/min`. +/// - `s`: Shares per minute. pub fn hash_rate_from_target(target: U256<'static>, share_per_min: f64) -> Result { // checks that we are not dividing by zero if share_per_min == 0.0 {