diff --git a/CHANGELOG.md b/CHANGELOG.md
index 23b0ed3eae..4c8cb25c2a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1143,5 +1143,4 @@ This release refactors the registration code for CPU registration to improve sol
### Synapse update
-##
-
+##
\ No newline at end of file
diff --git a/VERSION b/VERSION
index fa5fce04b3..9f4a0fbc18 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-8.0.0
\ No newline at end of file
+8.3.0
\ No newline at end of file
diff --git a/bittensor/core/async_subtensor.py b/bittensor/core/async_subtensor.py
new file mode 100644
index 0000000000..ae912d5dca
--- /dev/null
+++ b/bittensor/core/async_subtensor.py
@@ -0,0 +1,1608 @@
+import asyncio
+import ssl
+from typing import Optional, Any, Union, TypedDict, Iterable
+
+import aiohttp
+import numpy as np
+import scalecodec
+from bittensor_wallet import Wallet
+from bittensor_wallet.utils import SS58_FORMAT
+from numpy.typing import NDArray
+from scalecodec import GenericCall
+from scalecodec.base import RuntimeConfiguration
+from scalecodec.type_registry import load_type_registry_preset
+from substrateinterface.exceptions import SubstrateRequestException
+
+from bittensor.core.chain_data import (
+ DelegateInfo,
+ custom_rpc_type_registry,
+ StakeInfo,
+ NeuronInfoLite,
+ NeuronInfo,
+ SubnetHyperparameters,
+ decode_account_id,
+)
+from bittensor.core.extrinsics.async_registration import register_extrinsic
+from bittensor.core.extrinsics.async_root import (
+ set_root_weights_extrinsic,
+ root_register_extrinsic,
+)
+from bittensor.core.extrinsics.async_transfer import transfer_extrinsic
+from bittensor.core.extrinsics.async_weights import (
+ commit_weights_extrinsic,
+ set_weights_extrinsic,
+)
+from bittensor.core.settings import (
+ TYPE_REGISTRY,
+ DEFAULTS,
+ NETWORK_MAP,
+ DELEGATES_DETAILS_URL,
+ DEFAULT_NETWORK,
+)
+from bittensor.core.settings import version_as_int
+from bittensor.utils import (
+ torch,
+ ss58_to_vec_u8,
+ format_error_message,
+ decode_hex_identity_dict,
+ validate_chain_endpoint,
+)
+from bittensor.utils.async_substrate_interface import (
+ AsyncSubstrateInterface,
+ TimeoutException,
+)
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
+from bittensor.utils.delegates_details import DelegatesDetails
+from bittensor.utils.weight_utils import generate_weight_hash
+
+
+class ParamWithTypes(TypedDict):
+ name: str # Name of the parameter.
+ type: str # ScaleType string of the parameter.
+
+
+class ProposalVoteData:
+ index: int
+ threshold: int
+ ayes: list[str]
+ nays: list[str]
+ end: int
+
+ def __init__(self, proposal_dict: dict) -> None:
+ self.index = proposal_dict["index"]
+ self.threshold = proposal_dict["threshold"]
+ self.ayes = self.decode_ss58_tuples(proposal_dict["ayes"])
+ self.nays = self.decode_ss58_tuples(proposal_dict["nays"])
+ self.end = proposal_dict["end"]
+
+ @staticmethod
+ def decode_ss58_tuples(line: tuple):
+ """Decodes a tuple of ss58 addresses formatted as bytes tuples."""
+        return [decode_account_id(item[0]) for item in line]
+
+
+def _decode_hex_identity_dict(info_dictionary: dict[str, Any]) -> dict[str, Any]:
+ """Decodes a dictionary of hexadecimal identities."""
+ for k, v in info_dictionary.items():
+ if isinstance(v, dict):
+ item = next(iter(v.values()))
+ else:
+ item = v
+ if isinstance(item, tuple) and item:
+ if len(item) > 1:
+ try:
+ info_dictionary[k] = (
+ bytes(item).hex(sep=" ", bytes_per_sep=2).upper()
+ )
+ except UnicodeDecodeError:
+ logging.error(f"Could not decode: {k}: {item}.")
+ else:
+ try:
+ info_dictionary[k] = bytes(item[0]).decode("utf-8")
+ except UnicodeDecodeError:
+ logging.error(f"Could not decode: {k}: {item}.")
+ else:
+ info_dictionary[k] = item
+
+ return info_dictionary
+
+
+class AsyncSubtensor:
+ """Thin layer for interacting with Substrate Interface. Mostly a collection of frequently-used calls."""
+
+ def __init__(self, network: str = DEFAULT_NETWORK):
+ if network in NETWORK_MAP:
+ self.chain_endpoint = NETWORK_MAP[network]
+ self.network = network
+ if network == "local":
+                logging.warning(
+                    "Verify your local subtensor is running on port 9944."
+                )
+ else:
+ is_valid, _ = validate_chain_endpoint(network)
+ if is_valid:
+ self.chain_endpoint = network
+ if network in NETWORK_MAP.values():
+ self.network = next(
+ key for key, value in NETWORK_MAP.items() if value == network
+ )
+ else:
+ self.network = "custom"
+ else:
+ logging.info(
+ f"Network not specified or not valid. Using default chain endpoint: {NETWORK_MAP[DEFAULTS.subtensor.network]}."
+ )
+ logging.info(
+ "You can set this for commands with the --network flag, or by setting this in the config."
+ )
+ self.chain_endpoint = NETWORK_MAP[DEFAULTS.subtensor.network]
+ self.network = DEFAULTS.subtensor.network
+
+ self.substrate = AsyncSubstrateInterface(
+ chain_endpoint=self.chain_endpoint,
+ ss58_format=SS58_FORMAT,
+ type_registry=TYPE_REGISTRY,
+ chain_name="Bittensor",
+ )
+
+ def __str__(self):
+ return f"Network: {self.network}, Chain: {self.chain_endpoint}"
+
+ async def __aenter__(self):
+        logging.info(f"Connecting to Substrate: {self}...")
+ try:
+ async with self.substrate:
+ return self
+ except TimeoutException:
+ logging.error(
+ f"Error: Timeout occurred connecting to substrate. Verify your chain and network settings: {self}"
+ )
+ raise ConnectionError
+ except (ConnectionRefusedError, ssl.SSLError) as error:
+ logging.error(
+ f"Error: Connection refused when connecting to substrate. "
+ f"Verify your chain and network settings: {self}. Error: {error}"
+ )
+ raise ConnectionError
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ await self.substrate.close()
+
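+    # Usage sketch (hedged): instances are intended to be used as async context
+    # managers so the underlying substrate connection is opened and closed
+    # cleanly, e.g.:
+    #
+    #     async with AsyncSubtensor("finney") as subtensor:
+    #         block = await subtensor.get_current_block()
+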
+ async def encode_params(
+ self,
+ call_definition: dict[str, list["ParamWithTypes"]],
+ params: Union[list[Any], dict[str, Any]],
+ ) -> str:
+ """Returns a hex encoded string of the params using their types."""
+ param_data = scalecodec.ScaleBytes(b"")
+
+ for i, param in enumerate(call_definition["params"]):
+ scale_obj = await self.substrate.create_scale_object(param["type"])
+ if isinstance(params, list):
+ param_data += scale_obj.encode(params[i])
+ else:
+ if param["name"] not in params:
+ raise ValueError(f"Missing param {param['name']} in params dict.")
+
+ param_data += scale_obj.encode(params[param["name"]])
+
+ return param_data.to_hex()
+
+ async def get_current_block(self) -> int:
+ """
+ Returns the current block number on the Bittensor blockchain. This function provides the latest block number, indicating the most recent state of the blockchain.
+
+ Returns:
+ int: The current chain block number.
+
+ Knowing the current block number is essential for querying real-time data and performing time-sensitive operations on the blockchain. It serves as a reference point for network activities and data synchronization.
+ """
+ return await self.substrate.get_block_number()
+
+ async def get_block_hash(self, block_id: Optional[int] = None):
+ """
+ Retrieves the hash of a specific block on the Bittensor blockchain. The block hash is a unique identifier representing the cryptographic hash of the block's content, ensuring its integrity and immutability.
+
+ Args:
+            block_id (Optional[int]): The block number for which the hash is to be retrieved. If ``None``, the chain head hash is returned.
+
+ Returns:
+ str: The cryptographic hash of the specified block.
+
+ The block hash is a fundamental aspect of blockchain technology, providing a secure reference to each block's data. It is crucial for verifying transactions, ensuring data consistency, and maintaining the trustworthiness of the blockchain.
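+
+        Example:
+            A minimal sketch (assumes an initialized instance ``subtensor``)::
+
+                head_hash = await subtensor.get_block_hash()  # chain head
+                block_hash = await subtensor.get_block_hash(1_000)  # a specific block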
+ """
+        if block_id is not None:
+ return await self.substrate.get_block_hash(block_id)
+ else:
+ return await self.substrate.get_chain_head()
+
+ async def is_hotkey_registered_any(
+ self, hotkey_ss58: str, block_hash: Optional[str] = None
+ ) -> bool:
+ """
+ Checks if a neuron's hotkey is registered on any subnet within the Bittensor network.
+
+ Args:
+ hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey.
+ block_hash (Optional[str]): The blockchain block_hash representation of block id.
+
+ Returns:
+ bool: ``True`` if the hotkey is registered on any subnet, False otherwise.
+
+ This function is essential for determining the network-wide presence and participation of a neuron.
+ """
+ return len(await self.get_netuids_for_hotkey(hotkey_ss58, block_hash)) > 0
+
+ async def get_subnet_burn_cost(
+ self, block_hash: Optional[str] = None
+ ) -> Optional[str]:
+ """
+ Retrieves the burn cost for registering a new subnet within the Bittensor network. This cost represents the amount of Tao that needs to be locked or burned to establish a new subnet.
+
+ Args:
+            block_hash (Optional[str]): The blockchain block_hash of the block id.
+
+ Returns:
+ int: The burn cost for subnet registration.
+
+ The subnet burn cost is an important economic parameter, reflecting the network's mechanisms for controlling the proliferation of subnets and ensuring their commitment to the network's long-term viability.
+ """
+ lock_cost = await self.query_runtime_api(
+ runtime_api="SubnetRegistrationRuntimeApi",
+ method="get_network_registration_cost",
+ params=[],
+ block_hash=block_hash,
+ )
+
+ return lock_cost
+
+ async def get_total_subnets(
+ self, block_hash: Optional[str] = None
+ ) -> Optional[int]:
+ """
+ Retrieves the total number of subnets within the Bittensor network as of a specific blockchain block.
+
+ Args:
+ block_hash (Optional[str]): The blockchain block_hash representation of block id.
+
+ Returns:
+            Optional[int]: The total number of subnets in the network.
+
+ Understanding the total number of subnets is essential for assessing the network's growth and the extent of its decentralized infrastructure.
+ """
+ result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="TotalNetworks",
+ params=[],
+ block_hash=block_hash,
+ )
+ return result
+
+ async def get_subnets(self, block_hash: Optional[str] = None) -> list[int]:
+ """
+ Retrieves the list of all subnet unique identifiers (netuids) currently present in the Bittensor network.
+
+ Args:
+ block_hash (Optional[str]): The hash of the block to retrieve the subnet unique identifiers from.
+
+ Returns:
+ A list of subnet netuids.
+
+ This function provides a comprehensive view of the subnets within the Bittensor network,
+ offering insights into its diversity and scale.
+ """
+ result = await self.substrate.query_map(
+ module="SubtensorModule",
+ storage_function="NetworksAdded",
+ block_hash=block_hash,
+ reuse_block_hash=True,
+ )
+ return (
+ []
+ if result is None or not hasattr(result, "records")
+ else [netuid async for netuid, exists in result if exists]
+ )
+
+ async def is_hotkey_delegate(
+ self,
+ hotkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> bool:
+ """
+ Determines whether a given hotkey (public key) is a delegate on the Bittensor network. This function checks if the neuron associated with the hotkey is part of the network's delegation system.
+
+ Args:
+ hotkey_ss58 (str): The SS58 address of the neuron's hotkey.
+ block_hash (Optional[str]): The hash of the blockchain block number for the query.
+ reuse_block (Optional[bool]): Whether to reuse the last-used block hash.
+
+ Returns:
+ `True` if the hotkey is a delegate, `False` otherwise.
+
+ Being a delegate is a significant status within the Bittensor network, indicating a neuron's involvement in consensus and governance processes.
+ """
+ delegates = await self.get_delegates(
+ block_hash=block_hash, reuse_block=reuse_block
+ )
+ return hotkey_ss58 in [info.hotkey_ss58 for info in delegates]
+
+ async def get_delegates(
+ self, block_hash: Optional[str] = None, reuse_block: bool = False
+ ) -> list[DelegateInfo]:
+ """
+ Fetches all delegates on the chain
+
+ Args:
+ block_hash (Optional[str]): hash of the blockchain block number for the query.
+ reuse_block (Optional[bool]): whether to reuse the last-used block hash.
+
+ Returns:
+ List of DelegateInfo objects, or an empty list if there are no delegates.
+ """
+ hex_bytes_result = await self.query_runtime_api(
+ runtime_api="DelegateInfoRuntimeApi",
+ method="get_delegates",
+ params=[],
+ block_hash=block_hash,
+ reuse_block=reuse_block,
+ )
+ if hex_bytes_result is not None:
+ try:
+ bytes_result = bytes.fromhex(hex_bytes_result[2:])
+ except ValueError:
+ bytes_result = bytes.fromhex(hex_bytes_result)
+
+ return DelegateInfo.list_from_vec_u8(bytes_result)
+ else:
+ return []
+
+ async def get_stake_info_for_coldkey(
+ self,
+ coldkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> list[StakeInfo]:
+ """
+ Retrieves stake information associated with a specific coldkey. This function provides details about the stakes held by an account, including the staked amounts and associated delegates.
+
+ Args:
+ coldkey_ss58 (str): The ``SS58`` address of the account's coldkey.
+ block_hash (Optional[str]): The hash of the blockchain block number for the query.
+ reuse_block (bool): Whether to reuse the last-used block hash.
+
+ Returns:
+ A list of StakeInfo objects detailing the stake allocations for the account.
+
+ Stake information is vital for account holders to assess their investment and participation in the network's delegation and consensus processes.
+ """
+ encoded_coldkey = ss58_to_vec_u8(coldkey_ss58)
+
+ hex_bytes_result = await self.query_runtime_api(
+ runtime_api="StakeInfoRuntimeApi",
+ method="get_stake_info_for_coldkey",
+ params=[encoded_coldkey],
+ block_hash=block_hash,
+ reuse_block=reuse_block,
+ )
+
+ if hex_bytes_result is None:
+ return []
+
+ try:
+ bytes_result = bytes.fromhex(hex_bytes_result[2:])
+ except ValueError:
+ bytes_result = bytes.fromhex(hex_bytes_result)
+
+ return StakeInfo.list_from_vec_u8(bytes_result)
+
+ async def get_stake_for_coldkey_and_hotkey(
+ self, hotkey_ss58: str, coldkey_ss58: str, block_hash: Optional[str] = None
+ ) -> Balance:
+ """
+ Retrieves stake information associated with a specific coldkey and hotkey.
+
+ Args:
+ hotkey_ss58 (str): the hotkey SS58 address to query
+ coldkey_ss58 (str): the coldkey SS58 address to query
+ block_hash (Optional[str]): the hash of the blockchain block number for the query.
+
+ Returns:
+ Stake Balance for the given coldkey and hotkey
+ """
+ _result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Stake",
+ params=[hotkey_ss58, coldkey_ss58],
+ block_hash=block_hash,
+ )
+ return Balance.from_rao(_result or 0)
+
+ async def query_runtime_api(
+ self,
+ runtime_api: str,
+ method: str,
+ params: Optional[Union[list[list[int]], dict[str, int], list[int]]],
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> Optional[str]:
+ """
+ Queries the runtime API of the Bittensor blockchain, providing a way to interact with the underlying runtime and retrieve data encoded in Scale Bytes format. This function is essential for advanced users who need to interact with specific runtime methods and decode complex data types.
+
+ Args:
+ runtime_api (str): The name of the runtime API to query.
+ method (str): The specific method within the runtime API to call.
+            params (Optional[Union[list[list[int]], dict[str, int], list[int]]]): The parameters to pass to the method call.
+ block_hash (Optional[str]): The hash of the blockchain block number at which to perform the query.
+ reuse_block (bool): Whether to reuse the last-used block hash.
+
+ Returns:
+ The Scale Bytes encoded result from the runtime API call, or ``None`` if the call fails.
+
+ This function enables access to the deeper layers of the Bittensor blockchain, allowing for detailed and specific interactions with the network's runtime environment.
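+
+        Example:
+            A hedged sketch (assumes an initialized instance ``subtensor``), mirroring how this module itself queries the delegates runtime API::
+
+                hex_bytes = await subtensor.query_runtime_api(
+                    runtime_api="DelegateInfoRuntimeApi",
+                    method="get_delegates",
+                    params=[],
+                )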
+ """
+ call_definition = TYPE_REGISTRY["runtime_api"][runtime_api]["methods"][method]
+
+ data = (
+ "0x"
+ if params is None
+ else await self.encode_params(
+ call_definition=call_definition, params=params
+ )
+ )
+ api_method = f"{runtime_api}_{method}"
+
+ json_result = await self.substrate.rpc_request(
+ method="state_call",
+ params=[api_method, data, block_hash] if block_hash else [api_method, data],
+ reuse_block_hash=reuse_block,
+ )
+
+ if json_result is None:
+ return None
+
+ return_type = call_definition["type"]
+
+ as_scale_bytes = scalecodec.ScaleBytes(json_result["result"])
+
+ rpc_runtime_config = RuntimeConfiguration()
+ rpc_runtime_config.update_type_registry(load_type_registry_preset("legacy"))
+ rpc_runtime_config.update_type_registry(custom_rpc_type_registry)
+
+ obj = rpc_runtime_config.create_scale_object(return_type, as_scale_bytes)
+ if obj.data.to_hex() == "0x0400": # RPC returned None result
+ return None
+
+ return obj.decode()
+
+ async def get_balance(
+ self,
+ *addresses: str,
+ block_hash: Optional[str] = None,
+ ) -> dict[str, Balance]:
+ """
+ Retrieves the balance for given coldkey(s)
+
+ Args:
+ addresses (str): coldkey addresses(s).
+ block_hash (Optional[str]): the block hash, optional.
+
+ Returns:
+ Dict of {address: Balance objects}.
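+
+        Example:
+            A minimal sketch (the address is a hypothetical placeholder)::
+
+                balances = await subtensor.get_balance(
+                    "5FHne...",  # hypothetical coldkey SS58 address
+                )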
+ """
+ calls = [
+ (
+ await self.substrate.create_storage_key(
+ "System", "Account", [address], block_hash=block_hash
+ )
+ )
+ for address in addresses
+ ]
+ batch_call = await self.substrate.query_multi(calls, block_hash=block_hash)
+ results = {}
+ for item in batch_call:
+ value = item[1] or {"data": {"free": 0}}
+ results.update({item[0].params[0]: Balance(value["data"]["free"])})
+ return results
+
+ async def get_transfer_fee(
+ self, wallet: "Wallet", dest: str, value: Union["Balance", float, int]
+ ) -> "Balance":
+ """
+ Calculates the transaction fee for transferring tokens from a wallet to a specified destination address. This function simulates the transfer to estimate the associated cost, taking into account the current network conditions and transaction complexity.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet from which the transfer is initiated.
+ dest (str): The ``SS58`` address of the destination account.
+ value (Union[bittensor.utils.balance.Balance, float, int]): The amount of tokens to be transferred, specified as a Balance object, or in Tao (float) or Rao (int) units.
+
+ Returns:
+ bittensor.utils.balance.Balance: The estimated transaction fee for the transfer, represented as a Balance object.
+
+ Estimating the transfer fee is essential for planning and executing token transactions, ensuring that the wallet has sufficient funds to cover both the transfer amount and the associated costs. This function provides a crucial tool for managing financial operations within the Bittensor network.
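+
+        Example:
+            A hedged sketch (wallet setup elided; the destination is a placeholder)::
+
+                fee = await subtensor.get_transfer_fee(
+                    wallet=wallet,
+                    dest="5Dest...",  # hypothetical destination SS58 address
+                    value=1.0,  # 1 TAO expressed as a float
+                )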
+ """
+ if isinstance(value, float):
+ value = Balance.from_tao(value)
+ elif isinstance(value, int):
+ value = Balance.from_rao(value)
+
+ if isinstance(value, Balance):
+ call = await self.substrate.compose_call(
+ call_module="Balances",
+ call_function="transfer_allow_death",
+ call_params={"dest": dest, "value": value.rao},
+ )
+
+ try:
+ payment_info = await self.substrate.get_payment_info(
+ call=call, keypair=wallet.coldkeypub
+ )
+ except Exception as e:
+            logging.error(f":cross_mark: Failed to get payment info: {e}")
+ payment_info = {"partialFee": int(2e7)} # assume 0.02 Tao
+
+ return Balance.from_rao(payment_info["partialFee"])
+ else:
+ fee = Balance.from_rao(int(2e7))
+            logging.error(
+                "To calculate the transaction fee, the value must be Balance, float, or int. "
+                f"Received type: {type(value)}. Fee is {fee}."
+            )
+ return fee
+
+ async def get_total_stake_for_coldkey(
+ self,
+ *ss58_addresses,
+ block_hash: Optional[str] = None,
+ ) -> dict[str, Balance]:
+ """
+ Returns the total stake held on a coldkey.
+
+ Args:
+ ss58_addresses (tuple[str]): The SS58 address(es) of the coldkey(s)
+ block_hash (str): The hash of the block number to retrieve the stake from.
+
+ Returns:
+ Dict in view {address: Balance objects}.
+ """
+ calls = [
+ (
+ await self.substrate.create_storage_key(
+ "SubtensorModule",
+ "TotalColdkeyStake",
+ [address],
+ block_hash=block_hash,
+ )
+ )
+ for address in ss58_addresses
+ ]
+ batch_call = await self.substrate.query_multi(calls, block_hash=block_hash)
+ results = {}
+ for item in batch_call:
+ results.update({item[0].params[0]: Balance.from_rao(item[1] or 0)})
+ return results
+
+ async def get_total_stake_for_hotkey(
+ self,
+ *ss58_addresses,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> dict[str, Balance]:
+ """
+ Returns the total stake held on a hotkey.
+
+ Args:
+ ss58_addresses (tuple[str]): The SS58 address(es) of the hotkey(s)
+ block_hash (str): The hash of the block number to retrieve the stake from.
+ reuse_block (bool): Whether to reuse the last-used block hash when retrieving info.
+
+ Returns:
+ Dict {address: Balance objects}.
+ """
+ results = await self.substrate.query_multiple(
+ params=[s for s in ss58_addresses],
+ module="SubtensorModule",
+ storage_function="TotalHotkeyStake",
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ return {k: Balance.from_rao(r or 0) for (k, r) in results.items()}
+
+ async def get_netuids_for_hotkey(
+ self,
+ hotkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> list[int]:
+ """
+ Retrieves a list of subnet UIDs (netuids) for which a given hotkey is a member. This function identifies the specific subnets within the Bittensor network where the neuron associated with the hotkey is active.
+
+ Args:
+ hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey.
+ block_hash (Optional[str]): The hash of the blockchain block number at which to perform the query.
+ reuse_block (Optional[bool]): Whether to reuse the last-used block hash when retrieving info.
+
+ Returns:
+ A list of netuids where the neuron is a member.
+ """
+
+ result = await self.substrate.query_map(
+ module="SubtensorModule",
+ storage_function="IsNetworkMember",
+ params=[hotkey_ss58],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ return (
+ [record[0] async for record in result if record[1]]
+ if result and hasattr(result, "records")
+ else []
+ )
+
+ async def subnet_exists(
+ self, netuid: int, block_hash: Optional[str] = None, reuse_block: bool = False
+ ) -> bool:
+ """
+ Checks if a subnet with the specified unique identifier (netuid) exists within the Bittensor network.
+
+ Args:
+ netuid (int): The unique identifier of the subnet.
+ block_hash (Optional[str]): The hash of the blockchain block number at which to check the subnet existence.
+ reuse_block (bool): Whether to reuse the last-used block hash.
+
+ Returns:
+ `True` if the subnet exists, `False` otherwise.
+
+ This function is critical for verifying the presence of specific subnets in the network,
+ enabling a deeper understanding of the network's structure and composition.
+ """
+ result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="NetworksAdded",
+ params=[netuid],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ return result
+
+ async def get_hyperparameter(
+ self,
+ param_name: str,
+ netuid: int,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> Optional[Any]:
+ """
+ Retrieves a specified hyperparameter for a specific subnet.
+
+ Args:
+ param_name (str): The name of the hyperparameter to retrieve.
+ netuid (int): The unique identifier of the subnet.
+ block_hash (Optional[str]): The hash of blockchain block number for the query.
+ reuse_block (bool): Whether to reuse the last-used block hash.
+
+ Returns:
+ The value of the specified hyperparameter if the subnet exists, or None
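+
+        Example:
+            A minimal sketch, using a storage function this module queries elsewhere::
+
+                burn = await subtensor.get_hyperparameter(param_name="Burn", netuid=1)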
+ """
+        if not await self.subnet_exists(netuid, block_hash, reuse_block=reuse_block):
+            logging.error(f"Subnet {netuid} does not exist.")
+            return None
+
+ result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function=param_name,
+ params=[netuid],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+
+ return result
+
+ async def filter_netuids_by_registered_hotkeys(
+ self,
+ all_netuids: Iterable[int],
+ filter_for_netuids: Iterable[int],
+ all_hotkeys: Iterable[Wallet],
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> list[int]:
+ """
+ Filters a given list of all netuids for certain specified netuids and hotkeys
+
+ Args:
+ all_netuids (Iterable[int]): A list of netuids to filter.
+ filter_for_netuids (Iterable[int]): A subset of all_netuids to filter from the main list
+ all_hotkeys (Iterable[Wallet]): Hotkeys to filter from the main list
+ block_hash (str): hash of the blockchain block number at which to perform the query.
+ reuse_block (bool): whether to reuse the last-used blockchain hash when retrieving info.
+
+ Returns:
+ The filtered list of netuids.
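+
+        Example:
+            A hedged sketch (the wallets are assumed to be initialized ``Wallet`` objects)::
+
+                filtered = await subtensor.filter_netuids_by_registered_hotkeys(
+                    all_netuids=[1, 2, 3],
+                    filter_for_netuids=[2],
+                    all_hotkeys=[wallet_a, wallet_b],
+                )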
+ """
+ netuids_with_registered_hotkeys = [
+ item
+ for sublist in await asyncio.gather(
+ *[
+ self.get_netuids_for_hotkey(
+ wallet.hotkey.ss58_address,
+ reuse_block=reuse_block,
+ block_hash=block_hash,
+ )
+ for wallet in all_hotkeys
+ ]
+ )
+ for item in sublist
+ ]
+
+ if not filter_for_netuids:
+ all_netuids = netuids_with_registered_hotkeys
+
+ else:
+ filtered_netuids = [
+ netuid for netuid in all_netuids if netuid in filter_for_netuids
+ ]
+
+ registered_hotkeys_filtered = [
+ netuid
+ for netuid in netuids_with_registered_hotkeys
+ if netuid in filter_for_netuids
+ ]
+
+ # Combine both filtered lists
+ all_netuids = filtered_netuids + registered_hotkeys_filtered
+
+ return list(set(all_netuids))
+
+ async def get_existential_deposit(
+ self, block_hash: Optional[str] = None, reuse_block: bool = False
+ ) -> Balance:
+ """
+ Retrieves the existential deposit amount for the Bittensor blockchain.
+ The existential deposit is the minimum amount of TAO required for an account to exist on the blockchain.
+ Accounts with balances below this threshold can be reaped to conserve network resources.
+
+ Args:
+ block_hash (str): Block hash at which to query the deposit amount. If `None`, the current block is used.
+ reuse_block (bool): Whether to reuse the last-used blockchain block hash.
+
+ Returns:
+ The existential deposit amount.
+
+ The existential deposit is a fundamental economic parameter in the Bittensor network, ensuring efficient use of storage and preventing the proliferation of dust accounts.
+ """
+ result = await self.substrate.get_constant(
+ module_name="Balances",
+ constant_name="ExistentialDeposit",
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+
+ if result is None:
+ raise Exception("Unable to retrieve existential deposit amount.")
+
+ return Balance.from_rao(result)
+
+ async def neurons(
+ self, netuid: int, block_hash: Optional[str] = None
+ ) -> list[NeuronInfo]:
+ """
+ Retrieves a list of all neurons within a specified subnet of the Bittensor network.
+ This function provides a snapshot of the subnet's neuron population, including each neuron's attributes and network interactions.
+
+ Args:
+ netuid (int): The unique identifier of the subnet.
+ block_hash (str): The hash of the blockchain block number for the query.
+
+ Returns:
+ A list of NeuronInfo objects detailing each neuron's characteristics in the subnet.
+
+ Understanding the distribution and status of neurons within a subnet is key to comprehending the network's decentralized structure and the dynamics of its consensus and governance processes.
+ """
+ neurons_lite, weights, bonds = await asyncio.gather(
+ self.neurons_lite(netuid=netuid, block_hash=block_hash),
+ self.weights(netuid=netuid, block_hash=block_hash),
+ self.bonds(netuid=netuid, block_hash=block_hash),
+ )
+
+ weights_as_dict = {uid: w for uid, w in weights}
+ bonds_as_dict = {uid: b for uid, b in bonds}
+
+ neurons = [
+ NeuronInfo.from_weights_bonds_and_neuron_lite(
+ neuron_lite, weights_as_dict, bonds_as_dict
+ )
+ for neuron_lite in neurons_lite
+ ]
+
+ return neurons
+
+ async def neurons_lite(
+ self, netuid: int, block_hash: Optional[str] = None, reuse_block: bool = False
+ ) -> list[NeuronInfoLite]:
+ """
+ Retrieves a list of neurons in a 'lite' format from a specific subnet of the Bittensor network.
+ This function provides a streamlined view of the neurons, focusing on key attributes such as stake and network participation.
+
+ Args:
+ netuid (int): The unique identifier of the subnet.
+ block_hash (str): The hash of the blockchain block number for the query.
+ reuse_block (bool): Whether to reuse the last-used blockchain block hash.
+
+ Returns:
+ A list of simplified neuron information for the subnet.
+
+ This function offers a quick overview of the neuron population within a subnet, facilitating efficient analysis of the network's decentralized structure and neuron dynamics.
+ """
+ hex_bytes_result = await self.query_runtime_api(
+ runtime_api="NeuronInfoRuntimeApi",
+ method="get_neurons_lite",
+ params=[
+ netuid
+ ], # TODO check to see if this can accept more than one at a time
+ block_hash=block_hash,
+ reuse_block=reuse_block,
+ )
+
+ if hex_bytes_result is None:
+ return []
+
+ try:
+ bytes_result = bytes.fromhex(hex_bytes_result[2:])
+ except ValueError:
+ bytes_result = bytes.fromhex(hex_bytes_result)
+
+ return NeuronInfoLite.list_from_vec_u8(bytes_result)
+
+ async def neuron_for_uid(
+ self, uid: Optional[int], netuid: int, block_hash: Optional[str] = None
+ ) -> NeuronInfo:
+ """
+ Retrieves detailed information about a specific neuron identified by its unique identifier (UID) within a specified subnet (netuid) of the Bittensor network. This function provides a comprehensive view of a neuron's attributes, including its stake, rank, and operational status.
+
+ Args:
+ uid (int): The unique identifier of the neuron.
+ netuid (int): The unique identifier of the subnet.
+ block_hash (str): The hash of the blockchain block number for the query.
+
+ Returns:
+ Detailed information about the neuron if found, a null neuron otherwise
+
+ This function is crucial for analyzing individual neurons' contributions and status within a specific subnet, offering insights into their roles in the network's consensus and validation mechanisms.
+ """
+ if uid is None:
+ return NeuronInfo.get_null_neuron()
+
+ params = [netuid, uid, block_hash] if block_hash else [netuid, uid]
+ json_body = await self.substrate.rpc_request(
+ method="neuronInfo_getNeuron",
+ params=params, # custom rpc method
+ )
+ if not (result := json_body.get("result", None)):
+ return NeuronInfo.get_null_neuron()
+
+ bytes_result = bytes(result)
+ return NeuronInfo.from_vec_u8(bytes_result)
+
+ async def get_delegated(
+ self,
+ coldkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> list[tuple[DelegateInfo, Balance]]:
+ """
+ Retrieves a list of delegates and their associated stakes for a given coldkey. This function identifies the delegates that a specific account has staked tokens on.
+
+ Args:
+ coldkey_ss58 (str): The `SS58` address of the account's coldkey.
+ block_hash (Optional[str]): The hash of the blockchain block number for the query.
+ reuse_block (bool): Whether to reuse the last-used blockchain block hash.
+
+ Returns:
+ A list of tuples, each containing a delegate's information and staked amount.
+
+ This function is important for account holders to understand their stake allocations and their involvement in the network's delegation and consensus mechanisms.
+ """
+
+ block_hash = (
+ block_hash
+ if block_hash
+ else (self.substrate.last_block_hash if reuse_block else None)
+ )
+ encoded_coldkey = ss58_to_vec_u8(coldkey_ss58)
+ json_body = await self.substrate.rpc_request(
+ method="delegateInfo_getDelegated",
+ params=([block_hash, encoded_coldkey] if block_hash else [encoded_coldkey]),
+ )
+
+ if not (result := json_body.get("result")):
+ return []
+
+ return DelegateInfo.delegated_list_from_vec_u8(bytes(result))
+
+ async def query_identity(
+ self,
+ key: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> dict:
+ """
+ Queries the identity of a neuron on the Bittensor blockchain using the given key. This function retrieves detailed identity information about a specific neuron, which is a crucial aspect of the network's decentralized identity and governance system.
+
+ Args:
+ key (str): The key used to query the neuron's identity, typically the neuron's SS58 address.
+ block_hash (str): The hash of the blockchain block number at which to perform the query.
+ reuse_block (bool): Whether to reuse the last-used blockchain block hash.
+
+ Returns:
+ An object containing the identity information of the neuron if found, ``None`` otherwise.
+
+ The identity information can include various attributes such as the neuron's stake, rank, and other network-specific details, providing insights into the neuron's role and status within the Bittensor network.
+
+ Note:
+            See the Bittensor CLI documentation for supported identity parameters.
+ """
+
+ identity_info = await self.substrate.query(
+ module="Registry",
+ storage_function="IdentityOf",
+ params=[key],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ try:
+ return _decode_hex_identity_dict(identity_info["info"])
+ except TypeError:
+ return {}
+
+ async def weights(
+ self, netuid: int, block_hash: Optional[str] = None
+ ) -> list[tuple[int, list[tuple[int, int]]]]:
+ """
+ Retrieves the weight distribution set by neurons within a specific subnet of the Bittensor network.
+ This function maps each neuron's UID to the weights it assigns to other neurons, reflecting the network's trust and value assignment mechanisms.
+
+ Args:
+ netuid (int): The network UID of the subnet to query.
+ block_hash (str): The hash of the blockchain block for the query.
+
+ Returns:
+ A list of tuples mapping each neuron's UID to its assigned weights.
+
+ The weight distribution is a key factor in the network's consensus algorithm and the ranking of neurons, influencing their influence and reward allocation within the subnet.
+ """
+ # TODO look into seeing if we can speed this up with storage query
+ w_map_encoded = await self.substrate.query_map(
+ module="SubtensorModule",
+ storage_function="Weights",
+ params=[netuid],
+ block_hash=block_hash,
+ )
+ w_map = [(uid, w or []) async for uid, w in w_map_encoded]
+
+ return w_map
+
+ async def bonds(
+ self, netuid: int, block_hash: Optional[str] = None
+ ) -> list[tuple[int, list[tuple[int, int]]]]:
+ """
+ Retrieves the bond distribution set by neurons within a specific subnet of the Bittensor network.
+ Bonds represent the investments or commitments made by neurons in one another, indicating a level of trust and perceived value. This bonding mechanism is integral to the network's market-based approach to measuring and rewarding machine intelligence.
+
+ Args:
+ netuid (int): The network UID of the subnet to query.
+ block_hash (Optional[str]): The hash of the blockchain block number for the query.
+
+ Returns:
+ List of tuples mapping each neuron's UID to its bonds with other neurons.
+
+ Understanding bond distributions is crucial for analyzing the trust dynamics and market behavior within the subnet. It reflects how neurons recognize and invest in each other's intelligence and contributions, supporting diverse and niche systems within the Bittensor ecosystem.
+ """
+ b_map_encoded = await self.substrate.query_map(
+ module="SubtensorModule",
+ storage_function="Bonds",
+ params=[netuid],
+ block_hash=block_hash,
+ )
+ b_map = [(uid, b) async for uid, b in b_map_encoded]
+
+ return b_map
+
+ async def does_hotkey_exist(
+ self,
+ hotkey_ss58: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> bool:
+ """
+ Returns true if the hotkey is known by the chain and there are accounts.
+
+ Args:
+ hotkey_ss58 (str): The SS58 address of the hotkey.
+ block_hash (Optional[str]): The hash of the block number to check the hotkey against.
+ reuse_block (bool): Whether to reuse the last-used blockchain hash.
+
+ Returns:
+ `True` if the hotkey is known by the chain and there are accounts, `False` otherwise.
+ """
+ _result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Owner",
+ params=[hotkey_ss58],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ result = decode_account_id(_result[0])
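+        # The hardcoded address below appears to be the SS58 encoding of an
+        # all-zero public key, used on chain as the default "no owner" value.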
+ return_val = (
+ False
+ if result is None
+ else result != "5C4hrfjw9DjXZTzV3MwzrrAr9P1MJhSrvWGWqi1eSuyUpnhM"
+ )
+ return return_val
+
+ async def get_hotkey_owner(
+ self, hotkey_ss58: str, block_hash: str
+ ) -> Optional[str]:
+ """
+ Retrieves the owner of the given hotkey at a specific block hash.
+ This function queries the blockchain for the owner of the provided hotkey. If the hotkey does not exist at the specified block hash, it returns None.
+
+ Args:
+ hotkey_ss58 (str): The SS58 address of the hotkey.
+ block_hash (str): The hash of the block at which to check the hotkey ownership.
+
+ Returns:
+ Optional[str]: The SS58 address of the owner if the hotkey exists, or None if it doesn't.
+ """
+ hk_owner_query = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Owner",
+ params=[hotkey_ss58],
+ block_hash=block_hash,
+ )
+ val = decode_account_id(hk_owner_query[0])
+ if val:
+ exists = await self.does_hotkey_exist(hotkey_ss58, block_hash=block_hash)
+ else:
+ exists = False
+ hotkey_owner = val if exists else None
+ return hotkey_owner
+
+ async def sign_and_send_extrinsic(
+ self,
+ call: "GenericCall",
+ wallet: "Wallet",
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = False,
+ ) -> tuple[bool, str]:
+ """
+ Helper method to sign and submit an extrinsic call to chain.
+
+ Args:
+ call (scalecodec.types.GenericCall): a prepared Call object
+ wallet (bittensor_wallet.Wallet): the wallet whose coldkey will be used to sign the extrinsic
+ wait_for_inclusion (bool): whether to wait until the extrinsic call is included on the chain
+ wait_for_finalization (bool): whether to wait until the extrinsic call is finalized on the chain
+
+ Returns:
+ (success, error message)
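+
+        Example:
+            A hedged sketch (``dest_ss58`` and ``amount_rao`` are illustrative placeholders), composing the same transfer call used in ``get_transfer_fee``::
+
+                call = await subtensor.substrate.compose_call(
+                    call_module="Balances",
+                    call_function="transfer_allow_death",
+                    call_params={"dest": dest_ss58, "value": amount_rao},
+                )
+                success, error = await subtensor.sign_and_send_extrinsic(call, wallet)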
+ """
+ extrinsic = await self.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ ) # sign with coldkey
+ try:
+ response = await self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, ""
+ await response.process_events()
+ if await response.is_success:
+ return True, ""
+ else:
+ return False, format_error_message(
+ await response.error_message, substrate=self.substrate
+ )
+ except SubstrateRequestException as e:
+ return False, format_error_message(e, substrate=self.substrate)
+
+ async def get_children(self, hotkey: str, netuid: int) -> tuple[bool, list, str]:
+ """
+ This method retrieves the children of a given hotkey and netuid. It queries the SubtensorModule's ChildKeys storage function to get the children and formats them before returning as a tuple.
+
+ Args:
+ hotkey (str): The hotkey value.
+ netuid (int): The netuid value.
+
+ Returns:
+ A tuple containing a boolean indicating success or failure, a list of formatted children, and an error message (if applicable)
+ """
+ try:
+ children = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="ChildKeys",
+ params=[hotkey, netuid],
+ )
+ if children:
+ formatted_children = []
+                for proportion, child in children:
+                    formatted_child = decode_account_id(child[0])
+                    # Convert the U64 proportion to a plain int
+                    int_proportion = int(proportion)
+ formatted_children.append((int_proportion, formatted_child))
+ return True, formatted_children, ""
+ else:
+ return True, [], ""
+ except SubstrateRequestException as e:
+ return False, [], format_error_message(e, self.substrate)
+
+ async def get_subnet_hyperparameters(
+ self, netuid: int, block_hash: Optional[str] = None
+ ) -> Optional[Union[list, SubnetHyperparameters]]:
+ """
+ Retrieves the hyperparameters for a specific subnet within the Bittensor network. These hyperparameters define the operational settings and rules governing the subnet's behavior.
+
+ Args:
+ netuid (int): The network UID of the subnet to query.
+ block_hash (Optional[str]): The hash of the blockchain block number for the query.
+
+ Returns:
+            The subnet's hyperparameters, or an empty list if they are not available.
+
+ Understanding the hyperparameters is crucial for comprehending how subnets are configured and managed, and how they interact with the network's consensus and incentive mechanisms.
+ """
+ hex_bytes_result = await self.query_runtime_api(
+ runtime_api="SubnetInfoRuntimeApi",
+ method="get_subnet_hyperparams",
+ params=[netuid],
+ block_hash=block_hash,
+ )
+
+ if hex_bytes_result is None:
+ return []
+
+ if hex_bytes_result.startswith("0x"):
+ bytes_result = bytes.fromhex(hex_bytes_result[2:])
+ else:
+ bytes_result = bytes.fromhex(hex_bytes_result)
+
+ return SubnetHyperparameters.from_vec_u8(bytes_result)
+
+ async def get_vote_data(
+ self,
+ proposal_hash: str,
+ block_hash: Optional[str] = None,
+ reuse_block: bool = False,
+ ) -> Optional["ProposalVoteData"]:
+ """
+ Retrieves the voting data for a specific proposal on the Bittensor blockchain. This data includes information about how senate members have voted on the proposal.
+
+ Args:
+ proposal_hash (str): The hash of the proposal for which voting data is requested.
+ block_hash (Optional[str]): The hash of the blockchain block number to query the voting data.
+ reuse_block (bool): Whether to reuse the last-used blockchain block hash.
+
+ Returns:
+ An object containing the proposal's voting data, or `None` if not found.
+
+ This function is important for tracking and understanding the decision-making processes within the Bittensor network, particularly how proposals are received and acted upon by the governing body.
+ """
+ vote_data = await self.substrate.query(
+ module="Triumvirate",
+ storage_function="Voting",
+ params=[proposal_hash],
+ block_hash=block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ if vote_data is None:
+ return None
+ else:
+ return ProposalVoteData(vote_data)
+
+ async def get_delegate_identities(
+ self, block_hash: Optional[str] = None
+ ) -> dict[str, DelegatesDetails]:
+ """
+        Fetches delegate identities from the chain and GitHub. Preference is given to chain data, and missing info is filled in from GitHub. At some point we want to move away from fetching this info from GitHub entirely, but chain data is still limited in that regard.
+
+ Args:
+ block_hash (str): the hash of the blockchain block for the query
+
+ Returns:
+ Dict {ss58: DelegatesDetails, ...}
+
+ """
+        timeout = aiohttp.ClientTimeout(total=10.0)
+ async with aiohttp.ClientSession(timeout=timeout) as session:
+ identities_info, response = await asyncio.gather(
+ self.substrate.query_map(
+ module="Registry",
+ storage_function="IdentityOf",
+ block_hash=block_hash,
+ ),
+ session.get(DELEGATES_DETAILS_URL),
+ )
+
+ all_delegates_details = {
+ decode_account_id(ss58_address[0]): DelegatesDetails.from_chain_data(
+ decode_hex_identity_dict(identity["info"])
+ )
+ for ss58_address, identity in identities_info
+ }
+
+ if response.ok:
+ all_delegates: dict[str, Any] = await response.json(content_type=None)
+
+ for delegate_hotkey, delegate_details in all_delegates.items():
+ delegate_info = all_delegates_details.setdefault(
+ delegate_hotkey,
+ DelegatesDetails(
+ display=delegate_details.get("name", ""),
+ web=delegate_details.get("url", ""),
+ additional=delegate_details.get("description", ""),
+ pgp_fingerprint=delegate_details.get("fingerprint", ""),
+ ),
+ )
+ delegate_info.display = (
+ delegate_info.display or delegate_details.get("name", "")
+ )
+ delegate_info.web = delegate_info.web or delegate_details.get(
+ "url", ""
+ )
+ delegate_info.additional = (
+ delegate_info.additional
+ or delegate_details.get("description", "")
+ )
+ delegate_info.pgp_fingerprint = (
+ delegate_info.pgp_fingerprint
+ or delegate_details.get("fingerprint", "")
+ )
+
+ return all_delegates_details
+
+ async def is_hotkey_registered(self, netuid: int, hotkey_ss58: str) -> bool:
+ """Checks to see if the hotkey is registered on a given netuid"""
+ result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[netuid, hotkey_ss58],
+ )
+        return result is not None
+
+ async def get_uid_for_hotkey_on_subnet(
+ self, hotkey_ss58: str, netuid: int, block_hash: Optional[str] = None
+ ) -> Optional[int]:
+ """
+ Retrieves the unique identifier (UID) for a neuron's hotkey on a specific subnet.
+
+ Args:
+ hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey.
+ netuid (int): The unique identifier of the subnet.
+ block_hash (Optional[str]): The blockchain block_hash representation of the block id.
+
+ Returns:
+ Optional[int]: The UID of the neuron if it is registered on the subnet, ``None`` otherwise.
+
+ The UID is a critical identifier within the network, linking the neuron's hotkey to its operational and governance activities on a particular subnet.
+ """
+ result = await self.substrate.query(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[netuid, hotkey_ss58],
+ block_hash=block_hash,
+ )
+ return result
+
+ async def weights_rate_limit(self, netuid: int) -> Optional[int]:
+ """
+ Returns network WeightsSetRateLimit hyperparameter.
+
+ Args:
+ netuid (int): The unique identifier of the subnetwork.
+
+ Returns:
+ Optional[int]: The value of the WeightsSetRateLimit hyperparameter, or ``None`` if the subnetwork does not exist or the parameter is not found.
+ """
+ call = await self.get_hyperparameter(
+ param_name="WeightsSetRateLimit", netuid=netuid
+ )
+ return None if call is None else int(call)
+
+ async def blocks_since_last_update(self, netuid: int, uid: int) -> Optional[int]:
+ """
+ Returns the number of blocks since the last update for a specific UID in the subnetwork.
+
+ Args:
+ netuid (int): The unique identifier of the subnetwork.
+ uid (int): The unique identifier of the neuron.
+
+ Returns:
+ Optional[int]: The number of blocks since the last update, or ``None`` if the subnetwork or UID does not exist.
+ """
+ call = await self.get_hyperparameter(param_name="LastUpdate", netuid=netuid)
+ return None if call is None else await self.get_current_block() - int(call[uid])
+
+ # extrinsics
+
+ async def transfer(
+ self,
+ wallet: "Wallet",
+ destination: str,
+ amount: float,
+ transfer_all: bool,
+ ) -> bool:
+ """
+ Transfer token of amount to destination.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): Source wallet for the transfer.
+ destination (str): Destination address for the transfer.
+ amount (float): Amount of tokens to transfer.
+ transfer_all (bool): Flag to transfer all tokens.
+
+ Returns:
+ `True` if the transferring was successful, otherwise `False`.
+ """
+ return await transfer_extrinsic(
+ self,
+ wallet,
+ destination,
+ Balance.from_tao(amount),
+ transfer_all,
+ )
+
+ async def register(
+ self,
+ wallet: "Wallet",
+ netuid: int,
+ block_hash: Optional[str] = None,
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = True,
+ ) -> bool:
+ """
+ Register neuron by recycling some TAO.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): Bittensor wallet instance.
+ netuid (int): Subnet uniq id.
+ block_hash (Optional[str]): The hash of the blockchain block for the query.
+ wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``.
+ wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
+
+ Returns:
+ `True` if registration was successful, otherwise `False`.
+ """
+        logging.info(f"Registering on netuid {netuid} on network: {self.network}")
+
+ # Check current recycle amount
+ logging.info("Fetching recycle amount & balance.")
+ block_hash = block_hash if block_hash else await self.get_block_hash()
+ recycle_call, balance_ = await asyncio.gather(
+ self.get_hyperparameter(param_name="Burn", netuid=netuid, reuse_block=True),
+ self.get_balance(wallet.coldkeypub.ss58_address, block_hash=block_hash),
+ )
+        try:
+            current_recycle = Balance.from_rao(int(recycle_call))
+            balance: Balance = balance_[wallet.coldkeypub.ss58_address]
+        except TypeError as e:
+            logging.error(f"Unable to retrieve current recycle. {e}")
+            return False
+        except KeyError:
+            logging.error("Unable to retrieve current balance.")
+            return False
+
+ # Check balance is sufficient
+ if balance < current_recycle:
+ logging.error(
+ f"Insufficient balance {balance} to register neuron. Current recycle is {current_recycle} TAO."
+ )
+ return False
+
+ return await root_register_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuid=netuid,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ async def pow_register(
+ self: "AsyncSubtensor",
+ wallet: Wallet,
+ netuid,
+ processors,
+ update_interval,
+ output_in_place,
+ verbose,
+ use_cuda,
+ dev_id,
+ threads_per_block,
+ ):
+ """Register neuron."""
+ return await register_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuid=netuid,
+ tpb=threads_per_block,
+ update_interval=update_interval,
+ num_processes=processors,
+ cuda=use_cuda,
+ dev_id=dev_id,
+ output_in_place=output_in_place,
+ log_verbose=verbose,
+ )
+
+ async def set_weights(
+ self,
+ wallet: "Wallet",
+ netuid: int,
+ uids: Union[NDArray[np.int64], "torch.LongTensor", list],
+ weights: Union[NDArray[np.float32], "torch.FloatTensor", list],
+ version_key: int = version_as_int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+ max_retries: int = 5,
+ ):
+ """
+ Sets the inter-neuronal weights for the specified neuron. This process involves specifying the influence or trust a neuron places on other neurons in the network, which is a fundamental aspect of Bittensor's decentralized learning architecture.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights.
+ netuid (int): The unique identifier of the subnet.
+ uids (Union[NDArray[np.int64], torch.LongTensor, list]): The list of neuron UIDs that the weights are being set for.
+ weights (Union[NDArray[np.float32], torch.FloatTensor, list]): The corresponding weights to be set for each UID.
+ version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``.
+ wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``.
+ wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
+ max_retries (int): The number of maximum attempts to set weights. Default is ``5``.
+
+ Returns:
+            tuple[bool, str]: ``True`` if the setting of weights is successful, ``False`` otherwise, plus a message describing the success or potential error.
+
+        This function is crucial in shaping the network's collective intelligence, where each neuron's learning and contribution are influenced by the weights it sets towards others.
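+
+        Example:
+            A minimal sketch (uids and weights are illustrative)::
+
+                success, message = await subtensor.set_weights(
+                    wallet=wallet,
+                    netuid=1,
+                    uids=[0, 1, 2],
+                    weights=[0.5, 0.3, 0.2],
+                )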
+ """
+ uid = await self.get_uid_for_hotkey_on_subnet(
+ wallet.hotkey.ss58_address, netuid
+ )
+ retries = 0
+ success = False
+ message = "No attempt made. Perhaps it is too soon to set weights!"
+ while (
+ await self.blocks_since_last_update(netuid, uid)
+ > await self.weights_rate_limit(netuid)
+ and retries < max_retries
+ ):
+ try:
+ logging.info(
+ f"Setting weights for subnet #{netuid}. Attempt {retries + 1} of {max_retries}."
+ )
+ success, message = await set_weights_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuid=netuid,
+ uids=uids,
+ weights=weights,
+ version_key=version_key,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ except Exception as e:
+ logging.error(f"Error setting weights: {e}")
+ finally:
+ retries += 1
+
+ return success, message
+
+ async def root_set_weights(
+ self,
+ wallet: "Wallet",
+ netuids: list[int],
+ weights: list[float],
+ ) -> bool:
+ """
+ Set weights for root network.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): bittensor wallet instance.
+ netuids (list[int]): The list of subnet uids.
+ weights (list[float]): The list of weights to be set.
+
+ Returns:
+ `True` if the setting of weights is successful, `False` otherwise.
+ """
+ netuids_ = np.array(netuids, dtype=np.int64)
+ weights_ = np.array(weights, dtype=np.float32)
+ logging.info(f"Setting weights in network: {self.network}")
+ # Run the set weights operation.
+ return await set_root_weights_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuids=netuids_,
+ weights=weights_,
+ version_key=0,
+ wait_for_finalization=True,
+ wait_for_inclusion=True,
+ )
+
+ async def commit_weights(
+ self,
+ wallet: "Wallet",
+ netuid: int,
+ salt: list[int],
+ uids: Union[NDArray[np.int64], list],
+ weights: Union[NDArray[np.int64], list],
+ version_key: int = version_as_int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+ max_retries: int = 5,
+ ) -> tuple[bool, str]:
+ """
+ Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet.
+ This action serves as a commitment or snapshot of the neuron's current weight distribution.
+
+ Args:
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the weights.
+ netuid (int): The unique identifier of the subnet.
+ salt (list[int]): list of randomly generated integers as salt to generated weighted hash.
+ uids (np.ndarray): NumPy array of neuron UIDs for which weights are being committed.
+ weights (np.ndarray): NumPy array of weight values corresponding to each UID.
+ version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``.
+ wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``.
+ wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``.
+ max_retries (int): The number of maximum attempts to commit weights. Default is ``5``.
+
+ Returns:
+            tuple[bool, str]: ``True`` if the weight commitment is successful, ``False`` otherwise, plus a message describing the success or potential error.
+
+ This function allows neurons to create a tamper-proof record of their weight distribution at a specific point in time, enhancing transparency and accountability within the Bittensor network.
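+
+        Example:
+            A hedged sketch (the salt generation shown is illustrative, not prescribed)::
+
+                import secrets
+
+                salt = [secrets.randbelow(256) for _ in range(8)]
+                success, message = await subtensor.commit_weights(
+                    wallet=wallet,
+                    netuid=1,
+                    salt=salt,
+                    uids=[0, 1],
+                    weights=[100, 200],
+                )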
+ """
+ retries = 0
+ success = False
+ message = "No attempt made. Perhaps it is too soon to commit weights!"
+
+ logging.info(
+ f"Committing weights with params: netuid={netuid}, uids={uids}, weights={weights}, version_key={version_key}"
+ )
+
+ # Generate the hash of the weights
+ commit_hash = generate_weight_hash(
+ address=wallet.hotkey.ss58_address,
+ netuid=netuid,
+ uids=list(uids),
+ values=list(weights),
+ salt=salt,
+ version_key=version_key,
+ )
+
+ while retries < max_retries:
+ try:
+ success, message = await commit_weights_extrinsic(
+ subtensor=self,
+ wallet=wallet,
+ netuid=netuid,
+ commit_hash=commit_hash,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ if success:
+ break
+ except Exception as e:
+ logging.error(f"Error committing weights: {e}")
+ finally:
+ retries += 1
+
+ return success, message
diff --git a/bittensor/core/chain_data/__init__.py b/bittensor/core/chain_data/__init__.py
index 9ad1e38881..68936a6b5f 100644
--- a/bittensor/core/chain_data/__init__.py
+++ b/bittensor/core/chain_data/__init__.py
@@ -17,6 +17,6 @@
from .stake_info import StakeInfo
from .subnet_hyperparameters import SubnetHyperparameters
from .subnet_info import SubnetInfo
-from .utils import custom_rpc_type_registry
+from .utils import custom_rpc_type_registry, decode_account_id, process_stake_data
ProposalCallData = GenericCall
diff --git a/bittensor/core/extrinsics/async_registration.py b/bittensor/core/extrinsics/async_registration.py
new file mode 100644
index 0000000000..a0901a5639
--- /dev/null
+++ b/bittensor/core/extrinsics/async_registration.py
@@ -0,0 +1,1559 @@
+import asyncio
+import binascii
+import functools
+import hashlib
+import io
+import math
+import multiprocessing as mp
+import os
+import random
+import subprocess
+import time
+from contextlib import redirect_stdout
+from dataclasses import dataclass
+from datetime import timedelta
+from multiprocessing import Process, Event, Lock, Array, Value, Queue
+from multiprocessing.queues import Queue as Queue_Type
+from queue import Empty, Full
+from typing import Optional, Union, TYPE_CHECKING, Callable, Any
+
+import backoff
+import numpy as np
+from Crypto.Hash import keccak
+from bittensor_wallet import Wallet
+from rich.console import Console
+from rich.status import Status
+from substrateinterface.exceptions import SubstrateRequestException
+
+from bittensor.core.chain_data import NeuronInfo
+from bittensor.utils import format_error_message, unlock_key
+from bittensor.utils.btlogging import logging
+from bittensor.utils.formatting import millify, get_human_readable
+
+if TYPE_CHECKING:
+ from bittensor.core.async_subtensor import AsyncSubtensor
+
+
+# TODO: compare with and remove existing code (bittensor.utils.registration)
+
+
+def use_torch() -> bool:
+ """Force the use of torch over numpy for certain operations."""
+    return os.getenv("USE_TORCH") == "1"
+
+
+def legacy_torch_api_compat(func: Callable):
+ """
+ Convert function operating on numpy Input&Output to legacy torch Input&Output API if `use_torch()` is True.
+
+ Args:
+ func: Function with numpy Input/Output to be decorated.
+
+ Returns:
+ Decorated function
+ """
+
+ @functools.wraps(func)
+ def decorated(*args, **kwargs):
+ if use_torch():
+ # if argument is a Torch tensor, convert it to numpy
+ args = [
+ arg.cpu().numpy() if isinstance(arg, torch.Tensor) else arg
+ for arg in args
+ ]
+ kwargs = {
+ key: value.cpu().numpy() if isinstance(value, torch.Tensor) else value
+ for key, value in kwargs.items()
+ }
+ ret = func(*args, **kwargs)
+ if use_torch():
+ # if return value is a numpy array, convert it to Torch tensor
+ if isinstance(ret, np.ndarray):
+ ret = torch.from_numpy(ret)
+ return ret
+
+ return decorated
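+
+# Sketch of the decorator in use (hypothetical function, for illustration): with
+# USE_TORCH=1, torch.Tensor arguments are converted to numpy before the call and
+# a numpy return value is converted back to a torch.Tensor.
+#
+#     @legacy_torch_api_compat
+#     def normalize(weights: np.ndarray) -> np.ndarray:
+#         return weights / weights.sum()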
+
+
+@functools.cache
+def _get_real_torch():
+ try:
+ import torch as _real_torch
+ except ImportError:
+ _real_torch = None
+ return _real_torch
+
+
+def log_no_torch_error():
+ logging.info(
+ "This command requires torch. You can install torch with `pip install torch` and run the command again."
+ )
+
+
+@dataclass
+class POWSolution:
+ """A solution to the registration PoW problem."""
+
+ nonce: int
+ block_number: int
+ difficulty: int
+ seal: bytes
+
+ async def is_stale(self, subtensor: "AsyncSubtensor") -> bool:
+ """
+        Returns ``True`` if the POW solution is stale.
+
+        A solution is stale when the block it was solved for is more than 3 blocks behind the current block: with a current block of 105, a solution for block 101 is stale, while one for block 102 is not.
+ """
+ current_block = await subtensor.substrate.get_block_number(None)
+ return self.block_number < current_block - 3
+
+
+@dataclass
+class RegistrationStatistics:
+ """Statistics for a registration."""
+
+ time_spent_total: float
+ rounds_total: int
+ time_average: float
+ time_spent: float
+ hash_rate_perpetual: float
+ hash_rate: float
+ difficulty: int
+ block_number: int
+ block_hash: str
+
+
+class RegistrationStatisticsLogger:
+ """Logs statistics for a registration."""
+
+ console: Console
+ status: Optional[Status]
+
+ def __init__(
+ self, console_: Optional["Console"] = None, output_in_place: bool = True
+ ) -> None:
+ if console_ is None:
+ console_ = Console()
+ self.console = console_
+
+ if output_in_place:
+ self.status = self.console.status("Solving")
+ else:
+ self.status = None
+
+ def start(self) -> None:
+ if self.status is not None:
+ self.status.start()
+
+ def stop(self) -> None:
+ if self.status is not None:
+ self.status.stop()
+
+ @classmethod
+ def get_status_message(
+ cls, stats: RegistrationStatistics, verbose: bool = False
+ ) -> str:
+ """Provides a message of the current status of the block solving as a str for a logger or stdout."""
+ message = (
+ "Solving\n"
+ + f"Time Spent (total): [bold white]{timedelta(seconds=stats.time_spent_total)}[/bold white]\n"
+ + (
+ f"Time Spent This Round: {timedelta(seconds=stats.time_spent)}\n"
+ + f"Time Spent Average: {timedelta(seconds=stats.time_average)}\n"
+ if verbose
+ else ""
+ )
+ + f"Registration Difficulty: [bold white]{millify(stats.difficulty)}[/bold white]\n"
+ + f"Iters (Inst/Perp): [bold white]{get_human_readable(stats.hash_rate, 'H')}/s / "
+ + f"{get_human_readable(stats.hash_rate_perpetual, 'H')}/s[/bold white]\n"
+ + f"Block Number: [bold white]{stats.block_number}[/bold white]\n"
+ + f"Block Hash: [bold white]{stats.block_hash.encode('utf-8')}[/bold white]\n"
+ )
+ return message
+
+ def update(self, stats: RegistrationStatistics, verbose: bool = False) -> None:
+ """Passes the current status to the logger."""
+ if self.status is not None:
+ self.status.update(self.get_status_message(stats, verbose=verbose))
+ else:
+ self.console.log(self.get_status_message(stats, verbose=verbose))
+
+
+class _SolverBase(Process):
+ """
+ A process that solves the registration PoW problem.
+
+ Args:
+ proc_num: The number of the process being created.
+ num_proc: The total number of processes running.
+ update_interval: The number of nonces to try to solve before checking for a new block.
+ finished_queue: The queue to put the process number when a process finishes each update_interval. Used for calculating the average time per update_interval across all processes.
+ solution_queue: The queue to put the solution the process has found during the pow solve.
+ stop_event: The event to set by the main process when all the solver processes should stop. The solver process will check for the event after each update_interval. The solver process will stop when the event is set. Used to stop the solver processes when a solution is found.
+        curr_block: The array containing this process's current block hash. The main process will set the array to the new block hash when a new block is finalized in the network. The solver process will get the new block hash from this array when new_block_event is set.
+        curr_block_num: The value containing this process's current block number. The main process will set the value to the new block number when a new block is finalized in the network. The solver process will get the new block number from this value when new_block_event is set.
+        curr_diff: The array containing this process's current difficulty. The main process will set the array to the new difficulty when a new block is finalized in the network. The solver process will get the new difficulty from this array when new_block_event is set.
+ check_block: The lock to prevent this process from getting the new block data while the main process is updating the data.
+ limit: The limit of the pow solve for a valid solution.
+
+    Attributes:
+        new_block_event: The event set by the main process when a new block is finalized in the network. The solver process will check for the event after each update_interval; when it is set, the solver gets the new block hash and difficulty and starts solving for a new nonce.
+ """
+
+ proc_num: int
+ num_proc: int
+ update_interval: int
+ finished_queue: Queue_Type
+ solution_queue: Queue_Type
+ new_block_event: Event
+ stop_event: Event
+ hotkey_bytes: bytes
+ curr_block: Array
+ curr_block_num: Value
+ curr_diff: Array
+ check_block: Lock
+ limit: int
+
+ def __init__(
+ self,
+ proc_num,
+ num_proc,
+ update_interval,
+ finished_queue,
+ solution_queue,
+ stop_event,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ ):
+ Process.__init__(self, daemon=True)
+ self.proc_num = proc_num
+ self.num_proc = num_proc
+ self.update_interval = update_interval
+ self.finished_queue = finished_queue
+ self.solution_queue = solution_queue
+ self.new_block_event = Event()
+ self.new_block_event.clear()
+ self.curr_block = curr_block
+ self.curr_block_num = curr_block_num
+ self.curr_diff = curr_diff
+ self.check_block = check_block
+ self.stop_event = stop_event
+ self.limit = limit
+
+ def run(self):
+ raise NotImplementedError("_SolverBase is an abstract class")
+
+ @staticmethod
+ def create_shared_memory() -> tuple[Array, Value, Array]:
+ """Creates shared memory for the solver processes to use."""
+ curr_block = Array("h", 32, lock=True) # byte array
+ curr_block_num = Value("i", 0, lock=True) # int
+ curr_diff = Array("Q", [0, 0], lock=True) # [high, low]
+
+ return curr_block, curr_block_num, curr_diff
+
+
+class _Solver(_SolverBase):
+ """Performs POW Solution."""
+
+ def run(self):
+ block_number: int
+ block_and_hotkey_hash_bytes: bytes
+ block_difficulty: int
+ nonce_limit = int(math.pow(2, 64)) - 1
+
+ # Start at random nonce
+ nonce_start = random.randint(0, nonce_limit)
+ nonce_end = nonce_start + self.update_interval
+ while not self.stop_event.is_set():
+ if self.new_block_event.is_set():
+ with self.check_block:
+ block_number = self.curr_block_num.value
+ block_and_hotkey_hash_bytes = bytes(self.curr_block)
+ block_difficulty = _registration_diff_unpack(self.curr_diff)
+
+ self.new_block_event.clear()
+
+ # Do a block of nonces
+ solution = _solve_for_nonce_block(
+ nonce_start,
+ nonce_end,
+ block_and_hotkey_hash_bytes,
+ block_difficulty,
+ self.limit,
+ block_number,
+ )
+ if solution is not None:
+ self.solution_queue.put(solution)
+
+ try:
+ # Send time
+ self.finished_queue.put_nowait(self.proc_num)
+ except Full:
+ pass
+
+ nonce_start = random.randint(0, nonce_limit)
+ nonce_start = nonce_start % nonce_limit
+ nonce_end = nonce_start + self.update_interval
+
+
+class _CUDASolver(_SolverBase):
+ """Performs POW Solution using CUDA."""
+
+ dev_id: int
+ tpb: int
+
+ def __init__(
+ self,
+ proc_num,
+ num_proc,
+ update_interval,
+ finished_queue,
+ solution_queue,
+ stop_event,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ dev_id: int,
+ tpb: int,
+ ):
+ super().__init__(
+ proc_num,
+ num_proc,
+ update_interval,
+ finished_queue,
+ solution_queue,
+ stop_event,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ )
+ self.dev_id = dev_id
+ self.tpb = tpb
+
+ def run(self):
+ block_number: int = 0 # dummy value
+ block_and_hotkey_hash_bytes: bytes = b"0" * 32 # dummy value
+ block_difficulty: int = int(math.pow(2, 64)) - 1 # dummy value
+ nonce_limit = int(math.pow(2, 64)) - 1 # U64MAX
+
+ # Start at random nonce
+ nonce_start = random.randint(0, nonce_limit)
+ while not self.stop_event.is_set():
+ if self.new_block_event.is_set():
+ with self.check_block:
+ block_number = self.curr_block_num.value
+ block_and_hotkey_hash_bytes = bytes(self.curr_block)
+ block_difficulty = _registration_diff_unpack(self.curr_diff)
+
+ self.new_block_event.clear()
+
+ # Do a block of nonces
+ solution = _solve_for_nonce_block_cuda(
+ nonce_start,
+ self.update_interval,
+ block_and_hotkey_hash_bytes,
+ block_difficulty,
+ self.limit,
+ block_number,
+ self.dev_id,
+ self.tpb,
+ )
+ if solution is not None:
+ self.solution_queue.put(solution)
+
+ try:
+ # Signal that a nonce_block was finished using queue
+ # send our proc_num
+ self.finished_queue.put(self.proc_num)
+ except Full:
+ pass
+
+ # increase nonce by number of nonces processed
+ nonce_start += self.update_interval * self.tpb
+ nonce_start = nonce_start % nonce_limit
+
+
+class LazyLoadedTorch:
+ def __bool__(self):
+ return bool(_get_real_torch())
+
+ def __getattr__(self, name):
+ if real_torch := _get_real_torch():
+ return getattr(real_torch, name)
+ else:
+ log_no_torch_error()
+ raise ImportError("torch not installed")
+
+
+if TYPE_CHECKING:
+ import torch
+else:
+ torch = LazyLoadedTorch()
+
+
+class MaxSuccessException(Exception):
+ """Raised when the POW Solver has reached the max number of successful solutions."""
+
+
+class MaxAttemptsException(Exception):
+ """Raised when the POW Solver has reached the max number of attempts."""
+
+
+async def is_hotkey_registered(
+ subtensor: "AsyncSubtensor", netuid: int, hotkey_ss58: str
+) -> bool:
+ """Checks to see if the hotkey is registered on a given netuid"""
+ _result = await subtensor.substrate.query(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[netuid, hotkey_ss58],
+ )
+    return _result is not None
+
+
+async def register_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ netuid: int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ max_allowed_attempts: int = 3,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: Union[list[int], int] = 0,
+ tpb: int = 256,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ log_verbose: bool = False,
+) -> bool:
+ """Registers the wallet to the chain.
+
+ Args:
+ subtensor (bittensor.core.async_subtensor.AsyncSubtensor): initialized AsyncSubtensor object to use for chain interactions
+ wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+ netuid (int): The ``netuid`` of the subnet to register on.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout.
+ max_allowed_attempts (int): Maximum number of attempts to register the wallet.
+ output_in_place (bool): Whether the POW solving should be outputted to the console as it goes along.
+ cuda (bool): If `True`, the wallet should be registered using CUDA device(s).
+ dev_id: The CUDA device id to use, or a list of device ids.
+ tpb: The number of threads per block (CUDA).
+ num_processes: The number of processes to use to register.
+ update_interval: The number of nonces to solve between updates.
+ log_verbose: If `True`, the registration process will log more information.
+
+ Returns:
+ `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`.
+ """
+
+ async def get_neuron_for_pubkey_and_subnet():
+ uid = await subtensor.substrate.query(
+ "SubtensorModule", "Uids", [netuid, wallet.hotkey.ss58_address]
+ )
+ if uid is None:
+ return NeuronInfo.get_null_neuron()
+
+ params = [netuid, uid]
+ json_body = await subtensor.substrate.rpc_request(
+ method="neuronInfo_getNeuron",
+ params=params,
+ )
+
+ if not (result := json_body.get("result", None)):
+ return NeuronInfo.get_null_neuron()
+
+ return NeuronInfo.from_vec_u8(bytes(result))
+
+ logging.debug("Checking subnet status")
+ if not await subtensor.subnet_exists(netuid):
+ logging.error(
+ f":cross_mark: Failed error: subnet {netuid} does not exist."
+ )
+ return False
+
+ logging.info(
+ f":satellite: Checking Account on subnet {netuid} ..."
+ )
+ neuron = await get_neuron_for_pubkey_and_subnet()
+ if not neuron.is_null:
+ logging.debug(
+ f"Wallet {wallet} is already registered on subnet {neuron.netuid} with uid{neuron.uid}."
+ )
+ return True
+
+ if not torch:
+ log_no_torch_error()
+ return False
+
+ # Attempt rolling registration.
+ attempts = 1
+ pow_result: Optional[POWSolution]
+ while True:
+ logging.info(
+ f":satellite: Registering... ({attempts}/{max_allowed_attempts})"
+ )
+ # Solve latest POW.
+ if cuda:
+ if not torch.cuda.is_available():
+ return False
+ pow_result = await create_pow(
+ subtensor,
+ wallet,
+ netuid,
+ output_in_place,
+ cuda=cuda,
+ dev_id=dev_id,
+ tpb=tpb,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+ else:
+ pow_result = await create_pow(
+ subtensor,
+ wallet,
+ netuid,
+ output_in_place,
+ cuda=cuda,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+
+ # pow failed
+ if not pow_result:
+ # might be registered already on this subnet
+ is_registered = await is_hotkey_registered(
+ subtensor, netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
+ )
+ if is_registered:
+ logging.error(
+ f":white_heavy_check_mark: Already registered on netuid: {netuid}"
+ )
+ return True
+
+ # pow successful, proceed to submit pow to chain for registration
+ else:
+ logging.info(":satellite: Submitting POW...")
+ # check if pow result is still valid
+ while not await pow_result.is_stale(subtensor=subtensor):
+ call = await subtensor.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="register",
+ call_params={
+ "netuid": netuid,
+ "block_number": pow_result.block_number,
+ "nonce": pow_result.nonce,
+ "work": [int(byte_) for byte_ in pow_result.seal],
+ "hotkey": wallet.hotkey.ss58_address,
+ "coldkey": wallet.coldkeypub.ss58_address,
+ },
+ )
+ extrinsic = await subtensor.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.hotkey
+ )
+ response = await subtensor.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ if not wait_for_finalization and not wait_for_inclusion:
+ success, err_msg = True, ""
+ else:
+ await response.process_events()
+ success = await response.is_success
+ if not success:
+ success, err_msg = (
+ False,
+ format_error_message(
+ await response.error_message,
+ substrate=subtensor.substrate,
+ ),
+ )
+                        # Look up the error here:
+ # https://github.com/opentensor/subtensor/blob/development/pallets/subtensor/src/errors.rs
+
+ if "HotKeyAlreadyRegisteredInSubNet" in err_msg:
+ logging.info(
+ f":white_heavy_check_mark: Already Registered on subnet: {netuid}."
+ )
+ return True
+ logging.error(f":cross_mark: Failed: {err_msg}")
+ await asyncio.sleep(0.5)
+
+ # Successful registration, final check for neuron and pubkey
+ if success:
+ logging.info(":satellite: Checking Registration status...")
+ is_registered = await is_hotkey_registered(
+ subtensor,
+ netuid=netuid,
+ hotkey_ss58=wallet.hotkey.ss58_address,
+ )
+ if is_registered:
+ logging.success(
+ ":white_heavy_check_mark: Registered"
+ )
+ return True
+ else:
+ # neuron not found, try again
+ logging.error(
+ ":cross_mark: Unknown error. Neuron not found."
+ )
+ continue
+ else:
+ # Exited loop because pow is no longer valid.
+ logging.error("POW is stale.")
+ # Try again.
+ continue
+
+ if attempts < max_allowed_attempts:
+ # Failed registration, retry pow
+ attempts += 1
+ logging.error(
+ f":satellite: Failed registration, retrying pow ... ({attempts}/{max_allowed_attempts})"
+ )
+ else:
+ # Failed to register after max attempts.
+ logging.error("No more attempts.")
+ return False
+
+
+async def run_faucet_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ max_allowed_attempts: int = 3,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: int = 0,
+ tpb: int = 256,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ log_verbose: bool = False,
+ max_successes: int = 3,
+) -> tuple[bool, str]:
+ """Runs a continual POW to get a faucet of TAO on the test net.
+
+ Args:
+ subtensor: The subtensor interface object used to run the extrinsic
+ wallet: Bittensor wallet object.
+ wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout.
+ max_allowed_attempts: Maximum number of attempts to register the wallet.
+ output_in_place: Whether to output logging data as the process runs.
+ cuda: If `True`, the wallet should be registered using CUDA device(s).
+ dev_id: The CUDA device id to use
+ tpb: The number of threads per block (CUDA).
+ num_processes: The number of processes to use to register.
+ update_interval: The number of nonces to solve between updates.
+ log_verbose: If `True`, the registration process will log more information.
+ max_successes: The maximum number of successful faucet runs for the wallet.
+
+ Returns:
+        tuple[bool, str]: `True` and a status message if the extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is also `True`.
+ """
+
+ if not torch:
+ log_no_torch_error()
+ return False, "Requires torch"
+
+ # Unlock coldkey
+ if not (unlock := unlock_key(wallet)).success:
+ return False, unlock.message
+
+ # Get previous balance.
+ old_balance = await subtensor.get_balance(wallet.coldkeypub.ss58_address)
+
+ # Attempt rolling registration.
+ attempts = 1
+ successes = 1
+ while True:
+ try:
+ pow_result = None
+ while pow_result is None or await pow_result.is_stale(subtensor=subtensor):
+ # Solve latest POW.
+ if cuda:
+ if not torch.cuda.is_available():
+ return False, "CUDA is not available."
+ pow_result: Optional[POWSolution] = await create_pow(
+ subtensor,
+ wallet,
+ -1,
+ output_in_place,
+ cuda=cuda,
+ dev_id=dev_id,
+ tpb=tpb,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+ else:
+ pow_result: Optional[POWSolution] = await create_pow(
+ subtensor,
+ wallet,
+ -1,
+ output_in_place,
+ cuda=cuda,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+ call = await subtensor.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="faucet",
+ call_params={
+ "block_number": pow_result.block_number,
+ "nonce": pow_result.nonce,
+ "work": [int(byte_) for byte_ in pow_result.seal],
+ },
+ )
+ extrinsic = await subtensor.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ )
+ response = await subtensor.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ # process if registration successful, try again if pow is still valid
+ await response.process_events()
+ if not await response.is_success:
+ logging.error(
+ f":cross_mark: Failed: {format_error_message(await response.error_message, subtensor.substrate)}"
+ )
+ if attempts == max_allowed_attempts:
+ raise MaxAttemptsException
+ attempts += 1
+ # Wait a bit before trying again
+                await asyncio.sleep(1)
+
+ # Successful registration
+ else:
+ new_balance = await subtensor.get_balance(
+ wallet.coldkeypub.ss58_address
+ )
+ logging.info(
+ f"Balance: {old_balance[wallet.coldkeypub.ss58_address]} :arrow_right: {new_balance[wallet.coldkeypub.ss58_address]}"
+ )
+ old_balance = new_balance
+
+ if successes == max_successes:
+ raise MaxSuccessException
+
+ attempts = 1 # Reset attempts on success
+ successes += 1
+
+ except KeyboardInterrupt:
+ return True, "Done"
+
+ except MaxSuccessException:
+ return True, f"Max successes reached: {3}"
+
+ except MaxAttemptsException:
+ return False, f"Max attempts reached: {max_allowed_attempts}"
+
+
+async def _check_for_newest_block_and_update(
+ subtensor: "AsyncSubtensor",
+ netuid: int,
+ old_block_number: int,
+ hotkey_bytes: bytes,
+ curr_diff: Array,
+ curr_block: Array,
+ curr_block_num: Value,
+ update_curr_block: "Callable",
+ check_block: Lock,
+ solvers: list[_Solver],
+ curr_stats: "RegistrationStatistics",
+) -> int:
+ """
+ Checks for a new block and updates the current block information if a new block is found.
+
+ Args:
+ subtensor: The subtensor object to use for getting the current block.
+ netuid: The netuid to use for retrieving the difficulty.
+ old_block_number: The old block number to check against.
+ hotkey_bytes: The bytes of the hotkey's pubkey.
+ curr_diff: The current difficulty as a multiprocessing array.
+ curr_block: Where the current block is stored as a multiprocessing array.
+ curr_block_num: Where the current block number is stored as a multiprocessing value.
+ update_curr_block: A function that updates the current block.
+ check_block: A mp lock that is used to check for a new block.
+ solvers: A list of solvers to update the current block for.
+ curr_stats: The current registration statistics to update.
+
+ Returns:
+ The current block number.
+ """
+ block_number = await subtensor.substrate.get_block_number(None)
+ if block_number != old_block_number:
+ old_block_number = block_number
+ # update block information
+ block_number, difficulty, block_hash = await _get_block_with_retry(
+ subtensor=subtensor, netuid=netuid
+ )
+ block_bytes = bytes.fromhex(block_hash[2:])
+
+ update_curr_block(
+ curr_diff,
+ curr_block,
+ curr_block_num,
+ block_number,
+ block_bytes,
+ difficulty,
+ hotkey_bytes,
+ check_block,
+ )
+ # Set new block events for each solver
+
+ for worker in solvers:
+ worker.new_block_event.set()
+
+ # update stats
+ curr_stats.block_number = block_number
+ curr_stats.block_hash = block_hash
+ curr_stats.difficulty = difficulty
+
+ return old_block_number
+
+
+async def _block_solver(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ num_processes: int,
+ netuid: int,
+ dev_id: list[int],
+ tpb: int,
+ update_interval: int,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ n_samples,
+ alpha_,
+ output_in_place,
+ log_verbose,
+ cuda: bool,
+):
+ """Shared code used by the Solvers to solve the POW solution."""
+ limit = int(math.pow(2, 256)) - 1
+
+ # Establish communication queues
+ # See the _Solver class for more information on the queues.
+ stop_event = Event()
+ stop_event.clear()
+
+    solution_queue = Queue()
+    if cuda:
+        # The CUDA path passes num_processes=None; create a worker per CUDA
+        # device and resolve the count before sizing finished_queues below.
+        num_processes = len(dev_id)
+    finished_queues = [Queue() for _ in range(num_processes)]
+    check_block = Lock()
+
+    hotkey_bytes = (
+        wallet.coldkeypub.public_key if netuid == -1 else wallet.hotkey.public_key
+    )
+
+    if cuda:
+ solvers = [
+ _CUDASolver(
+ i,
+ num_processes,
+ update_interval,
+ finished_queues[i],
+ solution_queue,
+ stop_event,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ dev_id[i],
+ tpb,
+ )
+ for i in range(num_processes)
+ ]
+ else:
+ # Start consumers
+ solvers = [
+ _Solver(
+ i,
+ num_processes,
+ update_interval,
+ finished_queues[i],
+ solution_queue,
+ stop_event,
+ curr_block,
+ curr_block_num,
+ curr_diff,
+ check_block,
+ limit,
+ )
+ for i in range(num_processes)
+ ]
+
+ # Get first block
+ block_number, difficulty, block_hash = await _get_block_with_retry(
+ subtensor=subtensor, netuid=netuid
+ )
+
+ block_bytes = bytes.fromhex(block_hash[2:])
+ old_block_number = block_number
+ # Set to current block
+ _update_curr_block(
+ curr_diff,
+ curr_block,
+ curr_block_num,
+ block_number,
+ block_bytes,
+ difficulty,
+ hotkey_bytes,
+ check_block,
+ )
+
+ # Set new block events for each solver to start at the initial block
+ for worker in solvers:
+ worker.new_block_event.set()
+
+ for worker in solvers:
+ worker.start() # start the solver processes
+
+ start_time = time.time() # time that the registration started
+ time_last = start_time # time that the last work blocks completed
+
+ curr_stats = RegistrationStatistics(
+ time_spent_total=0.0,
+ time_average=0.0,
+ rounds_total=0,
+ time_spent=0.0,
+ hash_rate_perpetual=0.0,
+ hash_rate=0.0,
+ difficulty=difficulty,
+ block_number=block_number,
+ block_hash=block_hash,
+ )
+
+ start_time_perpetual = time.time()
+
+ logger = RegistrationStatisticsLogger(output_in_place=output_in_place)
+ logger.start()
+
+ solution = None
+
+ hash_rates = [0] * n_samples # The last n true hash_rates
+ weights = [alpha_**i for i in range(n_samples)] # weights decay by alpha
+
+    timeout = 0.15
+ while netuid == -1 or not await is_hotkey_registered(
+ subtensor, netuid, wallet.hotkey.ss58_address
+ ):
+ # Wait until a solver finds a solution
+ try:
+ solution = solution_queue.get(block=True, timeout=timeout)
+ if solution is not None:
+ break
+ except Empty:
+ # No solution found, try again
+ pass
+
+ # check for new block
+ old_block_number = await _check_for_newest_block_and_update(
+ subtensor=subtensor,
+ netuid=netuid,
+ hotkey_bytes=hotkey_bytes,
+ old_block_number=old_block_number,
+ curr_diff=curr_diff,
+ curr_block=curr_block,
+ curr_block_num=curr_block_num,
+ curr_stats=curr_stats,
+ update_curr_block=_update_curr_block,
+ check_block=check_block,
+ solvers=solvers,
+ )
+
+ num_time = 0
+ for finished_queue in finished_queues:
+ try:
+ finished_queue.get(timeout=0.1)
+ num_time += 1
+
+ except Empty:
+ continue
+
+ time_now = time.time() # get current time
+ time_since_last = time_now - time_last # get time since last work block(s)
+ if num_time > 0 and time_since_last > 0.0:
+ # create EWMA of the hash_rate to make measure more robust
+
+ if cuda:
+ hash_rate_ = (num_time * tpb * update_interval) / time_since_last
+ else:
+ hash_rate_ = (num_time * update_interval) / time_since_last
+ hash_rates.append(hash_rate_)
+ hash_rates.pop(0) # remove the 0th data point
+ curr_stats.hash_rate = sum(
+ [hash_rates[i] * weights[i] for i in range(n_samples)]
+ ) / (sum(weights))
+
+ # update time last to now
+ time_last = time_now
+
+ curr_stats.time_average = (
+ curr_stats.time_average * curr_stats.rounds_total
+ + curr_stats.time_spent
+ ) / (curr_stats.rounds_total + num_time)
+ curr_stats.rounds_total += num_time
+
+ # Update stats
+ curr_stats.time_spent = time_since_last
+ new_time_spent_total = time_now - start_time_perpetual
+ if cuda:
+ curr_stats.hash_rate_perpetual = (
+ curr_stats.rounds_total * (tpb * update_interval)
+ ) / new_time_spent_total
+ else:
+ curr_stats.hash_rate_perpetual = (
+ curr_stats.rounds_total * update_interval
+ ) / new_time_spent_total
+ curr_stats.time_spent_total = new_time_spent_total
+
+ # Update the logger
+ logger.update(curr_stats, verbose=log_verbose)
+
+ # exited while, solution contains the nonce or wallet is registered
+ stop_event.set() # stop all other processes
+ logger.stop()
+
+ # terminate and wait for all solvers to exit
+ _terminate_workers_and_wait_for_exit(solvers)
+
+ return solution
+
+
+async def _solve_for_difficulty_fast_cuda(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ netuid: int,
+ output_in_place: bool = True,
+ update_interval: int = 50_000,
+ tpb: int = 512,
+ dev_id: Union[list[int], int] = 0,
+ n_samples: int = 10,
+ alpha_: float = 0.80,
+ log_verbose: bool = False,
+) -> Optional["POWSolution"]:
+ """
+ Solves the registration fast using CUDA
+
+ Args:
+ subtensor: The subtensor node to grab blocks
+ wallet: The wallet to register
+ netuid: The netuid of the subnet to register to.
+ output_in_place: If true, prints the output in place, otherwise prints to new lines
+ update_interval: The number of nonces to try before checking for more blocks
+ tpb: The number of threads per block. CUDA param that should match the GPU capability
+ dev_id: The CUDA device IDs to execute the registration on, either a single device or a list of devices
+ n_samples: The number of samples of the hash_rate to keep for the EWMA
+ alpha_: The alpha for the EWMA for the hash_rate calculation
+ log_verbose: If true, prints more verbose logging of the registration metrics.
+
+ Note:
+ The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust.
+ """
+ if isinstance(dev_id, int):
+ dev_id = [dev_id]
+ elif dev_id is None:
+ dev_id = [0]
+
+ if update_interval is None:
+ update_interval = 50_000
+
+ if not torch.cuda.is_available():
+ raise Exception("CUDA not available")
+
+ # Set mp start to use spawn so CUDA doesn't complain
+ with _UsingSpawnStartMethod(force=True):
+ curr_block, curr_block_num, curr_diff = _CUDASolver.create_shared_memory()
+
+ solution = await _block_solver(
+ subtensor=subtensor,
+ wallet=wallet,
+ num_processes=None,
+ netuid=netuid,
+ dev_id=dev_id,
+ tpb=tpb,
+ update_interval=update_interval,
+ curr_block=curr_block,
+ curr_block_num=curr_block_num,
+ curr_diff=curr_diff,
+ n_samples=n_samples,
+ alpha_=alpha_,
+ output_in_place=output_in_place,
+ log_verbose=log_verbose,
+ cuda=True,
+ )
+
+ return solution
+
+
+async def _solve_for_difficulty_fast(
+ subtensor,
+ wallet: Wallet,
+ netuid: int,
+ output_in_place: bool = True,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ n_samples: int = 10,
+ alpha_: float = 0.80,
+ log_verbose: bool = False,
+) -> Optional[POWSolution]:
+ """
+ Solves the POW for registration using multiprocessing.
+
+ Args:
+ subtensor: Subtensor to connect to for block information and to submit.
+ wallet: wallet to use for registration.
+ netuid: The netuid of the subnet to register to.
+ output_in_place: If true, prints the status in place. Otherwise, prints the status on a new line.
+ num_processes: Number of processes to use.
+ update_interval: Number of nonces to solve before updating block information.
+ n_samples: The number of samples of the hash_rate to keep for the EWMA
+ alpha_: The alpha for the EWMA for the hash_rate calculation
+ log_verbose: If true, prints more verbose logging of the registration metrics.
+
+ Notes:
+ The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust.
+ We can also modify the update interval to do smaller blocks of work, while still updating the block information after a different number of nonces, to increase the transparency of the process while still keeping the speed.
+ """
+ if not num_processes:
+ # get the number of allowed processes for this process
+        num_processes = max(1, get_cpu_count())
+
+ if update_interval is None:
+ update_interval = 50_000
+
+ curr_block, curr_block_num, curr_diff = _Solver.create_shared_memory()
+
+ solution = await _block_solver(
+ subtensor=subtensor,
+ wallet=wallet,
+ num_processes=num_processes,
+ netuid=netuid,
+ dev_id=None,
+ tpb=None,
+ update_interval=update_interval,
+ curr_block=curr_block,
+ curr_block_num=curr_block_num,
+ curr_diff=curr_diff,
+ n_samples=n_samples,
+ alpha_=alpha_,
+ output_in_place=output_in_place,
+ log_verbose=log_verbose,
+ cuda=False,
+ )
+
+ return solution
+
+
+def _terminate_workers_and_wait_for_exit(
+ workers: list[Union[Process, Queue_Type]],
+) -> None:
+ for worker in workers:
+ if isinstance(worker, Queue_Type):
+ worker.join_thread()
+ else:
+            worker.join(3.0)
+            if worker.is_alive():
+                # Process.join() does not raise on timeout; check and terminate explicitly.
+                worker.terminate()
+ try:
+ worker.close()
+ except ValueError:
+ worker.terminate()
+
+
+# TODO verify this works with async
+@backoff.on_exception(backoff.constant, Exception, interval=1, max_tries=3)
+async def _get_block_with_retry(
+ subtensor: "AsyncSubtensor", netuid: int
+) -> tuple[int, int, str]:
+ """
+ Gets the current block number, difficulty, and block hash from the substrate node.
+
+ Args:
+ subtensor: The subtensor object to use to get the block number, difficulty, and block hash.
+ netuid: The netuid of the network to get the block number, difficulty, and block hash from.
+
+ Returns:
+ The current block number, difficulty of the subnet, block hash
+
+ Raises:
+ Exception: If the block hash is None.
+ ValueError: If the difficulty is None.
+ """
+ block_number = await subtensor.substrate.get_block_number(None)
+ block_hash = await subtensor.substrate.get_block_hash(
+ block_number
+ ) # TODO check if I need to do all this
+ try:
+ difficulty = (
+ 1_000_000
+ if netuid == -1
+ else int(
+ await subtensor.get_hyperparameter(
+ param_name="Difficulty", netuid=netuid, block_hash=block_hash
+ )
+ )
+ )
+ except TypeError:
+ raise ValueError("Chain error. Difficulty is None")
+ except SubstrateRequestException:
+ raise Exception(
+ "Network error. Could not connect to substrate to get block hash"
+ )
+ return block_number, difficulty, block_hash
+
+
+def _registration_diff_unpack(packed_diff: Array) -> int:
+ """Unpacks the packed two 32-bit integers into one 64-bit integer. Little endian."""
+ return int(packed_diff[0] << 32 | packed_diff[1])
+
+
+def _registration_diff_pack(diff: int, packed_diff: Array):
+ """Packs the difficulty into two 32-bit integers. Little endian."""
+ packed_diff[0] = diff >> 32
+ packed_diff[1] = diff & 0xFFFFFFFF # low 32 bits
+
+
+class _UsingSpawnStartMethod:
+ def __init__(self, force: bool = False):
+ self._old_start_method = None
+ self._force = force
+
+ def __enter__(self):
+ self._old_start_method = mp.get_start_method(allow_none=True)
+ if self._old_start_method is None:
+ self._old_start_method = "spawn" # default to spawn
+
+ mp.set_start_method("spawn", force=self._force)
+
+ def __exit__(self, *args):
+ # restore the old start method
+ mp.set_start_method(self._old_start_method, force=True)
+
+
+async def create_pow(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ netuid: int,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: Union[list[int], int] = 0,
+ tpb: int = 256,
+    num_processes: Optional[int] = None,
+    update_interval: Optional[int] = None,
+ log_verbose: bool = False,
+) -> Optional[POWSolution]:
+ """
+ Creates a proof of work for the given subtensor and wallet.
+
+ Args:
+ subtensor: The subtensor to create a proof of work for.
+ wallet: The wallet to create a proof of work for.
+ netuid: The netuid for the subnet to create a proof of work for.
+ output_in_place: If true, prints the progress of the proof of work to the console in-place. Meaning the progress is printed on the same lines.
+ cuda: If true, uses CUDA to solve the proof of work.
+ dev_id: The CUDA device id(s) to use. If cuda is true and dev_id is a list, then multiple CUDA devices will be used to solve the proof of work.
+ tpb: The number of threads per block to use when solving the proof of work. Should be a multiple of 32.
+ num_processes: The number of processes to use when solving the proof of work. If None, then the number of processes is equal to the number of CPU cores.
+ update_interval: The number of nonces to run before checking for a new block.
+ log_verbose: If true, prints the progress of the proof of work more verbosely.
+
+ Returns:
+ The proof of work solution or None if the wallet is already registered or there is a different error.
+
+ Raises:
+ ValueError: If the subnet does not exist.
+ """
+ if netuid != -1:
+ if not await subtensor.subnet_exists(netuid=netuid):
+ raise ValueError(f"Subnet {netuid} does not exist")
+
+ if cuda:
+ solution: Optional[POWSolution] = await _solve_for_difficulty_fast_cuda(
+ subtensor,
+ wallet,
+ netuid=netuid,
+ output_in_place=output_in_place,
+ dev_id=dev_id,
+ tpb=tpb,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+ else:
+ solution: Optional[POWSolution] = await _solve_for_difficulty_fast(
+ subtensor,
+ wallet,
+ netuid=netuid,
+ output_in_place=output_in_place,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+
+ return solution
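+
+# Usage sketch (CPU path, hypothetical values; assumes an initialized
+# AsyncSubtensor and a wallet with a hotkey):
+#
+#     pow_result = await create_pow(subtensor, wallet, netuid=1, num_processes=4)
+#     if pow_result and not await pow_result.is_stale(subtensor=subtensor):
+#         ...  # submit the solution, e.g. via register_extrinsic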
+
+
+def _solve_for_nonce_block_cuda(
+ nonce_start: int,
+ update_interval: int,
+ block_and_hotkey_hash_bytes: bytes,
+ difficulty: int,
+ limit: int,
+ block_number: int,
+ dev_id: int,
+ tpb: int,
+) -> Optional[POWSolution]:
+ """
+    Tries to solve the POW on a CUDA device for a block of nonces (nonce_start, nonce_start + update_interval * tpb).
+ """
+ solution, seal = solve_cuda(
+ nonce_start,
+ update_interval,
+ tpb,
+ block_and_hotkey_hash_bytes,
+ difficulty,
+ limit,
+ dev_id,
+ )
+
+ if solution != -1:
+ # Check if solution is valid (i.e. not -1)
+ return POWSolution(solution, block_number, difficulty, seal)
+
+ return None
+
+
+def _solve_for_nonce_block(
+ nonce_start: int,
+ nonce_end: int,
+ block_and_hotkey_hash_bytes: bytes,
+ difficulty: int,
+ limit: int,
+ block_number: int,
+) -> Optional[POWSolution]:
+ """
+ Tries to solve the POW for a block of nonces (nonce_start, nonce_end)
+ """
+ for nonce in range(nonce_start, nonce_end):
+ # Create seal.
+ seal = _create_seal_hash(block_and_hotkey_hash_bytes, nonce)
+
+ # Check if seal meets difficulty
+ if _seal_meets_difficulty(seal, difficulty, limit):
+ # Found a solution, save it.
+ return POWSolution(nonce, block_number, difficulty, seal)
+
+ return None
+
+
+class CUDAException(Exception):
+ """An exception raised when an error occurs in the CUDA environment."""
+
+
+def _hex_bytes_to_u8_list(hex_bytes: bytes):
+ hex_chunks = [int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2)]
+ return hex_chunks
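+
+# Worked example: _hex_bytes_to_u8_list(b"ff00") == [255, 0].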
+
+
+def _create_seal_hash(block_and_hotkey_hash_bytes: bytes, nonce: int) -> bytes:
+ """
+ Create a cryptographic seal hash from the given block and hotkey hash bytes and nonce.
+
+ This function generates a seal hash by combining the given block and hotkey hash bytes with a nonce.
+ It first converts the nonce to a byte representation, then concatenates it with the first 64 hex characters of the block and hotkey hash bytes. The result is then hashed using SHA-256 followed by the Keccak-256 algorithm to produce the final seal hash.
+
+ Args:
+ block_and_hotkey_hash_bytes (bytes): The combined hash bytes of the block and hotkey.
+ nonce (int): The nonce value used for hashing.
+
+ Returns:
+ The resulting seal hash.
+ """
+ nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little"))
+ pre_seal = nonce_bytes + binascii.hexlify(block_and_hotkey_hash_bytes)[:64]
+ seal_sh256 = hashlib.sha256(bytearray(_hex_bytes_to_u8_list(pre_seal))).digest()
+ kec = keccak.new(digest_bits=256)
+ seal = kec.update(seal_sh256).digest()
+ return seal
+
+
+def _seal_meets_difficulty(seal: bytes, difficulty: int, limit: int) -> bool:
+ """Determines if a seal meets the specified difficulty"""
+ seal_number = int.from_bytes(seal, "big")
+ product = seal_number * difficulty
+ return product < limit
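+
+# Worked example (hypothetical numbers): with limit = 2**256 - 1 and
+# difficulty = 1_000_000, a seal passes only when its big-endian integer value
+# is below (2**256 - 1) // 1_000_000, i.e. roughly 2**236.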
+
+
+def _hash_block_with_hotkey(block_bytes: bytes, hotkey_bytes: bytes) -> bytes:
+ """Hashes the block with the hotkey using Keccak-256 to get 32 bytes"""
+ kec = keccak.new(digest_bits=256)
+ kec = kec.update(bytearray(block_bytes + hotkey_bytes))
+ block_and_hotkey_hash_bytes = kec.digest()
+ return block_and_hotkey_hash_bytes
+
+
+def _update_curr_block(
+ curr_diff: Array,
+ curr_block: Array,
+ curr_block_num: Value,
+ block_number: int,
+ block_bytes: bytes,
+ diff: int,
+ hotkey_bytes: bytes,
+ lock: Lock,
+):
+ """
+ Update the current block data with the provided block information and difficulty.
+
+ This function updates the current block and its difficulty in a thread-safe manner. It sets the current block
+ number, hashes the block with the hotkey, updates the current block bytes, and packs the difficulty.
+
+ curr_diff: Shared array to store the current difficulty.
+ curr_block: Shared array to store the current block data.
+ curr_block_num: Shared value to store the current block number.
+ block_number: The block number to set as the current block number.
+ block_bytes: The block data bytes to be hashed with the hotkey.
+ diff: The difficulty value to be packed into the current difficulty array.
+ hotkey_bytes: The hotkey bytes used for hashing the block.
+ lock: A lock to ensure thread-safe updates.
+ """
+ with lock:
+ curr_block_num.value = block_number
+ # Hash the block with the hotkey
+ block_and_hotkey_hash_bytes = _hash_block_with_hotkey(block_bytes, hotkey_bytes)
+ for i in range(32):
+ curr_block[i] = block_and_hotkey_hash_bytes[i]
+ _registration_diff_pack(diff, curr_diff)
+
+
+def get_cpu_count() -> int:
+ try:
+ return len(os.sched_getaffinity(0))
+ except AttributeError:
+ # macOS does not have sched_getaffinity
+ return os.cpu_count()
+
+
+def solve_cuda(
+ nonce_start: np.int64,
+ update_interval: np.int64,
+ tpb: int,
+ block_and_hotkey_hash_bytes: bytes,
+ difficulty: int,
+ limit: int,
+ dev_id: int = 0,
+) -> tuple[np.int64, bytes]:
+ """
+ Solves the PoW problem using CUDA.
+
+    Args:
+        nonce_start: Starting nonce.
+        update_interval: Number of nonces to solve before updating block information.
+        tpb: Threads per block.
+        block_and_hotkey_hash_bytes: Keccak(bytes of the block hash + bytes of the hotkey), 64 bytes.
+        difficulty: Difficulty of the PoW problem.
+        limit: Upper limit of the nonce.
+        dev_id: The CUDA device ID.
+
+    Returns:
+        (nonce, seal) corresponding to the solution. Returns -1 for the nonce if no solution is found.
+ """
+
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError("Please install cubit")
+
+ upper = int(limit // difficulty)
+
+ upper_bytes = upper.to_bytes(32, byteorder="little", signed=False)
+
+ # Call cython function
+ # int blockSize, uint64 nonce_start, uint64 update_interval, const unsigned char[:] limit,
+ # const unsigned char[:] block_bytes, int dev_id
+ block_and_hotkey_hash_hex = binascii.hexlify(block_and_hotkey_hash_bytes)[:64]
+
+ solution = cubit.solve_cuda(
+ tpb,
+ nonce_start,
+ update_interval,
+ upper_bytes,
+ block_and_hotkey_hash_hex,
+ dev_id,
+ ) # 0 is first GPU
+ seal = None
+ if solution != -1:
+ seal = _create_seal_hash(block_and_hotkey_hash_hex, solution)
+ if _seal_meets_difficulty(seal, difficulty, limit):
+ return solution, seal
+ else:
+ return -1, b"\x00" * 32
+
+ return solution, seal
+
+
+def reset_cuda():
+ """
+ Resets the CUDA environment.
+ """
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError("Please install cubit")
+
+ cubit.reset_cuda()
+
+
+def log_cuda_errors() -> str:
+ """
+ Logs any CUDA errors.
+ """
+ try:
+ import cubit
+ except ImportError:
+ raise ImportError("Please install cubit")
+
+ f = io.StringIO()
+ with redirect_stdout(f):
+ cubit.log_cuda_errors()
+
+ s = f.getvalue()
+
+ return s
diff --git a/bittensor/core/extrinsics/async_root.py b/bittensor/core/extrinsics/async_root.py
new file mode 100644
index 0000000000..2bf8a4c714
--- /dev/null
+++ b/bittensor/core/extrinsics/async_root.py
@@ -0,0 +1,232 @@
+import asyncio
+from typing import Union, TYPE_CHECKING
+
+import numpy as np
+from bittensor_wallet import Wallet
+from numpy.typing import NDArray
+from substrateinterface.exceptions import SubstrateRequestException
+
+from bittensor.utils import u16_normalized_float, format_error_message, unlock_key
+from bittensor.utils.btlogging import logging
+from bittensor.utils.weight_utils import (
+ normalize_max_weight,
+ convert_weights_and_uids_for_emit,
+)
+
+if TYPE_CHECKING:
+ from bittensor.core.async_subtensor import AsyncSubtensor
+
+
+async def get_limits(subtensor: "AsyncSubtensor") -> tuple[int, float]:
+ """
+ Retrieves the minimum allowed weights and maximum weight limit for the given subnet.
+
+ These values are fetched asynchronously using `asyncio.gather` to run both requests concurrently.
+
+ Args:
+ subtensor (AsyncSubtensor): The AsyncSubtensor object used to interface with the network's substrate node.
+
+ Returns:
+ tuple[int, float]: A tuple containing:
+ - `min_allowed_weights` (int): The minimum allowed weights.
+ - `max_weight_limit` (float): The maximum weight limit, normalized to a float value.
+ """
+ # Get weight restrictions.
+ maw, mwl = await asyncio.gather(
+ subtensor.get_hyperparameter("MinAllowedWeights", netuid=0),
+ subtensor.get_hyperparameter("MaxWeightsLimit", netuid=0),
+ )
+ min_allowed_weights = int(maw)
+ max_weight_limit = u16_normalized_float(int(mwl))
+ return min_allowed_weights, max_weight_limit
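+
+# Usage sketch (assumes an initialized AsyncSubtensor named `subtensor`):
+#
+#     min_allowed_weights, max_weight_limit = await get_limits(subtensor)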
+
+
+async def root_register_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ netuid: int,
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = True,
+) -> bool:
+ """Registers the wallet to root network.
+
+ Arguments:
+ subtensor (bittensor.core.async_subtensor.AsyncSubtensor): The AsyncSubtensor object
+ wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+ netuid (int): Subnet uid.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout.
+
+ Returns:
+ `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`.
+ """
+
+ if not (unlock := unlock_key(wallet)).success:
+ logging.error(unlock.message)
+ return False
+
+ logging.debug(
+ f"Checking if hotkey ({wallet.hotkey_str}) is registered on root."
+ )
+ is_registered = await subtensor.is_hotkey_registered(
+ netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
+ )
+ if is_registered:
+ logging.error(
+ ":white_heavy_check_mark: Already registered on root network."
+ )
+ return True
+
+ logging.info(":satellite: Registering to root network...")
+ call = await subtensor.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="root_register",
+ call_params={"hotkey": wallet.hotkey.ss58_address},
+ )
+ success, err_msg = await subtensor.sign_and_send_extrinsic(
+ call,
+ wallet=wallet,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ if not success:
+ logging.error(f":cross_mark: Failed error: {err_msg}")
+        await asyncio.sleep(0.5)
+ return False
+
+ # Successful registration, final check for neuron and pubkey
+ else:
+ uid = await subtensor.substrate.query(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[netuid, wallet.hotkey.ss58_address],
+ )
+ if uid is not None:
+ logging.info(
+ f":white_heavy_check_mark: Registered with UID {uid}."
+ )
+ return True
+ else:
+ # neuron not found, try again
+ logging.error(":cross_mark: Unknown error. Neuron not found.")
+ return False
+
+
+async def set_root_weights_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ netuids: Union[NDArray[np.int64], list[int]],
+ weights: Union[NDArray[np.float32], list[float]],
+ version_key: int = 0,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> bool:
+ """Sets the given weights and values on chain for wallet hotkey account.
+
+ Arguments:
+ subtensor (bittensor.core.async_subtensor.AsyncSubtensor): The AsyncSubtensor object
+ wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+ netuids (Union[NDArray[np.int64], list[int]]): The `netuid` of the subnet to set weights for.
+        weights (Union[NDArray[np.float32], list[float]]): Weights to set. These must be `float`s and must correspond to the passed `netuid`s.
+ version_key (int): The version key of the validator.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout.
+
+ Returns:
+ `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`.
+ """
+
+ async def _do_set_weights():
+ call = await subtensor.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="set_root_weights",
+ call_params={
+ "dests": weight_uids,
+ "weights": weight_vals,
+ "netuid": 0,
+ "version_key": version_key,
+ "hotkey": wallet.hotkey.ss58_address,
+ },
+ )
+ # Period dictates how long the extrinsic will stay as part of waiting pool
+ extrinsic = await subtensor.substrate.create_signed_extrinsic(
+ call=call,
+ keypair=wallet.coldkey,
+ era={"period": 5},
+ )
+ response = await subtensor.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "Not waiting for finalization or inclusion."
+
+ await response.process_events()
+ if await response.is_success:
+ return True, "Successfully set weights."
+ else:
+ return False, await response.error_message
+
+ my_uid = await subtensor.substrate.query(
+ "SubtensorModule", "Uids", [0, wallet.hotkey.ss58_address]
+ )
+
+ if my_uid is None:
+ logging.error("Your hotkey is not registered to the root network.")
+ return False
+
+ if not (unlock := unlock_key(wallet)).success:
+ logging.error(unlock.message)
+ return False
+
+ # First convert types.
+ if isinstance(netuids, list):
+ netuids = np.array(netuids, dtype=np.int64)
+ if isinstance(weights, list):
+ weights = np.array(weights, dtype=np.float32)
+
+ logging.debug("Fetching weight limits")
+ min_allowed_weights, max_weight_limit = await get_limits(subtensor)
+
+ # Get non zero values.
+ non_zero_weight_idx = np.argwhere(weights > 0).squeeze(axis=1)
+ non_zero_weights = weights[non_zero_weight_idx]
+ if non_zero_weights.size < min_allowed_weights:
+ raise ValueError(
+ "The minimum number of weights required to set weights is {}, got {}".format(
+ min_allowed_weights, non_zero_weights.size
+ )
+ )
+
+ # Normalize the weights to max value.
+ logging.info("Normalizing weights")
+ formatted_weights = normalize_max_weight(x=weights, limit=max_weight_limit)
+ logging.info(
+ f"Raw weights -> Normalized weights: {weights} -> {formatted_weights}"
+ )
+
+ try:
+ logging.info(":satellite: Setting root weights...")
+        weight_uids, weight_vals = convert_weights_and_uids_for_emit(
+            netuids, formatted_weights
+        )
+
+ success, error_message = await _do_set_weights()
+
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True
+
+ if success is True:
+ logging.info(":white_heavy_check_mark: Finalized")
+ return True
+ else:
+ fmt_err = format_error_message(error_message, subtensor.substrate)
+ logging.error(f":cross_mark: Failed error: {fmt_err}")
+ return False
+
+ except SubstrateRequestException as e:
+ fmt_err = format_error_message(e, subtensor.substrate)
+ logging.error(f":cross_mark: Failed error: {fmt_err}")
+ return False
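+
+# Usage sketch (hypothetical values; assumes an initialized AsyncSubtensor and
+# an unlocked wallet whose hotkey is registered on the root network):
+#
+#     ok = await set_root_weights_extrinsic(
+#         subtensor,
+#         wallet,
+#         netuids=[1, 2],
+#         weights=[0.6, 0.4],
+#         wait_for_inclusion=True,
+#     )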
diff --git a/bittensor/core/extrinsics/async_transfer.py b/bittensor/core/extrinsics/async_transfer.py
new file mode 100644
index 0000000000..e4190023d0
--- /dev/null
+++ b/bittensor/core/extrinsics/async_transfer.py
@@ -0,0 +1,187 @@
+import asyncio
+from typing import TYPE_CHECKING
+
+from bittensor_wallet import Wallet
+from substrateinterface.exceptions import SubstrateRequestException
+
+from bittensor.core.settings import NETWORK_EXPLORER_MAP
+from bittensor.utils import (
+ format_error_message,
+ get_explorer_url_for_network,
+ is_valid_bittensor_address_or_public_key,
+ unlock_key,
+)
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
+
+if TYPE_CHECKING:
+ from bittensor.core.async_subtensor import AsyncSubtensor
+
+
+async def transfer_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: Wallet,
+ destination: str,
+ amount: "Balance",
+ transfer_all: bool = False,
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = False,
+ keep_alive: bool = True,
+) -> bool:
+ """Transfers funds from this wallet to the destination public key address.
+
+ Args:
+ subtensor (bittensor.core.async_subtensor.AsyncSubtensor): initialized AsyncSubtensor object used for transfer
+ wallet (bittensor_wallet.Wallet): Bittensor wallet object to make transfer from.
+ destination (str): Destination public key address (ss58_address or ed25519) of recipient.
+ amount (bittensor.utils.balance.Balance): Amount to stake as Bittensor balance.
+ transfer_all (bool): Whether to transfer all funds from this wallet to the destination address.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout.
+ keep_alive (bool): If set, keeps the account alive by keeping the balance above the existential deposit.
+
+ Returns:
+ success (bool): Flag is `True` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is `True`, regardless of its inclusion.
+ """
+
+ async def get_transfer_fee() -> Balance:
+ """
+ Calculates the transaction fee for transferring tokens from a wallet to a specified destination address.
+ This function simulates the transfer to estimate the associated cost, taking into account the current
+ network conditions and transaction complexity.
+ """
+ call = await subtensor.substrate.compose_call(
+ call_module="Balances",
+ call_function="transfer_allow_death",
+ call_params={"dest": destination, "value": amount.rao},
+ )
+
+ try:
+ payment_info = await subtensor.substrate.get_payment_info(
+ call=call, keypair=wallet.coldkeypub
+ )
+ except SubstrateRequestException as e:
+ payment_info = {"partialFee": int(2e7)} # assume 0.02 Tao
+ logging.error(f":cross_mark: Failed to get payment info:")
+ logging.error(f"\t\t{format_error_message(e, subtensor.substrate)}")
+ logging.error(
+ f"\t\tDefaulting to default transfer fee: {payment_info['partialFee']}"
+ )
+
+ return Balance.from_rao(payment_info["partialFee"])
+
+ async def do_transfer() -> tuple[bool, str, str]:
+ """
+ Makes transfer from wallet to destination public key address.
+
+ Returns:
+ success, block hash, formatted error message
+ """
+ call = await subtensor.substrate.compose_call(
+ call_module="Balances",
+ call_function="transfer_allow_death",
+ call_params={"dest": destination, "value": amount.rao},
+ )
+ extrinsic = await subtensor.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ )
+ response = await subtensor.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "", ""
+
+ # Otherwise continue with finalization.
+ await response.process_events()
+ if await response.is_success:
+ block_hash_ = response.block_hash
+ return True, block_hash_, ""
+ else:
+ return (
+ False,
+ "",
+ format_error_message(
+ await response.error_message, substrate=subtensor.substrate
+ ),
+ )
+
+ # Validate destination address.
+ if not is_valid_bittensor_address_or_public_key(destination):
+ logging.error(
+ f":cross_mark: Invalid destination SS58 address: {destination}"
+ )
+ return False
+ logging.info(f"Initiating transfer on network: {subtensor.network}")
+ # Unlock wallet coldkey.
+ if not (unlock := unlock_key(wallet)).success:
+ logging.error(unlock.message)
+ return False
+
+ # Check balance.
+ logging.info(
+ f":satellite: Checking balance and fees on chain {subtensor.network}"
+ )
+ # check existential deposit and fee
+ logging.debug("Fetching existential and fee")
+ block_hash = await subtensor.substrate.get_chain_head()
+ account_balance_, existential_deposit = await asyncio.gather(
+ subtensor.get_balance(wallet.coldkeypub.ss58_address, block_hash=block_hash),
+ subtensor.get_existential_deposit(block_hash=block_hash),
+ )
+ account_balance = account_balance_[wallet.coldkeypub.ss58_address]
+ fee = await get_transfer_fee()
+
+    if not keep_alive:
+        # If keep_alive is disabled, ignore the existential deposit so the account can be drained.
+        existential_deposit = Balance(0)
+
+ # Check if we have enough balance.
+ if transfer_all is True:
+ amount = account_balance - fee - existential_deposit
+ if amount < Balance(0):
+ logging.error("Not enough balance to transfer")
+ return False
+
+ if account_balance < (amount + fee + existential_deposit):
+ logging.error(":cross_mark: Not enough balance")
+ logging.error(f"\t\tBalance:\t{account_balance}")
+ logging.error(f"\t\tAmount:\t{amount}")
+ logging.error(f"\t\tFor fee:\t{fee}")
+ return False
+
+ logging.info(":satellite: Transferring...")
+ logging.info(f"[green]Block Hash: {block_hash}")
+
+ if subtensor.network == "finney":
+ logging.debug("Fetching explorer URLs")
+ explorer_urls = get_explorer_url_for_network(
+ subtensor.network, block_hash, NETWORK_EXPLORER_MAP
+ )
+ if explorer_urls != {} and explorer_urls:
+ logging.info(
+ f"[green]Opentensor Explorer Link: {explorer_urls.get('opentensor')}"
+ )
+ logging.info(
+ f"[green]Taostats Explorer Link: {explorer_urls.get('taostats')}"
+ )
+ else:
+ logging.error(f":cross_mark: Failed: {err_msg}")
+
+ if success:
+ logging.info(":satellite: Checking Balance...")
+ new_balance = await subtensor.get_balance(
+ wallet.coldkeypub.ss58_address, reuse_block=False
+ )
+ logging.info(
+ f"Balance: [blue]{account_balance} :arrow_right: [green]{new_balance[wallet.coldkeypub.ss58_address]}"
+ )
+ return True
+
+ return False
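
For orientation, here is a minimal usage sketch of the new async transfer path. The wallet name and destination address are placeholders, and this assumes `AsyncSubtensor` can be used as an async context manager to establish its connection:

```python
import asyncio

from bittensor_wallet import Wallet

from bittensor.core.async_subtensor import AsyncSubtensor
from bittensor.core.extrinsics.async_transfer import transfer_extrinsic
from bittensor.utils.balance import Balance


async def main() -> None:
    wallet = Wallet(name="my_wallet")  # placeholder wallet; coldkey must be unlockable
    async with AsyncSubtensor(network="test") as subtensor:
        success = await transfer_extrinsic(
            subtensor=subtensor,
            wallet=wallet,
            destination="5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",  # placeholder SS58
            amount=Balance.from_tao(0.1),
            wait_for_inclusion=True,
        )
        print(f"Transfer succeeded: {success}")


asyncio.run(main())
```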
diff --git a/bittensor/core/extrinsics/async_weights.py b/bittensor/core/extrinsics/async_weights.py
new file mode 100644
index 0000000000..926ce94c2c
--- /dev/null
+++ b/bittensor/core/extrinsics/async_weights.py
@@ -0,0 +1,256 @@
+"""This module provides functionality for setting weights on the Bittensor network."""
+
+from typing import Union, TYPE_CHECKING, Optional
+
+import numpy as np
+from numpy.typing import NDArray
+
+import bittensor.utils.weight_utils as weight_utils
+from bittensor.core.settings import version_as_int
+from bittensor.utils import format_error_message
+from bittensor.utils.btlogging import logging
+from bittensor.utils.registration import torch, use_torch
+
+if TYPE_CHECKING:
+ from bittensor_wallet import Wallet
+ from bittensor.core.async_subtensor import AsyncSubtensor
+
+
+async def _do_set_weights(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ uids: list[int],
+ vals: list[int],
+ netuid: int,
+ version_key: int = version_as_int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> tuple[bool, Optional[str]]: # (success, error_message)
+ """
+ Internal method to send a transaction to the Bittensor blockchain, setting weights
+ for specified neurons. This method constructs and submits the transaction, handling
+ retries and blockchain communication.
+
+ Args:
+        subtensor (bittensor.core.async_subtensor.AsyncSubtensor): Async Subtensor instance.
+        wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights.
+ uids (List[int]): List of neuron UIDs for which weights are being set.
+ vals (List[int]): List of weight values corresponding to each UID.
+ netuid (int): Unique identifier for the network.
+ version_key (int, optional): Version key for compatibility with the network.
+ wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block.
+ wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain.
+
+ Returns:
+ Tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message.
+
+ This method is vital for the dynamic weighting mechanism in Bittensor, where neurons adjust their
+ trust in other neurons based on observed performance and contributions.
+ """
+
+ call = await subtensor.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="set_weights",
+ call_params={
+ "dests": uids,
+ "weights": vals,
+ "netuid": netuid,
+ "version_key": version_key,
+ },
+ )
+    # Period dictates how long the extrinsic will stay as part of the waiting pool
+ extrinsic = await subtensor.substrate.create_signed_extrinsic(
+ call=call,
+ keypair=wallet.hotkey,
+ era={"period": 5},
+ )
+ response = await subtensor.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "Not waiting for finalization or inclusion."
+
+ await response.process_events()
+ if await response.is_success:
+ return True, "Successfully set weights."
+ else:
+        return False, format_error_message(
+            await response.error_message, substrate=subtensor.substrate
+        )
+
+
+async def set_weights_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ netuid: int,
+ uids: Union[NDArray[np.int64], "torch.LongTensor", list],
+ weights: Union[NDArray[np.float32], "torch.FloatTensor", list],
+ version_key: int = 0,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> tuple[bool, str]:
+ """Sets the given weights and values on chain for wallet hotkey account.
+
+ Args:
+        subtensor (bittensor.core.async_subtensor.AsyncSubtensor): Bittensor AsyncSubtensor object.
+        wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+ netuid (int): The ``netuid`` of the subnet to set weights for.
+ uids (Union[NDArray[np.int64], torch.LongTensor, list]): The ``uint64`` uids of destination neurons.
+ weights (Union[NDArray[np.float32], torch.FloatTensor, list]): The weights to set. These must be ``float`` s and correspond to the passed ``uid`` s.
+ version_key (int): The version key of the validator.
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout.
+
+ Returns:
+ success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
+ """
+ # First convert types.
+ if use_torch():
+ if isinstance(uids, list):
+ uids = torch.tensor(uids, dtype=torch.int64)
+ if isinstance(weights, list):
+ weights = torch.tensor(weights, dtype=torch.float32)
+ else:
+ if isinstance(uids, list):
+ uids = np.array(uids, dtype=np.int64)
+ if isinstance(weights, list):
+ weights = np.array(weights, dtype=np.float32)
+
+ # Reformat and normalize.
+ weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit(
+ uids, weights
+ )
+
+    logging.info(
+        f":satellite: Setting weights on {subtensor.network} ..."
+    )
+ try:
+ success, error_message = await _do_set_weights(
+ subtensor=subtensor,
+ wallet=wallet,
+ netuid=netuid,
+ uids=weight_uids,
+ vals=weight_vals,
+ version_key=version_key,
+ wait_for_finalization=wait_for_finalization,
+ wait_for_inclusion=wait_for_inclusion,
+ )
+
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "Not waiting for finalization or inclusion."
+
+ if success is True:
+ message = "Successfully set weights and Finalized."
+ logging.success(f":white_heavy_check_mark: {message}")
+ return True, message
+ else:
+ logging.error(f"Failed set weights. Error: {error_message}")
+ return False, error_message
+
+ except Exception as error:
+ logging.error(f":cross_mark: Failed set weights. Error: {error}")
+ return False, str(error)
+
+
+async def _do_commit_weights(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ netuid: int,
+ commit_hash: str,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> tuple[bool, Optional[str]]:
+ """
+ Internal method to send a transaction to the Bittensor blockchain, committing the hash of a neuron's weights.
+ This method constructs and submits the transaction, handling retries and blockchain communication.
+
+ Args:
+        subtensor (bittensor.core.async_subtensor.AsyncSubtensor): The async subtensor instance used for blockchain interaction.
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the weights.
+ netuid (int): The unique identifier of the subnet.
+ commit_hash (str): The hash of the neuron's weights to be committed.
+ wait_for_inclusion (bool): Waits for the transaction to be included in a block.
+ wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.
+
+ Returns:
+ tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message.
+
+ This method ensures that the weight commitment is securely recorded on the Bittensor blockchain, providing a verifiable record of the neuron's weight distribution at a specific point in time.
+ """
+ call = await subtensor.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="commit_weights",
+ call_params={
+ "netuid": netuid,
+ "commit_hash": commit_hash,
+ },
+ )
+ extrinsic = await subtensor.substrate.create_signed_extrinsic(
+ call=call,
+ keypair=wallet.hotkey,
+ )
+    response = await subtensor.substrate.submit_extrinsic(
+        extrinsic,
+        wait_for_inclusion=wait_for_inclusion,
+        wait_for_finalization=wait_for_finalization,
+    )
+
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, None
+
+ await response.process_events()
+ if await response.is_success:
+ return True, None
+ else:
+        return False, format_error_message(
+            await response.error_message, substrate=subtensor.substrate
+        )
+
+
+async def commit_weights_extrinsic(
+ subtensor: "AsyncSubtensor",
+ wallet: "Wallet",
+ netuid: int,
+ commit_hash: str,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+) -> tuple[bool, str]:
+ """
+ Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet.
+    This function is a wrapper around the `_do_commit_weights` method.
+
+ Args:
+        subtensor (bittensor.core.async_subtensor.AsyncSubtensor): The async subtensor instance used for blockchain interaction.
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the weights.
+ netuid (int): The unique identifier of the subnet.
+ commit_hash (str): The hash of the neuron's weights to be committed.
+ wait_for_inclusion (bool): Waits for the transaction to be included in a block.
+ wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.
+
+ Returns:
+        tuple[bool, str]: ``True`` if the weight commitment is successful, ``False`` otherwise, along with a `msg`
+            string describing the success or potential error.
+
+ This function provides a user-friendly interface for committing weights to the Bittensor blockchain, ensuring proper error handling and user interaction when required.
+ """
+
+ success, error_message = await _do_commit_weights(
+ subtensor=subtensor,
+ wallet=wallet,
+ netuid=netuid,
+ commit_hash=commit_hash,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+
+ if success:
+ success_message = "Successfully committed weights."
+ logging.info(success_message)
+ return True, success_message
+ else:
+ logging.error(f"Failed to commit weights: {error_message}")
+ return False, error_message
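
Before submission, `set_weights_extrinsic` funnels the raw uids and float weights through `weight_utils.convert_weights_and_uids_for_emit`. A small offline sketch of that step (the exact u16 scaling is the library's behavior, not defined by this diff):

```python
import numpy as np

from bittensor.utils import weight_utils

uids = np.array([0, 1, 2], dtype=np.int64)
weights = np.array([0.1, 0.3, 0.6], dtype=np.float32)

# Zero weights are dropped and the remainder rescaled to u16 integers;
# the largest weight is expected to map to U16_MAX (65535).
weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit(uids, weights)
print(weight_uids, weight_vals)
```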
diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py
index 3e69598c06..ef93a15d3e 100644
--- a/bittensor/core/extrinsics/commit_weights.py
+++ b/bittensor/core/extrinsics/commit_weights.py
@@ -19,8 +19,6 @@
from typing import Optional, TYPE_CHECKING
-from retry import retry
-
from bittensor.core.extrinsics.utils import submit_extrinsic
from bittensor.utils import format_error_message
from bittensor.utils.btlogging import logging
@@ -60,37 +58,33 @@ def do_commit_weights(
This method ensures that the weight commitment is securely recorded on the Bittensor blockchain, providing a verifiable record of the neuron's weight distribution at a specific point in time.
"""
- @retry(delay=1, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry():
- call = self.substrate.compose_call(
- call_module="SubtensorModule",
- call_function="commit_weights",
- call_params={
- "netuid": netuid,
- "commit_hash": commit_hash,
- },
- )
- extrinsic = self.substrate.create_signed_extrinsic(
- call=call,
- keypair=wallet.hotkey,
- )
- response = submit_extrinsic(
- substrate=self.substrate,
- extrinsic=extrinsic,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
-
- if not wait_for_finalization and not wait_for_inclusion:
- return True, None
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="commit_weights",
+ call_params={
+ "netuid": netuid,
+ "commit_hash": commit_hash,
+ },
+ )
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call,
+ keypair=wallet.hotkey,
+ )
+ response = submit_extrinsic(
+ substrate=self.substrate,
+ extrinsic=extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
- response.process_events()
- if response.is_success:
- return True, None
- else:
- return False, response.error_message
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, None
- return make_substrate_call_with_retry()
+ response.process_events()
+ if response.is_success:
+ return True, None
+ else:
+ return False, response.error_message
def commit_weights_extrinsic(
@@ -133,7 +127,9 @@ def commit_weights_extrinsic(
logging.info(success_message)
return True, success_message
else:
- error_message = format_error_message(error_message)
+ error_message = format_error_message(
+ error_message, substrate=subtensor.substrate
+ )
logging.error(f"Failed to commit weights: {error_message}")
return False, error_message
@@ -172,40 +168,36 @@ def do_reveal_weights(
This method ensures that the weight revelation is securely recorded on the Bittensor blockchain, providing transparency and accountability for the neuron's weight distribution.
"""
- @retry(delay=1, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry():
- call = self.substrate.compose_call(
- call_module="SubtensorModule",
- call_function="reveal_weights",
- call_params={
- "netuid": netuid,
- "uids": uids,
- "values": values,
- "salt": salt,
- "version_key": version_key,
- },
- )
- extrinsic = self.substrate.create_signed_extrinsic(
- call=call,
- keypair=wallet.hotkey,
- )
- response = submit_extrinsic(
- substrate=self.substrate,
- extrinsic=extrinsic,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
-
- if not wait_for_finalization and not wait_for_inclusion:
- return True, None
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="reveal_weights",
+ call_params={
+ "netuid": netuid,
+ "uids": uids,
+ "values": values,
+ "salt": salt,
+ "version_key": version_key,
+ },
+ )
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call,
+ keypair=wallet.hotkey,
+ )
+ response = submit_extrinsic(
+ substrate=self.substrate,
+ extrinsic=extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
- response.process_events()
- if response.is_success:
- return True, None
- else:
- return False, response.error_message
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, None
- return make_substrate_call_with_retry()
+ response.process_events()
+ if response.is_success:
+ return True, None
+ else:
+ return False, response.error_message
def reveal_weights_extrinsic(
@@ -257,6 +249,8 @@ def reveal_weights_extrinsic(
logging.info(success_message)
return True, success_message
else:
- error_message = format_error_message(error_message)
+ error_message = format_error_message(
+ error_message, substrate=subtensor.substrate
+ )
logging.error(f"Failed to reveal weights: {error_message}")
return False, error_message
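
The commit hash submitted by `commit_weights_extrinsic` is produced by `generate_weight_hash` (imported in `async_subtensor.py` above). A hedged sketch of the commit side of commit-reveal; the keyword names reflect my reading of that helper's signature and the concrete values are placeholders:

```python
import secrets

from bittensor.core.settings import version_as_int
from bittensor.utils.weight_utils import generate_weight_hash

hotkey_ss58 = "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY"  # placeholder hotkey
salt = [secrets.randbelow(65536) for _ in range(8)]

commit_hash = generate_weight_hash(
    address=hotkey_ss58,
    netuid=1,
    uids=[0, 1],
    values=[32768, 65535],
    version_key=version_as_int,
    salt=salt,
)
# The same uids, values, salt, and version_key must be supplied later to
# do_reveal_weights for the reveal to match this commitment.
print(commit_hash)
```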
diff --git a/bittensor/core/extrinsics/registration.py b/bittensor/core/extrinsics/registration.py
index 97c7332074..57bf9e7a56 100644
--- a/bittensor/core/extrinsics/registration.py
+++ b/bittensor/core/extrinsics/registration.py
@@ -18,10 +18,8 @@
import time
from typing import Union, Optional, TYPE_CHECKING
-from bittensor_wallet.errors import KeyFileError
-from retry import retry
-from bittensor.utils import format_error_message
+from bittensor.utils import format_error_message, unlock_key
from bittensor.utils.btlogging import logging
from bittensor.utils.networking import ensure_connected
from bittensor.utils.registration import (
@@ -59,44 +57,39 @@ def _do_pow_register(
success (bool): ``True`` if the extrinsic was included in a block.
error (Optional[str]): ``None`` on success or not waiting for inclusion/finalization, otherwise the error message.
"""
+ # create extrinsic call
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="register",
+ call_params={
+ "netuid": netuid,
+ "block_number": pow_result.block_number,
+ "nonce": pow_result.nonce,
+ "work": [int(byte_) for byte_ in pow_result.seal],
+ "hotkey": wallet.hotkey.ss58_address,
+ "coldkey": wallet.coldkeypub.ss58_address,
+ },
+ )
+ extrinsic = self.substrate.create_signed_extrinsic(call=call, keypair=wallet.hotkey)
+ response = self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
- @retry(delay=1, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry():
- # create extrinsic call
- call = self.substrate.compose_call(
- call_module="SubtensorModule",
- call_function="register",
- call_params={
- "netuid": netuid,
- "block_number": pow_result.block_number,
- "nonce": pow_result.nonce,
- "work": [int(byte_) for byte_ in pow_result.seal],
- "hotkey": wallet.hotkey.ss58_address,
- "coldkey": wallet.coldkeypub.ss58_address,
- },
- )
- extrinsic = self.substrate.create_signed_extrinsic(
- call=call, keypair=wallet.hotkey
- )
- response = self.substrate.submit_extrinsic(
- extrinsic,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
-
- # We only wait here if we expect finalization.
- if not wait_for_finalization and not wait_for_inclusion:
- return True, None
-
- # process if registration successful, try again if pow is still valid
- response.process_events()
- if not response.is_success:
- return False, format_error_message(response.error_message)
- # Successful registration
- else:
- return True, None
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, None
- return make_substrate_call_with_retry()
+ # process if registration successful, try again if pow is still valid
+ response.process_events()
+ if not response.is_success:
+ return False, format_error_message(
+ response.error_message, substrate=self.substrate
+ )
+ # Successful registration
+ else:
+ return True, None
def register_extrinsic(
@@ -295,39 +288,37 @@ def _do_burned_register(
Tuple[bool, Optional[str]]: A tuple containing a boolean indicating success or failure, and an optional error message.
"""
- @retry(delay=1, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry():
- # create extrinsic call
- call = self.substrate.compose_call(
- call_module="SubtensorModule",
- call_function="burned_register",
- call_params={
- "netuid": netuid,
- "hotkey": wallet.hotkey.ss58_address,
- },
- )
- extrinsic = self.substrate.create_signed_extrinsic(
- call=call, keypair=wallet.coldkey
- )
- response = self.substrate.submit_extrinsic(
- extrinsic,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
+ # create extrinsic call
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="burned_register",
+ call_params={
+ "netuid": netuid,
+ "hotkey": wallet.hotkey.ss58_address,
+ },
+ )
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ )
+ response = self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
- # We only wait here if we expect finalization.
- if not wait_for_finalization and not wait_for_inclusion:
- return True, None
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, None
- # process if registration successful, try again if pow is still valid
- response.process_events()
- if not response.is_success:
- return False, format_error_message(response.error_message)
- # Successful registration
- else:
- return True, None
-
- return make_substrate_call_with_retry()
+ # process if registration successful, try again if pow is still valid
+ response.process_events()
+ if not response.is_success:
+ return False, format_error_message(
+ response.error_message, substrate=self.substrate
+ )
+ # Successful registration
+ else:
+ return True, None
def burned_register_extrinsic(
@@ -355,13 +346,10 @@ def burned_register_extrinsic(
)
return False
- try:
- wallet.unlock_coldkey()
- except KeyFileError:
- logging.error(
- ":cross_mark: Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid."
- )
+ if not (unlock := unlock_key(wallet)).success:
+ logging.error(unlock.message)
return False
+
logging.info(
f":satellite: Checking Account on subnet {netuid} ..."
)
diff --git a/bittensor/core/extrinsics/root.py b/bittensor/core/extrinsics/root.py
index 616c46f958..fc3d1852a1 100644
--- a/bittensor/core/extrinsics/root.py
+++ b/bittensor/core/extrinsics/root.py
@@ -2,12 +2,10 @@
from typing import Optional, Union, TYPE_CHECKING
import numpy as np
-from bittensor_wallet.errors import KeyFileError
from numpy.typing import NDArray
-from retry import retry
from bittensor.core.settings import version_as_int
-from bittensor.utils import format_error_message, weight_utils
+from bittensor.utils import format_error_message, weight_utils, unlock_key
from bittensor.utils.btlogging import logging
from bittensor.utils.networking import ensure_connected
from bittensor.utils.registration import torch, legacy_torch_api_compat
@@ -24,36 +22,34 @@ def _do_root_register(
wait_for_inclusion: bool = False,
wait_for_finalization: bool = True,
) -> tuple[bool, Optional[str]]:
- @retry(delay=1, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry():
- # create extrinsic call
- call = self.substrate.compose_call(
- call_module="SubtensorModule",
- call_function="root_register",
- call_params={"hotkey": wallet.hotkey.ss58_address},
- )
- extrinsic = self.substrate.create_signed_extrinsic(
- call=call, keypair=wallet.coldkey
- )
- response = self.substrate.submit_extrinsic(
- extrinsic,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
-
- # We only wait here if we expect finalization.
- if not wait_for_finalization and not wait_for_inclusion:
- return True
+ # create extrinsic call
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="root_register",
+ call_params={"hotkey": wallet.hotkey.ss58_address},
+ )
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ )
+ response = self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
- # process if registration successful, try again if pow is still valid
- response.process_events()
- if not response.is_success:
- return False, format_error_message(response.error_message)
- # Successful registration
- else:
- return True, None
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, None
- return make_substrate_call_with_retry()
+ # process if registration successful, try again if pow is still valid
+ response.process_events()
+ if not response.is_success:
+ return False, format_error_message(
+ response.error_message, substrate=self.substrate
+ )
+ # Successful registration
+ else:
+ return True, None
def root_register_extrinsic(
@@ -74,12 +70,8 @@ def root_register_extrinsic(
         success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
"""
- try:
- wallet.unlock_coldkey()
- except KeyFileError:
- logging.error(
- "Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid."
- )
+ if not (unlock := unlock_key(wallet)).success:
+ logging.error(unlock.message)
return False
is_registered = subtensor.is_hotkey_registered(
@@ -145,41 +137,37 @@ def _do_set_root_weights(
This method is vital for the dynamic weighting mechanism in Bittensor, where neurons adjust their trust in other neurons based on observed performance and contributions on the root network.
"""
- @retry(delay=2, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry():
- call = self.substrate.compose_call(
- call_module="SubtensorModule",
- call_function="set_root_weights",
- call_params={
- "dests": uids,
- "weights": vals,
- "netuid": netuid,
- "version_key": version_key,
- "hotkey": wallet.hotkey.ss58_address,
- },
- )
- # Period dictates how long the extrinsic will stay as part of waiting pool
- extrinsic = self.substrate.create_signed_extrinsic(
- call=call,
- keypair=wallet.coldkey,
- era={"period": 5},
- )
- response = self.substrate.submit_extrinsic(
- extrinsic,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
- # We only wait here if we expect finalization.
- if not wait_for_finalization and not wait_for_inclusion:
- return True, "Not waiting for finalziation or inclusion."
-
- response.process_events()
- if response.is_success:
- return True, "Successfully set weights."
- else:
- return False, response.error_message
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="set_root_weights",
+ call_params={
+ "dests": uids,
+ "weights": vals,
+ "netuid": netuid,
+ "version_key": version_key,
+ "hotkey": wallet.hotkey.ss58_address,
+ },
+ )
+    # Period dictates how long the extrinsic will stay as part of the waiting pool
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call,
+ keypair=wallet.coldkey,
+ era={"period": 5},
+ )
+ response = self.substrate.submit_extrinsic(
+ extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "Not waiting for finalziation or inclusion."
- return make_substrate_call_with_retry()
+ response.process_events()
+ if response.is_success:
+ return True, "Successfully set weights."
+ else:
+ return False, response.error_message
@legacy_torch_api_compat
@@ -206,13 +194,8 @@ def set_root_weights_extrinsic(
Returns:
         success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
"""
-
- try:
- wallet.unlock_coldkey()
- except KeyFileError:
- logging.error(
- ":cross_mark: Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid."
- )
+ if not (unlock := unlock_key(wallet)).success:
+ logging.error(unlock.message)
return False
# First convert types.
diff --git a/bittensor/core/extrinsics/serving.py b/bittensor/core/extrinsics/serving.py
index f9cb788172..02630ff91c 100644
--- a/bittensor/core/extrinsics/serving.py
+++ b/bittensor/core/extrinsics/serving.py
@@ -17,12 +17,10 @@
from typing import Optional, TYPE_CHECKING
-from retry import retry
-
from bittensor.core.errors import MetadataError
from bittensor.core.extrinsics.utils import submit_extrinsic
from bittensor.core.settings import version_as_int
-from bittensor.utils import format_error_message, networking as net
+from bittensor.utils import format_error_message, networking as net, unlock_key
from bittensor.utils.btlogging import logging
from bittensor.utils.networking import ensure_connected
@@ -59,32 +57,26 @@ def do_serve_axon(
This function is crucial for initializing and announcing a neuron's ``Axon`` service on the network, enhancing the decentralized computation capabilities of Bittensor.
"""
- @retry(delay=1, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry():
- call = self.substrate.compose_call(
- call_module="SubtensorModule",
- call_function="serve_axon",
- call_params=call_params,
- )
- extrinsic = self.substrate.create_signed_extrinsic(
- call=call, keypair=wallet.hotkey
- )
- response = submit_extrinsic(
- substrate=self.substrate,
- extrinsic=extrinsic,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
- if wait_for_inclusion or wait_for_finalization:
- response.process_events()
- if response.is_success:
- return True, None
- else:
- return False, response.error_message
- else:
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="serve_axon",
+ call_params=call_params,
+ )
+ extrinsic = self.substrate.create_signed_extrinsic(call=call, keypair=wallet.hotkey)
+ response = submit_extrinsic(
+ substrate=self.substrate,
+ extrinsic=extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ if wait_for_inclusion or wait_for_finalization:
+ response.process_events()
+ if response.is_success:
return True, None
-
- return make_substrate_call_with_retry()
+ else:
+ return False, response.error_message
+ else:
+ return True, None
def serve_extrinsic(
@@ -117,7 +109,10 @@ def serve_extrinsic(
         success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
"""
# Decrypt hotkey
- wallet.unlock_hotkey()
+ if not (unlock := unlock_key(wallet, "hotkey")).success:
+ logging.error(unlock.message)
+ return False
+
params: "AxonServeCallParams" = {
"version": version_as_int,
"ip": net.ip_to_int(ip),
@@ -173,7 +168,9 @@ def serve_extrinsic(
)
return True
else:
- logging.error(f"Failed: {format_error_message(error_message)}")
+ logging.error(
+ f"Failed: {format_error_message(error_message, substrate=subtensor.substrate)}"
+ )
return False
else:
return True
@@ -198,8 +195,9 @@ def serve_axon_extrinsic(
Returns:
         success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
"""
- axon.wallet.unlock_hotkey()
- axon.wallet.unlock_coldkeypub()
+ if not (unlock := unlock_key(axon.wallet, "hotkey")).success:
+ logging.error(unlock.message)
+ return False
external_port = axon.external_port
# ---- Get external ip ----
@@ -260,7 +258,9 @@ def publish_metadata(
MetadataError: If there is an error in submitting the extrinsic or if the response from the blockchain indicates failure.
"""
- wallet.unlock_hotkey()
+ if not (unlock := unlock_key(wallet, "hotkey")).success:
+ logging.error(unlock.message)
+ return False
with self.substrate as substrate:
call = substrate.compose_call(
@@ -285,21 +285,18 @@ def publish_metadata(
if response.is_success:
return True
else:
- raise MetadataError(format_error_message(response.error_message))
+ raise MetadataError(
+ format_error_message(response.error_message, substrate=self.substrate)
+ )
# Community uses this function directly
@net.ensure_connected
def get_metadata(self, netuid: int, hotkey: str, block: Optional[int] = None) -> str:
- @retry(delay=2, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry():
- with self.substrate as substrate:
- return substrate.query(
- module="Commitments",
- storage_function="CommitmentOf",
- params=[netuid, hotkey],
- block_hash=None if block is None else substrate.get_block_hash(block),
- )
-
- commit_data = make_substrate_call_with_retry()
- return commit_data.value
+ with self.substrate as substrate:
+ return substrate.query(
+ module="Commitments",
+ storage_function="CommitmentOf",
+ params=[netuid, hotkey],
+ block_hash=None if block is None else substrate.get_block_hash(block),
+ ).value
diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py
index 6de8e2338e..e9a7d88d35 100644
--- a/bittensor/core/extrinsics/set_weights.py
+++ b/bittensor/core/extrinsics/set_weights.py
@@ -15,12 +15,10 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
-import logging
from typing import Union, Optional, TYPE_CHECKING
import numpy as np
from numpy.typing import NDArray
-from retry import retry
from bittensor.core.extrinsics.utils import submit_extrinsic
from bittensor.core.settings import version_as_int
@@ -46,7 +44,8 @@ def do_set_weights(
version_key: int = version_as_int,
wait_for_inclusion: bool = False,
wait_for_finalization: bool = False,
-) -> tuple[bool, Optional[dict]]: # (success, error_message)
+ period: int = 5,
+) -> tuple[bool, Optional[str]]: # (success, error_message)
"""
Internal method to send a transaction to the Bittensor blockchain, setting weights for specified neurons. This method constructs and submits the transaction, handling retries and blockchain communication.
@@ -59,6 +58,7 @@ def do_set_weights(
version_key (int): Version key for compatibility with the network.
wait_for_inclusion (bool): Waits for the transaction to be included in a block.
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain.
+        period (int): The number of blocks during which the extrinsic remains valid in the transaction pool.
Returns:
tuple[bool, Optional[str]]: A tuple containing a success flag and an optional response message.
@@ -66,41 +66,39 @@ def do_set_weights(
This method is vital for the dynamic weighting mechanism in Bittensor, where neurons adjust their trust in other neurons based on observed performance and contributions.
"""
- @retry(delay=1, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry():
- call = self.substrate.compose_call(
- call_module="SubtensorModule",
- call_function="set_weights",
- call_params={
- "dests": uids,
- "weights": vals,
- "netuid": netuid,
- "version_key": version_key,
- },
- )
- # Period dictates how long the extrinsic will stay as part of waiting pool
- extrinsic = self.substrate.create_signed_extrinsic(
- call=call,
- keypair=wallet.hotkey,
- era={"period": 5},
- )
- response = submit_extrinsic(
- substrate=self.substrate,
- extrinsic=extrinsic,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
- # We only wait here if we expect finalization.
- if not wait_for_finalization and not wait_for_inclusion:
- return True, "Not waiting for finalization or inclusion."
-
- response.process_events()
- if response.is_success:
- return True, "Successfully set weights."
- else:
- return False, response.error_message
+ call = self.substrate.compose_call(
+ call_module="SubtensorModule",
+ call_function="set_weights",
+ call_params={
+ "dests": uids,
+ "weights": vals,
+ "netuid": netuid,
+ "version_key": version_key,
+ },
+ )
+    # Period dictates how long the extrinsic will stay as part of the waiting pool
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call,
+ keypair=wallet.hotkey,
+ era={"period": period},
+ )
+ response = submit_extrinsic(
+ substrate=self.substrate,
+ extrinsic=extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, "Not waiting for finalization or inclusion."
- return make_substrate_call_with_retry()
+ response.process_events()
+ if response.is_success:
+ return True, "Successfully set weights."
+ else:
+ return False, format_error_message(
+ response.error_message, substrate=self.substrate
+ )
# Community uses this extrinsic directly and via `subtensor.set_weights`
@@ -170,7 +168,6 @@ def set_weights_extrinsic(
logging.success(f"Finalized! Set weights: {str(success)}")
return True, "Successfully set weights and Finalized."
else:
- error_message = format_error_message(error_message)
logging.error(error_message)
return False, error_message
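
The new `period` argument controls the extrinsic's mortal era: roughly how many blocks (about 12 seconds each) the transaction stays valid in the pool before being dropped. A usage sketch under the assumption of a reachable test endpoint and an existing wallet named `my_wallet`:

```python
from bittensor_wallet import Wallet

from bittensor.core.extrinsics.set_weights import do_set_weights
from bittensor.core.subtensor import Subtensor

subtensor = Subtensor(network="test")  # assumes a reachable test-network endpoint
wallet = Wallet(name="my_wallet")      # placeholder wallet name

success, message = do_set_weights(
    subtensor,
    wallet=wallet,
    uids=[0, 1],
    vals=[32768, 65535],
    netuid=1,
    period=10,  # keep the extrinsic alive for ~10 blocks instead of the default 5
)
print(success, message)
```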
diff --git a/bittensor/core/extrinsics/transfer.py b/bittensor/core/extrinsics/transfer.py
index 2fea50dd6c..abadf8eee0 100644
--- a/bittensor/core/extrinsics/transfer.py
+++ b/bittensor/core/extrinsics/transfer.py
@@ -17,14 +17,13 @@
from typing import Optional, Union, TYPE_CHECKING
-from retry import retry
-
from bittensor.core.extrinsics.utils import submit_extrinsic
from bittensor.core.settings import NETWORK_EXPLORER_MAP
from bittensor.utils import (
get_explorer_url_for_network,
format_error_message,
is_valid_bittensor_address_or_public_key,
+ unlock_key,
)
from bittensor.utils.balance import Balance
from bittensor.utils.btlogging import logging
@@ -62,35 +61,31 @@ def do_transfer(
error (dict): Error message from subtensor if transfer failed.
"""
- @retry(delay=1, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry():
- call = self.substrate.compose_call(
- call_module="Balances",
- call_function="transfer_allow_death",
- call_params={"dest": dest, "value": transfer_balance.rao},
- )
- extrinsic = self.substrate.create_signed_extrinsic(
- call=call, keypair=wallet.coldkey
- )
- response = submit_extrinsic(
- substrate=self.substrate,
- extrinsic=extrinsic,
- wait_for_inclusion=wait_for_inclusion,
- wait_for_finalization=wait_for_finalization,
- )
- # We only wait here if we expect finalization.
- if not wait_for_finalization and not wait_for_inclusion:
- return True, None, None
-
- # Otherwise continue with finalization.
- response.process_events()
- if response.is_success:
- block_hash = response.block_hash
- return True, block_hash, None
- else:
- return False, None, response.error_message
-
- return make_substrate_call_with_retry()
+ call = self.substrate.compose_call(
+ call_module="Balances",
+ call_function="transfer_allow_death",
+ call_params={"dest": dest, "value": transfer_balance.rao},
+ )
+ extrinsic = self.substrate.create_signed_extrinsic(
+ call=call, keypair=wallet.coldkey
+ )
+ response = submit_extrinsic(
+ substrate=self.substrate,
+ extrinsic=extrinsic,
+ wait_for_inclusion=wait_for_inclusion,
+ wait_for_finalization=wait_for_finalization,
+ )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ return True, None, None
+
+ # Otherwise continue with finalization.
+ response.process_events()
+ if response.is_success:
+ block_hash = response.block_hash
+ return True, block_hash, None
+ else:
+ return False, None, response.error_message
# Community uses this extrinsic directly and via `subtensor.transfer`
@@ -126,8 +121,9 @@ def transfer_extrinsic(
# Convert bytes to hex string.
dest = "0x" + dest.hex()
- # Unlock wallet coldkey.
- wallet.unlock_coldkey()
+ if not (unlock := unlock_key(wallet)).success:
+ logging.error(unlock.message)
+ return False
# Convert to bittensor.Balance
if not isinstance(amount, Balance):
@@ -189,7 +185,7 @@ def transfer_extrinsic(
)
else:
logging.error(
- f":cross_mark: Failed: {format_error_message(error_message)}"
+ f":cross_mark: Failed: {format_error_message(error_message, substrate=subtensor.substrate)}"
)
if success:
diff --git a/bittensor/core/settings.py b/bittensor/core/settings.py
index 48995c83e7..d3a8a43a3c 100644
--- a/bittensor/core/settings.py
+++ b/bittensor/core/settings.py
@@ -15,7 +15,7 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
-__version__ = "8.2.1"
+__version__ = "8.3.0"
import os
import re
@@ -30,11 +30,6 @@
WALLETS_DIR = USER_BITTENSOR_DIR / "wallets"
MINERS_DIR = USER_BITTENSOR_DIR / "miners"
-# Bittensor networks name
-NETWORKS = ["local", "finney", "test", "archive"]
-
-DEFAULT_ENDPOINT = "wss://entrypoint-finney.opentensor.ai:443"
-DEFAULT_NETWORK = NETWORKS[1]
# Create dirs if they don't exist
WALLETS_DIR.mkdir(parents=True, exist_ok=True)
diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py
index 70da83f63a..67888cd999 100644
--- a/bittensor/core/subtensor.py
+++ b/bittensor/core/subtensor.py
@@ -16,7 +16,7 @@
# DEALINGS IN THE SOFTWARE.
"""
-The ``bittensor.core.subtensor`` module in Bittensor serves as a crucial interface for interacting with the Bittensor
+The ``bittensor.core.subtensor.Subtensor`` class in Bittensor serves as a crucial interface for interacting with the Bittensor
blockchain, facilitating a range of operations essential for the decentralized machine learning network.
"""
@@ -30,7 +30,6 @@
import scalecodec
from bittensor_wallet import Wallet
from numpy.typing import NDArray
-from retry import retry
from scalecodec.base import RuntimeConfiguration
from scalecodec.exceptions import RemainingScaleBytesNotEmptyException
from scalecodec.type_registry import load_type_registry_preset
@@ -390,15 +389,15 @@ def add_args(cls, parser: "argparse.ArgumentParser", prefix: Optional[str] = Non
@networking.ensure_connected
def _encode_params(
self,
- call_definition: list["ParamWithTypes"],
+ call_definition: dict[str, list["ParamWithTypes"]],
params: Union[list[Any], dict[str, Any]],
) -> str:
"""Returns a hex encoded string of the params using their types."""
param_data = scalecodec.ScaleBytes(b"")
- for i, param in enumerate(call_definition["params"]): # type: ignore
+ for i, param in enumerate(call_definition["params"]):
scale_obj = self.substrate.create_scale_object(param["type"])
- if type(params) is list:
+ if isinstance(params, list):
param_data += scale_obj.encode(params[i])
else:
if param["name"] not in params:
@@ -450,18 +449,14 @@ def query_subtensor(
This query function is essential for accessing detailed information about the network and its neurons, providing valuable insights into the state and dynamics of the Bittensor ecosystem.
"""
- @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging)
- def make_substrate_call_with_retry() -> "ScaleType":
- return self.substrate.query(
- module="SubtensorModule",
- storage_function=name,
- params=params,
- block_hash=(
- None if block is None else self.substrate.get_block_hash(block)
- ),
- )
-
- return make_substrate_call_with_retry()
+ return self.substrate.query(
+ module="SubtensorModule",
+ storage_function=name,
+ params=params,
+ block_hash=(
+ None if block is None else self.substrate.get_block_hash(block)
+ ),
+ )
@networking.ensure_connected
def query_map_subtensor(
@@ -480,19 +475,14 @@ def query_map_subtensor(
This function is particularly useful for analyzing and understanding complex network structures and relationships within the Bittensor ecosystem, such as inter-neuronal connections and stake distributions.
"""
-
- @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging)
- def make_substrate_call_with_retry():
- return self.substrate.query_map(
- module="SubtensorModule",
- storage_function=name,
- params=params,
- block_hash=(
- None if block is None else self.substrate.get_block_hash(block)
- ),
- )
-
- return make_substrate_call_with_retry()
+ return self.substrate.query_map(
+ module="SubtensorModule",
+ storage_function=name,
+ params=params,
+ block_hash=(
+ None if block is None else self.substrate.get_block_hash(block)
+ ),
+ )
def query_runtime_api(
self,
@@ -563,16 +553,11 @@ def state_call(
The state call function provides a more direct and flexible way of querying blockchain data, useful for specific use cases where standard queries are insufficient.
"""
-
- @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging)
- def make_substrate_call_with_retry() -> dict[Any, Any]:
- block_hash = None if block is None else self.substrate.get_block_hash(block)
- return self.substrate.rpc_request(
- method="state_call",
- params=[method, data, block_hash] if block_hash else [method, data],
- )
-
- return make_substrate_call_with_retry()
+ block_hash = None if block is None else self.substrate.get_block_hash(block)
+ return self.substrate.rpc_request(
+ method="state_call",
+ params=[method, data, block_hash] if block_hash else [method, data],
+ )
@networking.ensure_connected
def query_map(
@@ -596,19 +581,14 @@ def query_map(
This function is particularly useful for retrieving detailed and structured data from various blockchain modules, offering insights into the network's state and the relationships between its different components.
"""
-
- @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging)
- def make_substrate_call_with_retry() -> "QueryMapResult":
- return self.substrate.query_map(
- module=module,
- storage_function=name,
- params=params,
- block_hash=(
- None if block is None else self.substrate.get_block_hash(block)
- ),
- )
-
- return make_substrate_call_with_retry()
+ return self.substrate.query_map(
+ module=module,
+ storage_function=name,
+ params=params,
+ block_hash=(
+ None if block is None else self.substrate.get_block_hash(block)
+ ),
+ )
@networking.ensure_connected
def query_constant(
@@ -627,18 +607,13 @@ def query_constant(
Constants queried through this function can include critical network parameters such as inflation rates, consensus rules, or validation thresholds, providing a deeper understanding of the Bittensor network's operational parameters.
"""
-
- @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging)
- def make_substrate_call_with_retry():
- return self.substrate.get_constant(
- module_name=module_name,
- constant_name=constant_name,
- block_hash=(
- None if block is None else self.substrate.get_block_hash(block)
- ),
- )
-
- return make_substrate_call_with_retry()
+ return self.substrate.get_constant(
+ module_name=module_name,
+ constant_name=constant_name,
+ block_hash=(
+ None if block is None else self.substrate.get_block_hash(block)
+ ),
+ )
@networking.ensure_connected
def query_module(
@@ -662,19 +637,14 @@ def query_module(
This versatile query function is key to accessing a wide range of data and insights from different parts of the Bittensor blockchain, enhancing the understanding and analysis of the network's state and dynamics.
"""
-
- @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging)
- def make_substrate_call_with_retry() -> "ScaleType":
- return self.substrate.query(
- module=module,
- storage_function=name,
- params=params,
- block_hash=(
- None if block is None else self.substrate.get_block_hash(block)
- ),
- )
-
- return make_substrate_call_with_retry()
+ return self.substrate.query(
+ module=module,
+ storage_function=name,
+ params=params,
+ block_hash=(
+ None if block is None else self.substrate.get_block_hash(block)
+ ),
+ )
# Common subtensor methods
def metagraph(
@@ -768,12 +738,7 @@ def get_current_block(self) -> int:
Knowing the current block number is essential for querying real-time data and performing time-sensitive operations on the blockchain. It serves as a reference point for network activities and data synchronization.
"""
-
- @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging)
- def make_substrate_call_with_retry():
- return self.substrate.get_block_number(None) # type: ignore
-
- return make_substrate_call_with_retry()
+ return self.substrate.get_block_number(None) # type: ignore
def is_hotkey_registered_any(
self, hotkey_ss58: str, block: Optional[int] = None
@@ -1221,18 +1186,15 @@ def neuron_for_uid(
if uid is None:
return NeuronInfo.get_null_neuron()
- @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging)
- def make_substrate_call_with_retry():
- block_hash = None if block is None else self.substrate.get_block_hash(block)
- params = [netuid, uid]
- if block_hash:
- params = params + [block_hash]
- return self.substrate.rpc_request(
- method="neuronInfo_getNeuron",
- params=params, # custom rpc method
- )
+ block_hash = None if block is None else self.substrate.get_block_hash(block)
+ params = [netuid, uid]
+ if block_hash:
+ params = params + [block_hash]
- json_body = make_substrate_call_with_retry()
+ json_body = self.substrate.rpc_request(
+ method="neuronInfo_getNeuron",
+ params=params, # custom rpc method
+ )
if not (result := json_body.get("result", None)):
return NeuronInfo.get_null_neuron()
@@ -1270,7 +1232,7 @@ def get_subnet_hyperparameters(
else:
bytes_result = bytes.fromhex(hex_bytes_result)
- return SubnetHyperparameters.from_vec_u8(bytes_result) # type: ignore
+ return SubnetHyperparameters.from_vec_u8(bytes_result)
# Community uses this method
# Returns network ImmunityPeriod hyper parameter.
@@ -1445,17 +1407,12 @@ def get_all_subnets_info(self, block: Optional[int] = None) -> list[SubnetInfo]:
Gaining insights into the subnets' details assists in understanding the network's composition, the roles of different subnets, and their unique features.
"""
+ block_hash = None if block is None else self.substrate.get_block_hash(block)
- @retry(delay=1, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry():
- block_hash = None if block is None else self.substrate.get_block_hash(block)
-
- return self.substrate.rpc_request(
- method="subnetInfo_getSubnetsInfo", # custom rpc method
- params=[block_hash] if block_hash else [],
- )
-
- json_body = make_substrate_call_with_retry()
+ json_body = self.substrate.rpc_request(
+ method="subnetInfo_getSubnetsInfo", # custom rpc method
+ params=[block_hash] if block_hash else [],
+ )
if not (result := json_body.get("result", None)):
return []
@@ -1653,25 +1610,21 @@ def get_balance(self, address: str, block: Optional[int] = None) -> "Balance":
This function is important for monitoring account holdings and managing financial transactions within the Bittensor ecosystem. It helps in assessing the economic status and capacity of network participants.
"""
try:
-
- @retry(delay=1, tries=3, backoff=2, max_delay=4, logger=logging)
- def make_substrate_call_with_retry():
- return self.substrate.query(
- module="System",
- storage_function="Account",
- params=[address],
- block_hash=(
- None if block is None else self.substrate.get_block_hash(block)
- ),
- )
-
- result = make_substrate_call_with_retry()
+ result = self.substrate.query(
+ module="System",
+ storage_function="Account",
+ params=[address],
+ block_hash=(
+ None if block is None else self.substrate.get_block_hash(block)
+ ),
+ )
except RemainingScaleBytesNotEmptyException:
logging.error(
"Received a corrupted message. This likely points to an error with the network or subnet."
)
return Balance(1000)
+
return Balance(result.value["data"]["free"])
# Used in community via `bittensor.core.subtensor.Subtensor.transfer`
@@ -1958,20 +1911,14 @@ def get_delegate_by_hotkey(
This function is essential for understanding the roles and influence of delegate neurons within the Bittensor network's consensus and governance structures.
"""
+ encoded_hotkey = ss58_to_vec_u8(hotkey_ss58)
- @retry(delay=1, tries=3, backoff=2, max_delay=4)
- def make_substrate_call_with_retry(encoded_hotkey_: list[int]):
- block_hash = None if block is None else self.substrate.get_block_hash(block)
-
- return self.substrate.rpc_request(
- method="delegateInfo_getDelegate", # custom rpc method
- params=(
- [encoded_hotkey_, block_hash] if block_hash else [encoded_hotkey_]
- ),
- )
+ block_hash = None if block is None else self.substrate.get_block_hash(block)
- encoded_hotkey = ss58_to_vec_u8(hotkey_ss58)
- json_body = make_substrate_call_with_retry(encoded_hotkey)
+ json_body = self.substrate.rpc_request(
+ method="delegateInfo_getDelegate", # custom rpc method
+ params=([encoded_hotkey, block_hash] if block_hash else [encoded_hotkey]),
+ )
if not (result := json_body.get("result", None)):
return None
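
With the `retry` package decorators removed from these query methods, callers that still want the old backoff behavior can wrap calls themselves. A minimal sketch reproducing the removed `@retry(delay=1, tries=3, backoff=2, max_delay=4)` policy (the trailing usage line assumes an existing `subtensor` instance):

```python
import time
from typing import Callable, TypeVar

T = TypeVar("T")


def with_retries(
    fn: Callable[[], T],
    tries: int = 3,
    delay: float = 1.0,
    backoff: float = 2.0,
    max_delay: float = 4.0,
) -> T:
    """Call fn(), retrying on any exception with capped exponential backoff."""
    wait = delay
    for attempt in range(1, tries + 1):
        try:
            return fn()
        except Exception:
            if attempt == tries:
                raise
            time.sleep(wait)
            wait = min(wait * backoff, max_delay)
    raise RuntimeError("unreachable")  # satisfies type checkers


# Example: block = with_retries(lambda: subtensor.get_current_block())
```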
diff --git a/bittensor/utils/__init__.py b/bittensor/utils/__init__.py
index 6239d89808..5c89382987 100644
--- a/bittensor/utils/__init__.py
+++ b/bittensor/utils/__init__.py
@@ -15,8 +15,11 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
+import ast
+from collections import namedtuple
import hashlib
-from typing import Literal, Union, Optional, TYPE_CHECKING
+from typing import Any, Literal, Union, Optional, TYPE_CHECKING
+from urllib.parse import urlparse
import scalecodec
from bittensor_wallet import Keypair
@@ -24,17 +27,23 @@
from bittensor.core.settings import SS58_FORMAT
from bittensor.utils.btlogging import logging
+from bittensor_wallet.errors import KeyFileError, PasswordError
from .registration import torch, use_torch
from .version import version_checking, check_version, VersionCheckError
if TYPE_CHECKING:
+ from bittensor.utils.async_substrate_interface import AsyncSubstrateInterface
from substrateinterface import SubstrateInterface
+ from bittensor_wallet import Wallet
RAOPERTAO = 1e9
U16_MAX = 65535
U64_MAX = 18446744073709551615
+UnlockStatus = namedtuple("UnlockStatus", ["success", "message"])
+
+
def ss58_to_vec_u8(ss58_address: str) -> list[int]:
ss58_bytes: bytes = ss58_address_to_bytes(ss58_address)
encoded_address: list[int] = [int(byte) for byte in ss58_bytes]
@@ -142,14 +151,16 @@ def get_hash(content, encoding="utf-8"):
def format_error_message(
- error_message: dict, substrate: "SubstrateInterface" = None
+ error_message: Union[dict, Exception],
+ substrate: Union["AsyncSubstrateInterface", "SubstrateInterface"],
) -> str:
"""
Formats an error message from the Subtensor error information for use in extrinsics.
Args:
- error_message (dict): A dictionary containing the error information from Subtensor.
- substrate (SubstrateInterface, optional): The substrate interface to use.
+ error_message: A dictionary containing the error information from Subtensor, or a SubstrateRequestException
+ containing dictionary literal args.
+ substrate: The initialised SubstrateInterface object to use.
Returns:
str: A formatted error message string.
@@ -158,6 +169,27 @@ def format_error_message(
err_type = "UnknownType"
err_description = "Unknown Description"
+ if isinstance(error_message, Exception):
+ # generally gotten through SubstrateRequestException args
+ new_error_message = None
+ for arg in error_message.args:
+ try:
+ d = ast.literal_eval(arg)
+ if isinstance(d, dict):
+ if "error" in d:
+ new_error_message = d["error"]
+ break
+ elif all(x in d for x in ["code", "message", "data"]):
+ new_error_message = d
+ break
+            except (ValueError, SyntaxError):
+                pass
+ if new_error_message is None:
+ return_val = " ".join(error_message.args)
+ return f"Subtensor returned: {return_val}"
+ else:
+ error_message = new_error_message
+
if isinstance(error_message, dict):
# subtensor error structure
if (
@@ -166,14 +198,11 @@ def format_error_message(
and error_message.get("data")
):
err_name = "SubstrateRequestException"
- err_type = error_message.get("message")
- err_data = error_message.get("data")
+ err_type = error_message.get("message", "")
+ err_data = error_message.get("data", "")
# subtensor custom error marker
if err_data.startswith("Custom error:") and substrate:
- if not substrate.metadata:
- substrate.get_metadata()
-
if substrate.metadata:
try:
pallet = substrate.metadata.get_metadata_pallet(
@@ -185,8 +214,10 @@ def format_error_message(
err_type = error_dict.get("message", err_type)
err_docs = error_dict.get("docs", [])
err_description = err_docs[0] if err_docs else err_description
- except Exception:
- logging.error("Substrate pallets data unavailable.")
+ except (AttributeError, IndexError):
+ logging.error(
+ "Substrate pallets data unavailable. This is usually caused by an uninitialized substrate."
+ )
else:
err_description = err_data
@@ -277,3 +308,98 @@ def is_valid_bittensor_address_or_public_key(address: Union[str, bytes]) -> bool
else:
# Invalid address type
return False
+
+
+def decode_hex_identity_dict(info_dictionary) -> dict[str, Any]:
+ """
+ Decodes hex-encoded strings in a dictionary.
+
+ This function traverses the given dictionary, identifies hex-encoded strings, and decodes them into readable strings. It handles nested dictionaries and lists within the dictionary.
+
+ Args:
+ info_dictionary (dict): The dictionary containing hex-encoded strings to decode.
+
+ Returns:
+ dict: The dictionary with decoded strings.
+
+    Examples:
+        >>> input_dict = {
+        ...     "name": {"value": "0x6a6f686e"},
+        ...     "additional": [
+        ...         [{"data": "0x64617461"}]
+        ...     ]
+        ... }
+        >>> decode_hex_identity_dict(input_dict)
+        {'name': 'john', 'additional': [('data', 'data')]}
+ """
+
+ def get_decoded(data: str) -> str:
+ """Decodes a hex-encoded string."""
+ try:
+ return bytes.fromhex(data[2:]).decode()
+ except UnicodeDecodeError:
+            logging.error(f"Could not decode: {key}: {item}")
+
+ for key, value in info_dictionary.items():
+ if isinstance(value, dict):
+ item = list(value.values())[0]
+ if isinstance(item, str) and item.startswith("0x"):
+ try:
+ info_dictionary[key] = get_decoded(item)
+ except UnicodeDecodeError:
+                    logging.error(f"Could not decode: {key}: {item}")
+ else:
+ info_dictionary[key] = item
+ if key == "additional":
+ additional = []
+ for item in value:
+ additional.append(
+ tuple(
+ get_decoded(data=next(iter(sub_item.values())))
+ for sub_item in item
+ )
+ )
+ info_dictionary[key] = additional
+
+ return info_dictionary
+
+
+def validate_chain_endpoint(endpoint_url: str) -> tuple[bool, str]:
+ """Validates if the provided endpoint URL is a valid WebSocket URL."""
+ parsed = urlparse(endpoint_url)
+ if parsed.scheme not in ("ws", "wss"):
+ return False, (
+ f"Invalid URL or network name provided: ({endpoint_url}).\n"
+ "Allowed network names are finney, test, local. "
+ "Valid chain endpoints should use the scheme `ws` or `wss`.\n"
+ )
+ if not parsed.netloc:
+ return False, "Invalid URL passed as the endpoint"
+ return True, ""
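+    # Illustrative (mirrors the checks above): validate_chain_endpoint("ws://127.0.0.1:9944")
+    # returns (True, ""), while an "https://..." URL fails the ws/wss scheme check.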
+
+
+def unlock_key(wallet: "Wallet", unlock_type="coldkey") -> "UnlockStatus":
+ """
+ Attempts to decrypt a wallet's coldkey or hotkey
+ Args:
+ wallet: a Wallet object
+ unlock_type: the key type, 'coldkey' or 'hotkey'
+ Returns: UnlockStatus for success status of unlock, with error message if unsuccessful
+ """
+ if unlock_type == "coldkey":
+ unlocker = "unlock_coldkey"
+ elif unlock_type == "hotkey":
+ unlocker = "unlock_hotkey"
+ else:
+ raise ValueError(
+ f"Invalid unlock type provided: {unlock_type}. Must be 'coldkey' or 'hotkey'."
+ )
+ try:
+ getattr(wallet, unlocker)()
+ return UnlockStatus(True, "")
+ except PasswordError:
+ err_msg = f"The password used to decrypt your {unlock_type.capitalize()} keyfile is invalid."
+ return UnlockStatus(False, err_msg)
+ except KeyFileError:
+ err_msg = f"{unlock_type.capitalize()} keyfile is corrupt, non-writable, or non-readable, or non-existent."
+ return UnlockStatus(False, err_msg)
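+
+
+# Usage sketch (illustrative; assumes an existing bittensor_wallet.Wallet instance):
+#
+#   status = unlock_key(wallet, unlock_type="hotkey")
+#   if not status.success:
+#       logging.error(status.message)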
diff --git a/bittensor/utils/async_substrate_interface.py b/bittensor/utils/async_substrate_interface.py
new file mode 100644
index 0000000000..c3af691952
--- /dev/null
+++ b/bittensor/utils/async_substrate_interface.py
@@ -0,0 +1,2742 @@
+import asyncio
+import json
+import random
+from collections import defaultdict
+from dataclasses import dataclass
+from hashlib import blake2b
+from typing import Optional, Any, Union, Callable, Awaitable, cast
+
+import websockets
+from async_property import async_property
+from bittensor_wallet import Keypair
+from bt_decode import PortableRegistry, decode as decode_by_type_string, MetadataV15
+from packaging import version
+from scalecodec import GenericExtrinsic
+from scalecodec.base import ScaleBytes, ScaleType, RuntimeConfigurationObject
+from scalecodec.type_registry import load_type_registry_preset
+from scalecodec.types import GenericCall
+from substrateinterface.exceptions import (
+ SubstrateRequestException,
+ ExtrinsicNotFound,
+ BlockNotFound,
+)
+from substrateinterface.storage import StorageKey
+
+ResultHandler = Callable[[dict, Any], Awaitable[tuple[dict, bool]]]
+
+
+class TimeoutException(Exception):
+ pass
+
+
+def timeout_handler(signum, frame):
+ raise TimeoutException("Operation timed out")
+
+
+class ExtrinsicReceipt:
+ """
+    Object containing information about a submitted extrinsic. The block hash in which the extrinsic is included
+    is required when retrieving triggered events or determining whether the extrinsic was successful.
+ """
+
+ def __init__(
+ self,
+ substrate: "AsyncSubstrateInterface",
+ extrinsic_hash: Optional[str] = None,
+ block_hash: Optional[str] = None,
+ block_number: Optional[int] = None,
+ extrinsic_idx: Optional[int] = None,
+ finalized=None,
+ ):
+ """
+        Object containing information about a submitted extrinsic. The block hash in which the extrinsic is included
+        is required when retrieving triggered events or determining whether the extrinsic was successful.
+
+ Parameters
+ ----------
+ substrate
+ extrinsic_hash
+ block_hash
+ finalized
+ """
+ self.substrate = substrate
+ self.extrinsic_hash = extrinsic_hash
+ self.block_hash = block_hash
+ self.block_number = block_number
+ self.finalized = finalized
+
+ self.__extrinsic_idx = extrinsic_idx
+ self.__extrinsic = None
+
+ self.__triggered_events: Optional[list] = None
+ self.__is_success: Optional[bool] = None
+ self.__error_message = None
+ self.__weight = None
+ self.__total_fee_amount = None
+
+ async def get_extrinsic_identifier(self) -> str:
+ """
+ Returns the on-chain identifier for this extrinsic in format "[block_number]-[extrinsic_idx]" e.g. 134324-2
+ Returns
+ -------
+ str
+ """
+ if self.block_number is None:
+ if self.block_hash is None:
+ raise ValueError(
+ "Cannot create extrinsic identifier: block_hash is not set"
+ )
+
+ self.block_number = await self.substrate.get_block_number(self.block_hash)
+
+ if self.block_number is None:
+ raise ValueError(
+ "Cannot create extrinsic identifier: unknown block_hash"
+ )
+
+ return f"{self.block_number}-{await self.extrinsic_idx}"
+
+ async def retrieve_extrinsic(self):
+ if not self.block_hash:
+ raise ValueError(
+ "ExtrinsicReceipt can't retrieve events because it's unknown which block_hash it is "
+ "included, manually set block_hash or use `wait_for_inclusion` when sending extrinsic"
+ )
+ # Determine extrinsic idx
+
+ block = await self.substrate.get_block(block_hash=self.block_hash)
+
+ extrinsics = block["extrinsics"]
+
+ if len(extrinsics) > 0:
+ if self.__extrinsic_idx is None:
+ self.__extrinsic_idx = self.__get_extrinsic_index(
+ block_extrinsics=extrinsics, extrinsic_hash=self.extrinsic_hash
+ )
+
+ if self.__extrinsic_idx >= len(extrinsics):
+ raise ExtrinsicNotFound()
+
+ self.__extrinsic = extrinsics[self.__extrinsic_idx]
+
+ @async_property
+ async def extrinsic_idx(self) -> int:
+ """
+ Retrieves the index of this extrinsic in containing block
+
+ Returns
+ -------
+ int
+ """
+ if self.__extrinsic_idx is None:
+ await self.retrieve_extrinsic()
+ return self.__extrinsic_idx
+
+ @async_property
+ async def triggered_events(self) -> list:
+ """
+        Gets triggered events for the submitted extrinsic. The block_hash in which the extrinsic is included is
+        required; manually set block_hash or use `wait_for_inclusion` when submitting the extrinsic
+
+ Returns
+ -------
+ list
+ """
+ if self.__triggered_events is None:
+ if not self.block_hash:
+ raise ValueError(
+ "ExtrinsicReceipt can't retrieve events because it's unknown which block_hash it is "
+ "included, manually set block_hash or use `wait_for_inclusion` when sending extrinsic"
+ )
+
+ if await self.extrinsic_idx is None:
+ await self.retrieve_extrinsic()
+
+ self.__triggered_events = []
+
+ for event in await self.substrate.get_events(block_hash=self.block_hash):
+ if event["extrinsic_idx"] == await self.extrinsic_idx:
+ self.__triggered_events.append(event)
+
+ return cast(list, self.__triggered_events)
+
+ async def process_events(self):
+ if await self.triggered_events:
+ self.__total_fee_amount = 0
+
+ # Process fees
+ has_transaction_fee_paid_event = False
+
+ for event in await self.triggered_events:
+ if (
+ event["event"]["module_id"] == "TransactionPayment"
+ and event["event"]["event_id"] == "TransactionFeePaid"
+ ):
+ self.__total_fee_amount = event["event"]["attributes"]["actual_fee"]
+ has_transaction_fee_paid_event = True
+
+ # Process other events
+ for event in await self.triggered_events:
+ # Check events
+ if (
+ event["event"]["module_id"] == "System"
+ and event["event"]["event_id"] == "ExtrinsicSuccess"
+ ):
+ self.__is_success = True
+ self.__error_message = None
+
+ if "dispatch_info" in event["event"]["attributes"]:
+ self.__weight = event["event"]["attributes"]["dispatch_info"][
+ "weight"
+ ]
+ else:
+ # Backwards compatibility
+ self.__weight = event["event"]["attributes"]["weight"]
+
+ elif (
+ event["event"]["module_id"] == "System"
+ and event["event"]["event_id"] == "ExtrinsicFailed"
+ ):
+ self.__is_success = False
+
+ dispatch_info = event["event"]["attributes"]["dispatch_info"]
+ dispatch_error = event["event"]["attributes"]["dispatch_error"]
+
+ self.__weight = dispatch_info["weight"]
+
+ if "Module" in dispatch_error:
+ module_index = dispatch_error["Module"][0]["index"]
+ error_index = int.from_bytes(
+ bytes(dispatch_error["Module"][0]["error"]),
+ byteorder="little",
+ signed=False,
+ )
+
+ if isinstance(error_index, str):
+ # Actual error index is first u8 in new [u8; 4] format
+ error_index = int(error_index[2:4], 16)
+ module_error = self.substrate.metadata.get_module_error(
+ module_index=module_index, error_index=error_index
+ )
+ self.__error_message = {
+ "type": "Module",
+ "name": module_error.name,
+ "docs": module_error.docs,
+ }
+ elif "BadOrigin" in dispatch_error:
+ self.__error_message = {
+ "type": "System",
+ "name": "BadOrigin",
+ "docs": "Bad origin",
+ }
+ elif "CannotLookup" in dispatch_error:
+ self.__error_message = {
+ "type": "System",
+ "name": "CannotLookup",
+ "docs": "Cannot lookup",
+ }
+ elif "Other" in dispatch_error:
+ self.__error_message = {
+ "type": "System",
+ "name": "Other",
+ "docs": "Unspecified error occurred",
+ }
+
+ elif not has_transaction_fee_paid_event:
+ if (
+ event["event"]["module_id"] == "Treasury"
+ and event["event"]["event_id"] == "Deposit"
+ ):
+ self.__total_fee_amount += event["event"]["attributes"]["value"]
+ elif (
+ event["event"]["module_id"] == "Balances"
+ and event["event"]["event_id"] == "Deposit"
+ ):
+                        self.__total_fee_amount += event["event"]["attributes"]["amount"]
+
+ @async_property
+ async def is_success(self) -> bool:
+ """
+        Returns `True` if the `ExtrinsicSuccess` event was triggered, `False` in case of `ExtrinsicFailed`.
+        When `False`, `error_message` will contain more details about the error.
+
+
+ Returns
+ -------
+ bool
+ """
+ if self.__is_success is None:
+ await self.process_events()
+
+ return cast(bool, self.__is_success)
+
+ @async_property
+ async def error_message(self) -> Optional[dict]:
+ """
+ Returns the error message if the extrinsic failed in format e.g.:
+
+ `{'type': 'System', 'name': 'BadOrigin', 'docs': 'Bad origin'}`
+
+ Returns
+ -------
+ dict
+ """
+ if self.__error_message is None:
+ if await self.is_success:
+ return None
+ await self.process_events()
+ return self.__error_message
+
+ @async_property
+ async def weight(self) -> Union[int, dict]:
+ """
+ Contains the actual weight when executing this extrinsic
+
+ Returns
+ -------
+ int (WeightV1) or dict (WeightV2)
+ """
+ if self.__weight is None:
+ await self.process_events()
+ return self.__weight
+
+ @async_property
+ async def total_fee_amount(self) -> int:
+ """
+        Contains the total fee costs deducted when executing this extrinsic. This includes the fee for the validator
+        (`Balances.Deposit` event) and the fee deposited for the treasury (`Treasury.Deposit` event).
+
+ Returns
+ -------
+ int
+ """
+ if self.__total_fee_amount is None:
+ await self.process_events()
+ return cast(int, self.__total_fee_amount)
+
+ # Helper functions
+ @staticmethod
+ def __get_extrinsic_index(block_extrinsics: list, extrinsic_hash: str) -> int:
+ """
+ Returns the index of a provided extrinsic
+ """
+ for idx, extrinsic in enumerate(block_extrinsics):
+ if (
+ extrinsic.extrinsic_hash
+ and f"0x{extrinsic.extrinsic_hash.hex()}" == extrinsic_hash
+ ):
+ return idx
+ raise ExtrinsicNotFound()
+
+ # Backwards compatibility methods
+ def __getitem__(self, item):
+ return getattr(self, item)
+
+ def __iter__(self):
+ for item in self.__dict__.items():
+ yield item
+
+ def get(self, name):
+ return self[name]
+
+
+class QueryMapResult:
+ def __init__(
+ self,
+ records: list,
+ page_size: int,
+ substrate: "AsyncSubstrateInterface",
+ module: Optional[str] = None,
+ storage_function: Optional[str] = None,
+ params: Optional[list] = None,
+ block_hash: Optional[str] = None,
+ last_key: Optional[str] = None,
+ max_results: Optional[int] = None,
+ ignore_decoding_errors: bool = False,
+ ):
+ self.records = records
+ self.page_size = page_size
+ self.module = module
+ self.storage_function = storage_function
+ self.block_hash = block_hash
+ self.substrate = substrate
+ self.last_key = last_key
+ self.max_results = max_results
+ self.params = params
+ self.ignore_decoding_errors = ignore_decoding_errors
+ self.loading_complete = False
+ self._buffer = iter(self.records) # Initialize the buffer with initial records
+
+ async def retrieve_next_page(self, start_key) -> list:
+ result = await self.substrate.query_map(
+ module=self.module,
+ storage_function=self.storage_function,
+ params=self.params,
+ page_size=self.page_size,
+ block_hash=self.block_hash,
+ start_key=start_key,
+ max_results=self.max_results,
+ ignore_decoding_errors=self.ignore_decoding_errors,
+ )
+
+ # Update last key from new result set to use as offset for next page
+ self.last_key = result.last_key
+ return result.records
+
+ def __aiter__(self):
+ return self
+
+ async def __anext__(self):
+ try:
+ # Try to get the next record from the buffer
+ return next(self._buffer)
+ except StopIteration:
+ # If no more records in the buffer, try to fetch the next page
+ if self.loading_complete:
+ raise StopAsyncIteration
+
+ next_page = await self.retrieve_next_page(self.last_key)
+ if not next_page:
+ self.loading_complete = True
+ raise StopAsyncIteration
+
+ # Update the buffer with the newly fetched records
+ self._buffer = iter(next_page)
+ return next(self._buffer)
+
+ def __getitem__(self, item):
+ return self.records[item]
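+
+    # Usage sketch (illustrative): results are consumed with `async for`, which pulls
+    # further pages transparently via retrieve_next_page once the buffer is exhausted:
+    #   async for record in query_map_result:
+    #       ...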
+
+
+@dataclass
+class Preprocessed:
+ queryable: str
+ method: str
+ params: list
+ value_scale_type: str
+ storage_item: ScaleType
+
+
+class RuntimeCache:
+ blocks: dict[int, "Runtime"]
+ block_hashes: dict[str, "Runtime"]
+
+ def __init__(self):
+ self.blocks = {}
+ self.block_hashes = {}
+
+ def add_item(
+ self, block: Optional[int], block_hash: Optional[str], runtime: "Runtime"
+ ):
+ if block is not None:
+ self.blocks[block] = runtime
+ if block_hash is not None:
+ self.block_hashes[block_hash] = runtime
+
+ def retrieve(
+ self, block: Optional[int] = None, block_hash: Optional[str] = None
+ ) -> Optional["Runtime"]:
+ if block is not None:
+ return self.blocks.get(block)
+ elif block_hash is not None:
+ return self.block_hashes.get(block_hash)
+ else:
+ return None
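+
+    # Illustrative (hypothetical values): a runtime stored via
+    # add_item(block=100, block_hash="0xabc...", runtime=rt) can be retrieved by either
+    # retrieve(block=100) or retrieve(block_hash="0xabc...").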
+
+
+class Runtime:
+ block_hash: str
+ block_id: int
+ runtime_version = None
+ transaction_version = None
+ cache_region = None
+ metadata = None
+ type_registry_preset = None
+
+ def __init__(self, chain, runtime_config, metadata, type_registry):
+ self.runtime_config = RuntimeConfigurationObject()
+ self.config = {}
+ self.chain = chain
+ self.type_registry = type_registry
+ self.runtime_config = runtime_config
+ self.metadata = metadata
+
+ @property
+ def implements_scaleinfo(self) -> bool:
+ """
+        Returns True if the current runtime implements a `PortableRegistry` (`MetadataV14` and higher)
+ """
+ if self.metadata:
+ return self.metadata.portable_registry is not None
+ else:
+ return False
+
+ def reload_type_registry(
+ self, use_remote_preset: bool = True, auto_discover: bool = True
+ ):
+ """
+ Reload type registry and preset used to instantiate the SubstrateInterface object. Useful to periodically apply
+ changes in type definitions when a runtime upgrade occurred
+
+ Parameters
+ ----------
+        use_remote_preset: When True, the preset is downloaded from GitHub master; otherwise files from the locally
+            installed scalecodec package are used
+ auto_discover
+
+ Returns
+ -------
+
+ """
+ self.runtime_config.clear_type_registry()
+
+ self.runtime_config.implements_scale_info = self.implements_scaleinfo
+
+ # Load metadata types in runtime configuration
+ self.runtime_config.update_type_registry(load_type_registry_preset(name="core"))
+ self.apply_type_registry_presets(
+ use_remote_preset=use_remote_preset, auto_discover=auto_discover
+ )
+
+ def apply_type_registry_presets(
+ self,
+ use_remote_preset: bool = True,
+ auto_discover: bool = True,
+ ):
+ """
+ Applies type registry presets to the runtime
+ :param use_remote_preset: bool, whether to use presets from remote
+        :param auto_discover: bool, whether to use presets from the locally installed scalecodec package
+ """
+ if self.type_registry_preset is not None:
+ # Load type registry according to preset
+ type_registry_preset_dict = load_type_registry_preset(
+ name=self.type_registry_preset, use_remote_preset=use_remote_preset
+ )
+
+ if not type_registry_preset_dict:
+ raise ValueError(
+ f"Type registry preset '{self.type_registry_preset}' not found"
+ )
+
+ elif auto_discover:
+ # Try to auto discover type registry preset by chain name
+ type_registry_name = self.chain.lower().replace(" ", "-")
+ try:
+ type_registry_preset_dict = load_type_registry_preset(
+ type_registry_name
+ )
+ self.type_registry_preset = type_registry_name
+ except ValueError:
+ type_registry_preset_dict = None
+
+ else:
+ type_registry_preset_dict = None
+
+ if type_registry_preset_dict:
+ # Load type registries in runtime configuration
+ if self.implements_scaleinfo is False:
+ # Only runtime with no embedded types in metadata need the default set of explicit defined types
+ self.runtime_config.update_type_registry(
+ load_type_registry_preset(
+ "legacy", use_remote_preset=use_remote_preset
+ )
+ )
+
+ if self.type_registry_preset != "legacy":
+ self.runtime_config.update_type_registry(type_registry_preset_dict)
+
+ if self.type_registry:
+ # Load type registries in runtime configuration
+ self.runtime_config.update_type_registry(self.type_registry)
+
+
+class RequestManager:
+ RequestResults = dict[Union[str, int], list[Union[ScaleType, dict]]]
+
+ def __init__(self, payloads):
+ self.response_map = {}
+ self.responses = defaultdict(lambda: {"complete": False, "results": []})
+ self.payloads_count = len(payloads)
+
+ def add_request(self, item_id: int, request_id: Any):
+ """
+ Adds an outgoing request to the responses map for later retrieval
+ """
+ self.response_map[item_id] = request_id
+
+ def overwrite_request(self, item_id: int, request_id: Any):
+ """
+ Overwrites an existing request in the responses map with a new request_id. This is used
+ for multipart responses that generate a subscription id we need to watch, rather than the initial
+ request_id.
+ """
+ self.response_map[request_id] = self.response_map.pop(item_id)
+ return request_id
+
+ def add_response(self, item_id: int, response: dict, complete: bool):
+ """
+ Maps a response to the request for later retrieval
+ """
+ request_id = self.response_map[item_id]
+ self.responses[request_id]["results"].append(response)
+ self.responses[request_id]["complete"] = complete
+
+ @property
+ def is_complete(self) -> bool:
+ """
+ Returns whether all requests in the manager have completed
+ """
+ return (
+ all(info["complete"] for info in self.responses.values())
+ and len(self.responses) == self.payloads_count
+ )
+
+ def get_results(self) -> RequestResults:
+ """
+ Generates a dictionary mapping the requests initiated to the responses received.
+ """
+ return {
+ request_id: info["results"] for request_id, info in self.responses.items()
+ }
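+
+    # Illustrative flow: add_request(item_id, request_id) when a payload is sent,
+    # add_response(item_id, response, complete) as messages arrive, then, once
+    # is_complete, get_results() yields {request_id: [response, ...]}.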
+
+
+class Websocket:
+ def __init__(
+ self,
+ ws_url: str,
+ max_subscriptions=1024,
+ max_connections=100,
+ shutdown_timer=5,
+ options: Optional[dict] = None,
+ ):
+ """
+ Websocket manager object. Allows for the use of a single websocket connection by multiple
+ calls.
+
+ :param ws_url: Websocket URL to connect to
+ :param max_subscriptions: Maximum number of subscriptions per websocket connection
+ :param max_connections: Maximum number of connections total
+ :param shutdown_timer: Number of seconds to shut down websocket connection after last use
+ """
+ # TODO allow setting max concurrent connections and rpc subscriptions per connection
+ # TODO reconnection logic
+ self.ws_url = ws_url
+ self.ws: Optional[websockets.WebSocketClientProtocol] = None
+ self.id = 0
+ self.max_subscriptions = max_subscriptions
+ self.max_connections = max_connections
+ self.shutdown_timer = shutdown_timer
+ self._received = {}
+ self._in_use = 0
+ self._receiving_task = None
+ self._attempts = 0
+ self._initialized = False
+ self._lock = asyncio.Lock()
+ self._exit_task = None
+ self._open_subscriptions = 0
+ self._options = options if options else {}
+
+ async def __aenter__(self):
+ async with self._lock:
+ self._in_use += 1
+ if self._exit_task:
+ self._exit_task.cancel()
+ if not self._initialized:
+ self._initialized = True
+ await self._connect()
+ self._receiving_task = asyncio.create_task(self._start_receiving())
+ return self
+
+ async def _connect(self):
+ self.ws = await asyncio.wait_for(
+ websockets.connect(self.ws_url, **self._options), timeout=10
+ )
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ async with self._lock:
+ self._in_use -= 1
+ if self._exit_task is not None:
+ self._exit_task.cancel()
+ try:
+ await self._exit_task
+ except asyncio.CancelledError:
+ pass
+ if self._in_use == 0 and self.ws is not None:
+ self.id = 0
+ self._open_subscriptions = 0
+ self._exit_task = asyncio.create_task(self._exit_with_timer())
+
+ async def _exit_with_timer(self):
+ """
+        Shuts down the websocket connection after the specified number of seconds, allowing the
+        connection to be reused if a new request arrives before the timer expires.
+ """
+ try:
+ await asyncio.sleep(self.shutdown_timer)
+ await self.shutdown()
+ except asyncio.CancelledError:
+ pass
+
+ async def shutdown(self):
+ async with self._lock:
+ try:
+ self._receiving_task.cancel()
+ await self._receiving_task
+ await self.ws.close()
+ except (AttributeError, asyncio.CancelledError):
+ pass
+ self.ws = None
+ self._initialized = False
+ self._receiving_task = None
+ self.id = 0
+
+ async def _recv(self) -> None:
+ try:
+ response = json.loads(
+ await cast(websockets.WebSocketClientProtocol, self.ws).recv()
+ )
+ async with self._lock:
+ self._open_subscriptions -= 1
+ if "id" in response:
+ self._received[response["id"]] = response
+ elif "params" in response:
+ self._received[response["params"]["subscription"]] = response
+ else:
+ raise KeyError(response)
+ except websockets.ConnectionClosed:
+ raise
+ except KeyError as e:
+ raise e
+
+ async def _start_receiving(self):
+ try:
+ while True:
+ await self._recv()
+ except asyncio.CancelledError:
+ pass
+ except websockets.ConnectionClosed:
+ # TODO try reconnect, but only if it's needed
+ raise
+
+ async def send(self, payload: dict) -> int:
+ """
+ Sends a payload to the websocket connection.
+
+ :param payload: payload, generate a payload with the AsyncSubstrateInterface.make_payload method
+ """
+ async with self._lock:
+ original_id = self.id
+ self.id += 1
+ self._open_subscriptions += 1
+ try:
+ await self.ws.send(json.dumps({**payload, **{"id": original_id}}))
+ return original_id
+ except websockets.ConnectionClosed:
+ raise
+
+ async def retrieve(self, item_id: int) -> Optional[dict]:
+ """
+ Retrieves a single item from received responses dict queue
+
+ :param item_id: id of the item to retrieve
+
+ :return: retrieved item
+ """
+ while True:
+ async with self._lock:
+ if item_id in self._received:
+ return self._received.pop(item_id)
+ await asyncio.sleep(0.1)
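+
+    # Usage sketch (illustrative): callers share this manager as an async context:
+    #   async with websocket_instance as ws:
+    #       item_id = await ws.send(jsonrpc_payload)  # the inner {"jsonrpc": ...} dict
+    #       response = await ws.retrieve(item_id)
+    # The underlying connection is closed `shutdown_timer` seconds after the last user exits.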
+
+
+class AsyncSubstrateInterface:
+ runtime = None
+ registry: Optional[PortableRegistry] = None
+
+ def __init__(
+ self,
+ chain_endpoint: str,
+ use_remote_preset=False,
+ auto_discover=True,
+ auto_reconnect=True,
+ ss58_format=None,
+ type_registry=None,
+ chain_name=None,
+ ):
+ """
+ The asyncio-compatible version of the subtensor interface commands we use in bittensor
+ """
+ self.chain_endpoint = chain_endpoint
+ self.__chain = chain_name
+ options = {
+ "max_size": 2**32,
+ "write_limit": 2**16,
+ }
+ if version.parse(websockets.__version__) < version.parse("14.0"):
+ options.update({"read_limit": 2**16})
+ self.ws = Websocket(chain_endpoint, options=options)
+ self._lock = asyncio.Lock()
+ self.last_block_hash: Optional[str] = None
+ self.config = {
+ "use_remote_preset": use_remote_preset,
+ "auto_discover": auto_discover,
+ "auto_reconnect": auto_reconnect,
+ "rpc_methods": None,
+ "strict_scale_decode": True,
+ }
+ self.initialized = False
+ self._forgettable_task = None
+ self.ss58_format = ss58_format
+ self.type_registry = type_registry
+ self.runtime_cache = RuntimeCache()
+ self.block_id: Optional[int] = None
+ self.runtime_version = None
+ self.runtime_config = RuntimeConfigurationObject()
+ self.__metadata_cache = {}
+ self.type_registry_preset = None
+ self.transaction_version = None
+ self.metadata = None
+ self.metadata_version_hex = "0x0f000000" # v15
+
+    async def __aenter__(self):
+        await self.initialize()
+        return self
+
+ async def initialize(self):
+ """
+ Initialize the connection to the chain.
+ """
+ async with self._lock:
+ if not self.initialized:
+ if not self.__chain:
+ chain = await self.rpc_request("system_chain", [])
+ self.__chain = chain.get("result")
+ self.reload_type_registry()
+ await asyncio.gather(self.load_registry(), self.init_runtime(None))
+ self.initialized = True
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ pass
+
+ @property
+ def chain(self):
+ """
+ Returns the substrate chain currently associated with object
+ """
+ return self.__chain
+
+ async def get_storage_item(self, module: str, storage_function: str):
+ if not self.metadata:
+ await self.init_runtime()
+ metadata_pallet = self.metadata.get_metadata_pallet(module)
+ storage_item = metadata_pallet.get_storage_function(storage_function)
+ return storage_item
+
+ async def _get_current_block_hash(
+ self, block_hash: Optional[str], reuse: bool
+ ) -> Optional[str]:
+ if block_hash:
+ self.last_block_hash = block_hash
+ return block_hash
+ elif reuse:
+ if self.last_block_hash:
+ return self.last_block_hash
+ return block_hash
+
+ async def load_registry(self):
+ metadata_rpc_result = await self.rpc_request(
+ "state_call",
+ ["Metadata_metadata_at_version", self.metadata_version_hex],
+ )
+ metadata_option_hex_str = metadata_rpc_result["result"]
+ metadata_option_bytes = bytes.fromhex(metadata_option_hex_str[2:])
+ metadata_v15 = MetadataV15.decode_from_metadata_option(metadata_option_bytes)
+ self.registry = PortableRegistry.from_metadata_v15(metadata_v15)
+
+ async def decode_scale(
+ self, type_string, scale_bytes: bytes, return_scale_obj=False
+ ):
+ """
+        Helper function to decode arbitrary SCALE-bytes (e.g. 0x02000000) according to the given RUST type_string
+        (e.g. BlockNumber), using the loaded portable registry.
+
+        Parameters
+        ----------
+        type_string
+        scale_bytes
+        return_scale_obj: if True the SCALE object itself is returned, otherwise the serialized dict value of the object
+
+        Returns
+        -------
+        The decoded object, or None if the supplied bytes are the empty-option marker b"\x00"
+
+ """
+ if scale_bytes == b"\x00":
+ obj = None
+ else:
+ obj = decode_by_type_string(type_string, self.registry, scale_bytes)
+ return obj
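+
+    # Illustrative (values from the docstring): decoding bytes.fromhex("02000000") as a
+    # block number (a u32) yields 2 via bt_decode; the single byte b"\x00" is treated as
+    # an empty result and returns None.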
+
+ async def init_runtime(
+ self, block_hash: Optional[str] = None, block_id: Optional[int] = None
+ ) -> Runtime:
+ """
+        This method is used by all other methods that deal with metadata and types defined in the type registry.
+ It optionally retrieves the block_hash when block_id is given and sets the applicable metadata for that
+ block_hash. Also, it applies all the versioned types at the time of the block_hash.
+
+ Because parsing of metadata and type registry is quite heavy, the result will be cached per runtime id.
+ In the future there could be support for caching backends like Redis to make this cache more persistent.
+
+ :param block_hash: optional block hash, should not be specified if block_id is
+ :param block_id: optional block id, should not be specified if block_hash is
+
+ :returns: Runtime object
+ """
+
+ async def get_runtime(block_hash, block_id) -> Runtime:
+ # Check if runtime state already set to current block
+ if (block_hash and block_hash == self.last_block_hash) or (
+ block_id and block_id == self.block_id
+ ):
+ return Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ )
+
+ if block_id is not None:
+ block_hash = await self.get_block_hash(block_id)
+
+ if not block_hash:
+ block_hash = await self.get_chain_head()
+
+ self.last_block_hash = block_hash
+ self.block_id = block_id
+
+            # Calls and storage functions are in fact decoded against the runtime of the previous block, therefore
+            # retrieve the metadata and apply the type registry of the runtime of the parent block
+ block_header = await self.rpc_request(
+ "chain_getHeader", [self.last_block_hash]
+ )
+
+ if block_header["result"] is None:
+ raise SubstrateRequestException(
+ f'Block not found for "{self.last_block_hash}"'
+ )
+
+ parent_block_hash: str = block_header["result"]["parentHash"]
+
+ if (
+ parent_block_hash
+ == "0x0000000000000000000000000000000000000000000000000000000000000000"
+ ):
+ runtime_block_hash = self.last_block_hash
+ else:
+ runtime_block_hash = parent_block_hash
+
+ runtime_info = await self.get_block_runtime_version(
+ block_hash=runtime_block_hash
+ )
+
+ if runtime_info is None:
+ raise SubstrateRequestException(
+ f"No runtime information for block '{block_hash}'"
+ )
+
+ # Check if runtime state already set to current block
+ if runtime_info.get("specVersion") == self.runtime_version:
+ return Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ )
+
+ self.runtime_version = runtime_info.get("specVersion")
+ self.transaction_version = runtime_info.get("transactionVersion")
+
+ if not self.metadata:
+ if self.runtime_version in self.__metadata_cache:
+ # Get metadata from cache
+ self.metadata = self.__metadata_cache[self.runtime_version]
+ else:
+ self.metadata = await self.get_block_metadata(
+ block_hash=runtime_block_hash, decode=True
+ )
+
+ # Update metadata cache
+ self.__metadata_cache[self.runtime_version] = self.metadata
+
+ # Update type registry
+ self.reload_type_registry(use_remote_preset=False, auto_discover=True)
+
+ if self.implements_scaleinfo:
+ self.runtime_config.add_portable_registry(self.metadata)
+
+ # Set active runtime version
+ self.runtime_config.set_active_spec_version_id(self.runtime_version)
+
+ # Check and apply runtime constants
+ ss58_prefix_constant = await self.get_constant(
+ "System", "SS58Prefix", block_hash=block_hash
+ )
+
+ if ss58_prefix_constant:
+ self.ss58_format = ss58_prefix_constant
+
+ # Set runtime compatibility flags
+ try:
+ _ = self.runtime_config.create_scale_object(
+ "sp_weights::weight_v2::Weight"
+ )
+ self.config["is_weight_v2"] = True
+ self.runtime_config.update_type_registry_types(
+ {"Weight": "sp_weights::weight_v2::Weight"}
+ )
+ except NotImplementedError:
+ self.config["is_weight_v2"] = False
+ self.runtime_config.update_type_registry_types({"Weight": "WeightV1"})
+ return Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ )
+
+ if block_id and block_hash:
+ raise ValueError("Cannot provide block_hash and block_id at the same time")
+
+ if not (runtime := self.runtime_cache.retrieve(block_id, block_hash)):
+ runtime = await get_runtime(block_hash, block_id)
+ self.runtime_cache.add_item(block_id, block_hash, runtime)
+ return runtime
+
+ def reload_type_registry(
+ self, use_remote_preset: bool = True, auto_discover: bool = True
+ ):
+ """
+        Reload type registry and preset used to instantiate the SubstrateInterface object. Useful to periodically apply
+ changes in type definitions when a runtime upgrade occurred
+
+ Parameters
+ ----------
+        use_remote_preset: When True, the preset is downloaded from GitHub master; otherwise files from the locally installed scalecodec package are used
+ auto_discover
+
+ Returns
+ -------
+
+ """
+ self.runtime_config.clear_type_registry()
+
+ self.runtime_config.implements_scale_info = self.implements_scaleinfo
+
+ # Load metadata types in runtime configuration
+ self.runtime_config.update_type_registry(load_type_registry_preset(name="core"))
+ self.apply_type_registry_presets(
+ use_remote_preset=use_remote_preset, auto_discover=auto_discover
+ )
+
+ def apply_type_registry_presets(
+ self, use_remote_preset: bool = True, auto_discover: bool = True
+ ):
+ if self.type_registry_preset is not None:
+ # Load type registry according to preset
+ type_registry_preset_dict = load_type_registry_preset(
+ name=self.type_registry_preset, use_remote_preset=use_remote_preset
+ )
+
+ if not type_registry_preset_dict:
+ raise ValueError(
+ f"Type registry preset '{self.type_registry_preset}' not found"
+ )
+
+ elif auto_discover:
+ # Try to auto discover type registry preset by chain name
+ type_registry_name = self.chain.lower().replace(" ", "-")
+ try:
+ type_registry_preset_dict = load_type_registry_preset(
+ type_registry_name
+ )
+ # self.debug_message(f"Auto set type_registry_preset to {type_registry_name} ...")
+ self.type_registry_preset = type_registry_name
+ except ValueError:
+ type_registry_preset_dict = None
+
+ else:
+ type_registry_preset_dict = None
+
+ if type_registry_preset_dict:
+ # Load type registries in runtime configuration
+ if self.implements_scaleinfo is False:
+ # Only runtime with no embedded types in metadata need the default set of explicit defined types
+ self.runtime_config.update_type_registry(
+ load_type_registry_preset(
+ "legacy", use_remote_preset=use_remote_preset
+ )
+ )
+
+ if self.type_registry_preset != "legacy":
+ self.runtime_config.update_type_registry(type_registry_preset_dict)
+
+ if self.type_registry:
+ # Load type registries in runtime configuration
+ self.runtime_config.update_type_registry(self.type_registry)
+
+ @property
+ def implements_scaleinfo(self) -> Optional[bool]:
+ """
+        Returns True if the current runtime implements a `PortableRegistry` (`MetadataV14` and higher)
+
+ Returns
+ -------
+ bool
+ """
+ if self.metadata:
+ return self.metadata.portable_registry is not None
+ else:
+ return None
+
+ async def create_storage_key(
+ self,
+ pallet: str,
+ storage_function: str,
+ params: Optional[list] = None,
+        block_hash: Optional[str] = None,
+ ) -> StorageKey:
+ """
+ Create a `StorageKey` instance providing storage function details. See `subscribe_storage()`.
+
+ Parameters
+ ----------
+ pallet: name of pallet
+ storage_function: name of storage function
+ params: Optional list of parameters in case of a Mapped storage function
+
+ Returns
+ -------
+ StorageKey
+ """
+ await self.init_runtime(block_hash=block_hash)
+
+ return StorageKey.create_from_storage_function(
+ pallet,
+ storage_function,
+ params,
+ runtime_config=self.runtime_config,
+ metadata=self.metadata,
+ )
+
+ async def _get_block_handler(
+ self,
+ block_hash: str,
+ ignore_decoding_errors: bool = False,
+ include_author: bool = False,
+ header_only: bool = False,
+ finalized_only: bool = False,
+ subscription_handler: Optional[Callable] = None,
+ ):
+ try:
+ await self.init_runtime(block_hash=block_hash)
+ except BlockNotFound:
+ return None
+
+ async def decode_block(block_data, block_data_hash=None):
+ if block_data:
+ if block_data_hash:
+ block_data["header"]["hash"] = block_data_hash
+
+ if type(block_data["header"]["number"]) is str:
+ # Convert block number from hex (backwards compatibility)
+ block_data["header"]["number"] = int(
+ block_data["header"]["number"], 16
+ )
+
+ extrinsic_cls = self.runtime_config.get_decoder_class("Extrinsic")
+
+ if "extrinsics" in block_data:
+ for idx, extrinsic_data in enumerate(block_data["extrinsics"]):
+ extrinsic_decoder = extrinsic_cls(
+ data=ScaleBytes(extrinsic_data),
+ metadata=self.metadata,
+ runtime_config=self.runtime_config,
+ )
+ try:
+ extrinsic_decoder.decode(check_remaining=True)
+ block_data["extrinsics"][idx] = extrinsic_decoder
+
+ except Exception as e:
+ if not ignore_decoding_errors:
+ raise
+ block_data["extrinsics"][idx] = None
+
+ for idx, log_data in enumerate(block_data["header"]["digest"]["logs"]):
+ if type(log_data) is str:
+ # Convert digest log from hex (backwards compatibility)
+ try:
+ log_digest_cls = self.runtime_config.get_decoder_class(
+ "sp_runtime::generic::digest::DigestItem"
+ )
+
+ if log_digest_cls is None:
+ raise NotImplementedError(
+ "No decoding class found for 'DigestItem'"
+ )
+
+ log_digest = log_digest_cls(data=ScaleBytes(log_data))
+ log_digest.decode(
+ check_remaining=self.config.get("strict_scale_decode")
+ )
+
+ block_data["header"]["digest"]["logs"][idx] = log_digest
+
+ if include_author and "PreRuntime" in log_digest.value:
+ if self.implements_scaleinfo:
+ engine = bytes(log_digest[1][0])
+ # Retrieve validator set
+ parent_hash = block_data["header"]["parentHash"]
+ validator_set = await self.query(
+ "Session", "Validators", block_hash=parent_hash
+ )
+
+ if engine == b"BABE":
+ babe_predigest = (
+ self.runtime_config.create_scale_object(
+ type_string="RawBabePreDigest",
+ data=ScaleBytes(
+ bytes(log_digest[1][1])
+ ),
+ )
+ )
+
+ babe_predigest.decode(
+ check_remaining=self.config.get(
+ "strict_scale_decode"
+ )
+ )
+
+ rank_validator = babe_predigest[1].value[
+ "authority_index"
+ ]
+
+ block_author = validator_set[rank_validator]
+ block_data["author"] = block_author.value
+
+ elif engine == b"aura":
+ aura_predigest = (
+ self.runtime_config.create_scale_object(
+ type_string="RawAuraPreDigest",
+ data=ScaleBytes(
+ bytes(log_digest[1][1])
+ ),
+ )
+ )
+
+ aura_predigest.decode(check_remaining=True)
+
+ rank_validator = aura_predigest.value[
+ "slot_number"
+ ] % len(validator_set)
+
+ block_author = validator_set[rank_validator]
+ block_data["author"] = block_author.value
+ else:
+ raise NotImplementedError(
+ f"Cannot extract author for engine {log_digest.value['PreRuntime'][0]}"
+ )
+ else:
+ if (
+ log_digest.value["PreRuntime"]["engine"]
+ == "BABE"
+ ):
+ validator_set = await self.query(
+ "Session",
+ "Validators",
+ block_hash=block_hash,
+ )
+ rank_validator = log_digest.value["PreRuntime"][
+ "data"
+ ]["authority_index"]
+
+ block_author = validator_set.elements[
+ rank_validator
+ ]
+ block_data["author"] = block_author.value
+ else:
+ raise NotImplementedError(
+ f"Cannot extract author for engine {log_digest.value['PreRuntime']['engine']}"
+ )
+
+ except Exception:
+ if not ignore_decoding_errors:
+ raise
+ block_data["header"]["digest"]["logs"][idx] = None
+
+ return block_data
+
+ if callable(subscription_handler):
+ rpc_method_prefix = "Finalized" if finalized_only else "New"
+
+ async def result_handler(message, update_nr, subscription_id):
+ new_block = await decode_block({"header": message["params"]["result"]})
+
+ subscription_result = subscription_handler(
+ new_block, update_nr, subscription_id
+ )
+
+ if subscription_result is not None:
+ # Handler returned end result: unsubscribe from further updates
+ self._forgettable_task = asyncio.create_task(
+ self.rpc_request(
+ f"chain_unsubscribe{rpc_method_prefix}Heads",
+ [subscription_id],
+ )
+ )
+
+ return subscription_result
+
+ result = await self._make_rpc_request(
+ [
+ self.make_payload(
+ "_get_block_handler",
+ f"chain_subscribe{rpc_method_prefix}Heads",
+ [],
+ )
+ ],
+ result_handler=result_handler,
+ )
+
+ return result
+
+ else:
+ if header_only:
+ response = await self.rpc_request("chain_getHeader", [block_hash])
+ return await decode_block(
+ {"header": response["result"]}, block_data_hash=block_hash
+ )
+
+ else:
+ response = await self.rpc_request("chain_getBlock", [block_hash])
+ return await decode_block(
+ response["result"]["block"], block_data_hash=block_hash
+ )
+
+ async def get_block(
+ self,
+ block_hash: Optional[str] = None,
+ block_number: Optional[int] = None,
+ ignore_decoding_errors: bool = False,
+ include_author: bool = False,
+ finalized_only: bool = False,
+ ) -> Optional[dict]:
+ """
+        Retrieves a block and decodes its containing extrinsics and log digest items. If `block_hash` and `block_number`
+        are omitted, the chain tip will be retrieved, or the finalized head if `finalized_only` is set to true.
+
+ Either `block_hash` or `block_number` should be set, or both omitted.
+
+ Parameters
+ ----------
+ block_hash: the hash of the block to be retrieved
+        block_number: the block number to be retrieved
+ ignore_decoding_errors: When set this will catch all decoding errors, set the item to None and continue decoding
+ include_author: This will retrieve the block author from the validator set and add to the result
+ finalized_only: when no `block_hash` or `block_number` is set, this will retrieve the finalized head
+
+ Returns
+ -------
+ A dict containing the extrinsic and digest logs data
+ """
+ if block_hash and block_number:
+ raise ValueError("Either block_hash or block_number should be be set")
+
+ if block_number is not None:
+ block_hash = await self.get_block_hash(block_number)
+
+ if block_hash is None:
+ return
+
+ if block_hash and finalized_only:
+ raise ValueError(
+ "finalized_only cannot be True when block_hash is provided"
+ )
+
+ if block_hash is None:
+ # Retrieve block hash
+ if finalized_only:
+ block_hash = await self.get_chain_finalised_head()
+ else:
+ block_hash = await self.get_chain_head()
+
+ return await self._get_block_handler(
+ block_hash=block_hash,
+ ignore_decoding_errors=ignore_decoding_errors,
+ header_only=False,
+ include_author=include_author,
+ )
+
+ async def get_events(self, block_hash: Optional[str] = None) -> list:
+ """
+ Convenience method to get events for a certain block (storage call for module 'System' and function 'Events')
+
+ Parameters
+ ----------
+ block_hash
+
+ Returns
+ -------
+ list
+ """
+
+ def convert_event_data(data):
+ # Extract phase information
+ phase_key, phase_value = next(iter(data["phase"].items()))
+ try:
+ extrinsic_idx = phase_value[0]
+ except IndexError:
+ extrinsic_idx = None
+
+ # Extract event details
+ module_id, event_data = next(iter(data["event"].items()))
+ event_id, attributes_data = next(iter(event_data[0].items()))
+
+ # Convert class and pays_fee dictionaries to their string equivalents if they exist
+ attributes = attributes_data
+ if isinstance(attributes, dict):
+ for key, value in attributes.items():
+ if isinstance(value, dict):
+ # Convert nested single-key dictionaries to their keys as strings
+ sub_key = next(iter(value.keys()))
+ if value[sub_key] == ():
+ attributes[key] = sub_key
+
+ # Create the converted dictionary
+ converted = {
+ "phase": phase_key,
+ "extrinsic_idx": extrinsic_idx,
+ "event": {
+ "module_id": module_id,
+ "event_id": event_id,
+ "attributes": attributes,
+ },
+ "topics": list(data["topics"]), # Convert topics tuple to a list
+ }
+
+ return converted
+
+ events = []
+
+ if not block_hash:
+ block_hash = await self.get_chain_head()
+
+ storage_obj = await self.query(
+ module="System", storage_function="Events", block_hash=block_hash
+ )
+ if storage_obj:
+            for item in list(storage_obj):
+                events.append(convert_event_data(item))
+ return events
+
+ async def get_block_runtime_version(self, block_hash: str) -> dict:
+ """
+ Retrieve the runtime version id of given block_hash
+ """
+ response = await self.rpc_request("state_getRuntimeVersion", [block_hash])
+ return response.get("result")
+
+ async def get_block_metadata(
+ self, block_hash: Optional[str] = None, decode: bool = True
+ ) -> Union[dict, ScaleType]:
+ """
+        A pass-through to the existing JSONRPC method `state_getMetadata`.
+
+ Parameters
+ ----------
+ block_hash
+ decode: True for decoded version
+
+ Returns
+ -------
+
+ """
+ params = None
+ if decode and not self.runtime_config:
+ raise ValueError(
+ "Cannot decode runtime configuration without a supplied runtime_config"
+ )
+
+ if block_hash:
+ params = [block_hash]
+ response = await self.rpc_request("state_getMetadata", params)
+
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ if response.get("result") and decode:
+ metadata_decoder = self.runtime_config.create_scale_object(
+ "MetadataVersioned", data=ScaleBytes(response.get("result"))
+ )
+ metadata_decoder.decode()
+
+ return metadata_decoder
+
+ return response
+
+ async def _preprocess(
+ self,
+ query_for: Optional[list],
+ block_hash: Optional[str],
+ storage_function: str,
+ module: str,
+ ) -> Preprocessed:
+ """
+ Creates a Preprocessed data object for passing to `_make_rpc_request`
+ """
+ params = query_for if query_for else []
+ # Search storage call in metadata
+ metadata_pallet = self.metadata.get_metadata_pallet(module)
+
+ if not metadata_pallet:
+ raise SubstrateRequestException(f'Pallet "{module}" not found')
+
+ storage_item = metadata_pallet.get_storage_function(storage_function)
+
+ if not metadata_pallet or not storage_item:
+ raise SubstrateRequestException(
+ f'Storage function "{module}.{storage_function}" not found'
+ )
+
+ # SCALE type string of value
+ param_types = storage_item.get_params_type_string()
+ value_scale_type = storage_item.get_value_type_string()
+
+ if len(params) != len(param_types):
+ raise ValueError(
+ f"Storage function requires {len(param_types)} parameters, {len(params)} given"
+ )
+
+ storage_key = StorageKey.create_from_storage_function(
+ module,
+ storage_item.value["name"],
+ params,
+ runtime_config=self.runtime_config,
+ metadata=self.metadata,
+ )
+ method = "state_getStorageAt"
+ return Preprocessed(
+ str(query_for),
+ method,
+ [storage_key.to_hex(), block_hash],
+ value_scale_type,
+ storage_item,
+ )
+
+ async def _process_response(
+ self,
+ response: dict,
+ subscription_id: Union[int, str],
+ value_scale_type: Optional[str] = None,
+ storage_item: Optional[ScaleType] = None,
+ runtime: Optional[Runtime] = None,
+ result_handler: Optional[ResultHandler] = None,
+ ) -> tuple[Union[ScaleType, dict], bool]:
+ """
+ Processes the RPC call response by decoding it, returning it as is, or setting a handler for subscriptions,
+ depending on the specific call.
+
+ :param response: the RPC call response
+ :param subscription_id: the subscription id for subscriptions, used only for subscriptions with a result handler
+ :param value_scale_type: Scale Type string used for decoding ScaleBytes results
+ :param storage_item: The ScaleType object used for decoding ScaleBytes results
+ :param runtime: the runtime object, used for decoding ScaleBytes results
+ :param result_handler: the result handler coroutine used for handling longer-running subscriptions
+
+ :return: (decoded response, completion)
+ """
+ result: Union[dict, ScaleType] = response
+ if value_scale_type and isinstance(storage_item, ScaleType):
+ if not runtime:
+ async with self._lock:
+ runtime = Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ )
+ if response.get("result") is not None:
+ query_value = response.get("result")
+ elif storage_item.value["modifier"] == "Default":
+ # Fallback to default value of storage function if no result
+ query_value = storage_item.value_object["default"].value_object
+ else:
+ # No result is interpreted as an Option<...> result
+ value_scale_type = f"Option<{value_scale_type}>"
+ query_value = storage_item.value_object["default"].value_object
+ if isinstance(query_value, str):
+ q = bytes.fromhex(query_value[2:])
+ elif isinstance(query_value, bytearray):
+ q = bytes(query_value)
+ else:
+ q = query_value
+ obj = await self.decode_scale(value_scale_type, q, True)
+ result = obj
+ if asyncio.iscoroutinefunction(result_handler):
+ # For multipart responses as a result of subscriptions.
+ message, bool_result = await result_handler(response, subscription_id)
+ return message, bool_result
+ return result, True
+
+ async def _make_rpc_request(
+ self,
+ payloads: list[dict],
+ value_scale_type: Optional[str] = None,
+ storage_item: Optional[ScaleType] = None,
+ runtime: Optional[Runtime] = None,
+ result_handler: Optional[ResultHandler] = None,
+ ) -> RequestManager.RequestResults:
+ request_manager = RequestManager(payloads)
+
+ subscription_added = False
+
+ async with self.ws as ws:
+ for item in payloads:
+ item_id = await ws.send(item["payload"])
+ request_manager.add_request(item_id, item["id"])
+
+ while True:
+ for item_id in request_manager.response_map.keys():
+ if (
+ item_id not in request_manager.responses
+ or asyncio.iscoroutinefunction(result_handler)
+ ):
+ if response := await ws.retrieve(item_id):
+ if (
+ asyncio.iscoroutinefunction(result_handler)
+ and not subscription_added
+ ):
+ # handles subscriptions, overwrites the previous mapping of {item_id : payload_id}
+ # with {subscription_id : payload_id}
+ try:
+ item_id = request_manager.overwrite_request(
+ item_id, response["result"]
+ )
+ except KeyError:
+ raise SubstrateRequestException(str(response))
+ decoded_response, complete = await self._process_response(
+ response,
+ item_id,
+ value_scale_type,
+ storage_item,
+ runtime,
+ result_handler,
+ )
+ request_manager.add_response(
+ item_id, decoded_response, complete
+ )
+ if (
+ asyncio.iscoroutinefunction(result_handler)
+ and not subscription_added
+ ):
+ subscription_added = True
+ break
+
+ if request_manager.is_complete:
+ break
+
+ return request_manager.get_results()
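+
+    # Illustrative: the returned mapping is keyed by the caller-supplied payload ids,
+    # e.g. {"rpc_request": [{"jsonrpc": "2.0", "result": ..., "id": 0}]}; a plain
+    # (non-subscription) request produces exactly one response per payload id.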
+
+ @staticmethod
+ def make_payload(id_: str, method: str, params: list) -> dict:
+ """
+ Creates a payload for making an rpc_request with _make_rpc_request
+
+ :param id_: a unique name you would like to give to this request
+ :param method: the method in the RPC request
+ :param params: the params in the RPC request
+
+ :return: the payload dict
+ """
+ return {
+ "id": id_,
+ "payload": {"jsonrpc": "2.0", "method": method, "params": params},
+ }
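+
+    # Example (taken directly from the structure above):
+    #   make_payload("chain_head", "chain_getHead", []) returns
+    #   {"id": "chain_head", "payload": {"jsonrpc": "2.0", "method": "chain_getHead", "params": []}}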
+
+ async def rpc_request(
+ self,
+ method: str,
+ params: Optional[list],
+ block_hash: Optional[str] = None,
+ reuse_block_hash: bool = False,
+ ) -> Any:
+ """
+ Makes an RPC request to the subtensor. Use this only if ``self.query`` and ``self.query_multiple`` and
+ ``self.query_map`` do not meet your needs.
+
+ :param method: str the method in the RPC request
+ :param params: list of the params in the RPC request
+ :param block_hash: optional str, the hash of the block — only supply this if not supplying the block
+ hash in the params, and not reusing the block hash
+ :param reuse_block_hash: optional bool, whether to reuse the block hash in the params — only mark as True
+ if not supplying the block hash in the params, or via the `block_hash` parameter
+
+ :return: the response from the RPC request
+ """
+ block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
+ params = params or []
+ payload_id = f"{method}{random.randint(0, 7000)}"
+ payloads = [
+ self.make_payload(
+ payload_id,
+ method,
+ params + [block_hash] if block_hash else params,
+ )
+ ]
+ runtime = Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ )
+ result = await self._make_rpc_request(payloads, runtime=runtime)
+ if "error" in result[payload_id][0]:
+ raise SubstrateRequestException(result[payload_id][0]["error"]["message"])
+ if "result" in result[payload_id][0]:
+ return result[payload_id][0]
+ else:
+ raise SubstrateRequestException(result[payload_id][0])
+
+ async def get_block_hash(self, block_id: int) -> str:
+ return (await self.rpc_request("chain_getBlockHash", [block_id]))["result"]
+
+ async def get_chain_head(self) -> str:
+ result = await self._make_rpc_request(
+ [
+ self.make_payload(
+ "rpc_request",
+ "chain_getHead",
+ [],
+ )
+ ],
+ runtime=Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ ),
+ )
+ self.last_block_hash = result["rpc_request"][0]["result"]
+ return result["rpc_request"][0]["result"]
+
+ async def compose_call(
+ self,
+ call_module: str,
+ call_function: str,
+ call_params: Optional[dict] = None,
+ block_hash: Optional[str] = None,
+ ) -> GenericCall:
+ """
+ Composes a call payload which can be used in an extrinsic.
+
+ :param call_module: Name of the runtime module e.g. Balances
+ :param call_function: Name of the call function e.g. transfer
+ :param call_params: This is a dict containing the params of the call. e.g.
+ `{'dest': 'EaG2CRhJWPb7qmdcJvy3LiWdh26Jreu9Dx6R1rXxPmYXoDk', 'value': 1000000000000}`
+ :param block_hash: Use metadata at given block_hash to compose call
+
+ :return: A composed call
+ """
+ if call_params is None:
+ call_params = {}
+
+ await self.init_runtime(block_hash=block_hash)
+
+ call = self.runtime_config.create_scale_object(
+ type_string="Call", metadata=self.metadata
+ )
+
+ call.encode(
+ {
+ "call_module": call_module,
+ "call_function": call_function,
+ "call_args": call_params,
+ }
+ )
+
+ return call
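+
+    # Usage sketch (illustrative values, mirroring the docstring):
+    #   call = await substrate.compose_call(
+    #       call_module="Balances",
+    #       call_function="transfer",
+    #       call_params={"dest": "EaG2CRhJWPb7qmdcJvy3LiWdh26Jreu9Dx6R1rXxPmYXoDk", "value": 10**12},
+    #   )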
+
+ async def query_multiple(
+ self,
+ params: list,
+ storage_function: str,
+ module: str,
+ block_hash: Optional[str] = None,
+ reuse_block_hash: bool = False,
+ ) -> dict[str, ScaleType]:
+ """
+ Queries the subtensor. Only use this when making multiple queries, else use ``self.query``
+ """
+        # By allowing the block hash to be specified, users who have multiple query types to run can
+        # query the block hash once, then pass multiple query calls into an asyncio.gather with the
+        # specified block hash
+ block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
+ if block_hash:
+ self.last_block_hash = block_hash
+ runtime = await self.init_runtime(block_hash=block_hash)
+ preprocessed: tuple[Preprocessed] = await asyncio.gather(
+ *[
+ self._preprocess([x], block_hash, storage_function, module)
+ for x in params
+ ]
+ )
+ all_info = [
+ self.make_payload(item.queryable, item.method, item.params)
+ for item in preprocessed
+ ]
+ # These will always be the same throughout the preprocessed list, so we just grab the first one
+ value_scale_type = preprocessed[0].value_scale_type
+ storage_item = preprocessed[0].storage_item
+
+ responses = await self._make_rpc_request(
+ all_info, value_scale_type, storage_item, runtime
+ )
+ return {
+ param: responses[p.queryable][0] for (param, p) in zip(params, preprocessed)
+ }
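+
+    # Usage sketch (illustrative, with hypothetical pallet/storage names): pin one block
+    # hash, then fan out queries concurrently:
+    #   bh = await substrate.get_chain_head()
+    #   results = await substrate.query_multiple(
+    #       params=[key_a, key_b], storage_function="SomeStorageFn",
+    #       module="SomePallet", block_hash=bh,
+    #   )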
+
+ async def query_multi(
+ self, storage_keys: list[StorageKey], block_hash: Optional[str] = None
+ ) -> list:
+ """
+ Query multiple storage keys in one request.
+
+ Example:
+
+ ```
+ storage_keys = [
+            await substrate.create_storage_key(
+                "System", "Account", ["F4xQKRUagnSGjFqafyhajLs94e7Vvzvr8ebwYJceKpr8R7T"]
+            ),
+            await substrate.create_storage_key(
+                "System", "Account", ["GSEX8kR4Kz5UZGhvRUCJG93D5hhTAoVZ5tAe6Zne7V42DSi"]
+            )
+        ]
+
+        result = await substrate.query_multi(storage_keys)
+ ```
+
+ Parameters
+ ----------
+ storage_keys: list of StorageKey objects
+ block_hash: Optional block_hash of state snapshot
+
+ Returns
+ -------
+ list of `(storage_key, scale_obj)` tuples
+ """
+
+ await self.init_runtime(block_hash=block_hash)
+
+ # Retrieve corresponding value
+ response = await self.rpc_request(
+ "state_queryStorageAt", [[s.to_hex() for s in storage_keys], block_hash]
+ )
+
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ result = []
+
+ storage_key_map = {s.to_hex(): s for s in storage_keys}
+
+ for result_group in response["result"]:
+ for change_storage_key, change_data in result_group["changes"]:
+ # Decode result for specified storage_key
+ storage_key = storage_key_map[change_storage_key]
+ if change_data is None:
+ change_data = b"\x00"
+ else:
+ change_data = bytes.fromhex(change_data[2:])
+ result.append(
+ (
+ storage_key,
+ await self.decode_scale(
+ storage_key.value_scale_type, change_data
+ ),
+ )
+ )
+
+ return result
+
+ async def create_scale_object(
+ self,
+ type_string: str,
+ data: Optional[ScaleBytes] = None,
+ block_hash: Optional[str] = None,
+ **kwargs,
+ ) -> "ScaleType":
+ """
+        Convenience method to create a SCALE object of type `type_string`. This will initialize the runtime
+        automatically at the moment of `block_hash`, or at the chain tip if omitted.
+
+ :param type_string: str Name of SCALE type to create
+ :param data: ScaleBytes Optional ScaleBytes to decode
+ :param block_hash: Optional block hash for moment of decoding, when omitted the chain tip will be used
+ :param kwargs: keyword args for the Scale Type constructor
+
+ :return: The created Scale Type object
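+
+        Example (a minimal sketch):
+
+        ```
+        obj = await substrate.create_scale_object("u32")
+        obj.encode(1000)
+        ```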
+ """
+ runtime = await self.init_runtime(block_hash=block_hash)
+ if "metadata" not in kwargs:
+ kwargs["metadata"] = runtime.metadata
+
+ return runtime.runtime_config.create_scale_object(
+ type_string, data=data, **kwargs
+ )
+
+ async def generate_signature_payload(
+ self,
+ call: GenericCall,
+ era=None,
+ nonce: int = 0,
+ tip: int = 0,
+ tip_asset_id: Optional[int] = None,
+ include_call_length: bool = False,
+ ) -> ScaleBytes:
+ # Retrieve genesis hash
+ genesis_hash = await self.get_block_hash(0)
+
+ if not era:
+ era = "00"
+
+ if era == "00":
+ # Immortal extrinsic
+ block_hash = genesis_hash
+ else:
+ # Determine mortality of extrinsic
+ era_obj = self.runtime_config.create_scale_object("Era")
+
+ if isinstance(era, dict) and "current" not in era and "phase" not in era:
+ raise ValueError(
+ 'The era dict must contain either "current" or "phase" element to encode a valid era'
+ )
+
+ era_obj.encode(era)
+ block_hash = await self.get_block_hash(
+ block_id=era_obj.birth(era.get("current"))
+ )
+
+ # Create signature payload
+ signature_payload = self.runtime_config.create_scale_object(
+ "ExtrinsicPayloadValue"
+ )
+
+ # Process signed extensions in metadata
+ if "signed_extensions" in self.metadata[1][1]["extrinsic"]:
+ # Base signature payload
+ signature_payload.type_mapping = [["call", "CallBytes"]]
+
+ # Add signed extensions to payload
+ signed_extensions = self.metadata.get_signed_extensions()
+
+ if "CheckMortality" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["era", signed_extensions["CheckMortality"]["extrinsic"]]
+ )
+
+ if "CheckEra" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["era", signed_extensions["CheckEra"]["extrinsic"]]
+ )
+
+ if "CheckNonce" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["nonce", signed_extensions["CheckNonce"]["extrinsic"]]
+ )
+
+ if "ChargeTransactionPayment" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["tip", signed_extensions["ChargeTransactionPayment"]["extrinsic"]]
+ )
+
+ if "ChargeAssetTxPayment" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["asset_id", signed_extensions["ChargeAssetTxPayment"]["extrinsic"]]
+ )
+
+ if "CheckMetadataHash" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["mode", signed_extensions["CheckMetadataHash"]["extrinsic"]]
+ )
+
+ if "CheckSpecVersion" in signed_extensions:
+ signature_payload.type_mapping.append(
+ [
+ "spec_version",
+ signed_extensions["CheckSpecVersion"]["additional_signed"],
+ ]
+ )
+
+ if "CheckTxVersion" in signed_extensions:
+ signature_payload.type_mapping.append(
+ [
+ "transaction_version",
+ signed_extensions["CheckTxVersion"]["additional_signed"],
+ ]
+ )
+
+ if "CheckGenesis" in signed_extensions:
+ signature_payload.type_mapping.append(
+ [
+ "genesis_hash",
+ signed_extensions["CheckGenesis"]["additional_signed"],
+ ]
+ )
+
+ if "CheckMortality" in signed_extensions:
+ signature_payload.type_mapping.append(
+ [
+ "block_hash",
+ signed_extensions["CheckMortality"]["additional_signed"],
+ ]
+ )
+
+ if "CheckEra" in signed_extensions:
+ signature_payload.type_mapping.append(
+ ["block_hash", signed_extensions["CheckEra"]["additional_signed"]]
+ )
+
+ if "CheckMetadataHash" in signed_extensions:
+ signature_payload.type_mapping.append(
+ [
+ "metadata_hash",
+ signed_extensions["CheckMetadataHash"]["additional_signed"],
+ ]
+ )
+
+ if include_call_length:
+ length_obj = self.runtime_config.create_scale_object("Bytes")
+ call_data = str(length_obj.encode(str(call.data)))
+
+ else:
+ call_data = str(call.data)
+
+ payload_dict = {
+ "call": call_data,
+ "era": era,
+ "nonce": nonce,
+ "tip": tip,
+ "spec_version": self.runtime_version,
+ "genesis_hash": genesis_hash,
+ "block_hash": block_hash,
+ "transaction_version": self.transaction_version,
+ "asset_id": {"tip": tip, "asset_id": tip_asset_id},
+ "metadata_hash": None,
+ "mode": "Disabled",
+ }
+
+ signature_payload.encode(payload_dict)
+
+ if signature_payload.data.length > 256:
+ return ScaleBytes(
+ data=blake2b(signature_payload.data.data, digest_size=32).digest()
+ )
+
+ return signature_payload.data
+
+ async def create_signed_extrinsic(
+ self,
+ call: GenericCall,
+ keypair: Keypair,
+ era: Optional[dict] = None,
+ nonce: Optional[int] = None,
+ tip: int = 0,
+ tip_asset_id: Optional[int] = None,
+ signature: Optional[Union[bytes, str]] = None,
+ ) -> "GenericExtrinsic":
+ """
+ Creates an extrinsic signed by given account details
+
+ :param call: GenericCall to create extrinsic for
+ :param keypair: Keypair used to sign the extrinsic
+        :param era: Specify mortality in blocks in the following format:
+            {'period': [amount_blocks]} If omitted, the extrinsic is immortal
+        :param nonce: nonce to include in the extrinsic; if omitted, the current nonce is retrieved on-chain
+ :param tip: The tip for the block author to gain priority during network congestion
+ :param tip_asset_id: Optional asset ID with which to pay the tip
+ :param signature: Optionally provide signature if externally signed
+
+ :return: The signed Extrinsic
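+
+        Example (a sketch; assumes ``call`` was built with ``compose_call`` and
+        ``keypair`` holds a funded account):
+
+        ```
+        extrinsic = await substrate.create_signed_extrinsic(
+            call=call,
+            keypair=keypair,
+            era={"period": 64},  # mortal for ~64 blocks; omit for an immortal extrinsic
+        )
+        ```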
+ """
+ await self.init_runtime()
+
+ # Check requirements
+ if not isinstance(call, GenericCall):
+ raise TypeError("'call' must be of type Call")
+
+ # Check if extrinsic version is supported
+ if self.metadata[1][1]["extrinsic"]["version"] != 4: # type: ignore
+ raise NotImplementedError(
+ f"Extrinsic version {self.metadata[1][1]['extrinsic']['version']} not supported" # type: ignore
+ )
+
+ # Retrieve nonce
+ if nonce is None:
+ nonce = await self.get_account_nonce(keypair.ss58_address) or 0
+
+ # Process era
+ if era is None:
+ era = "00"
+ else:
+ if isinstance(era, dict) and "current" not in era and "phase" not in era:
+ # Retrieve current block id
+ era["current"] = await self.get_block_number(
+ await self.get_chain_finalised_head()
+ )
+
+ if signature is not None:
+ if isinstance(signature, str) and signature[0:2] == "0x":
+ signature = bytes.fromhex(signature[2:])
+
+ # Check if signature is a MultiSignature and contains signature version
+ if len(signature) == 65:
+ signature_version = signature[0]
+ signature = signature[1:]
+ else:
+ signature_version = keypair.crypto_type
+
+ else:
+ # Create signature payload
+ signature_payload = await self.generate_signature_payload(
+ call=call, era=era, nonce=nonce, tip=tip, tip_asset_id=tip_asset_id
+ )
+
+ # Set Signature version to crypto type of keypair
+ signature_version = keypair.crypto_type
+
+ # Sign payload
+ signature = keypair.sign(signature_payload)
+
+ # Create extrinsic
+ extrinsic = self.runtime_config.create_scale_object(
+ type_string="Extrinsic", metadata=self.metadata
+ )
+
+ value = {
+ "account_id": f"0x{keypair.public_key.hex()}",
+ "signature": f"0x{signature.hex()}",
+ "call_function": call.value["call_function"],
+ "call_module": call.value["call_module"],
+ "call_args": call.value["call_args"],
+ "nonce": nonce,
+ "era": era,
+ "tip": tip,
+ "asset_id": {"tip": tip, "asset_id": tip_asset_id},
+ "mode": "Disabled",
+ }
+
+ # Check if ExtrinsicSignature is MultiSignature, otherwise omit signature_version
+ signature_cls = self.runtime_config.get_decoder_class("ExtrinsicSignature")
+ if issubclass(signature_cls, self.runtime_config.get_decoder_class("Enum")):
+ value["signature_version"] = signature_version
+
+ extrinsic.encode(value)
+
+ return extrinsic
+
+ async def get_chain_finalised_head(self):
+ """
+        A pass-through to the existing JSONRPC method `chain_getFinalizedHead`
+
+        Returns
+        -------
+        The block hash of the latest finalized block
+ """
+ response = await self.rpc_request("chain_getFinalizedHead", [])
+
+ if response is not None:
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ return response.get("result")
+
+ async def runtime_call(
+ self,
+ api: str,
+ method: str,
+ params: Optional[Union[list, dict]] = None,
+ block_hash: Optional[str] = None,
+ ) -> ScaleType:
+ """
+ Calls a runtime API method
+
+ :param api: Name of the runtime API e.g. 'TransactionPaymentApi'
+ :param method: Name of the method e.g. 'query_fee_details'
+ :param params: List of parameters needed to call the runtime API
+ :param block_hash: Hash of the block at which to make the runtime API call
+
+ :return: ScaleType from the runtime call
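+
+        Example (illustrative; assumes ``extrinsic`` and ``extrinsic_len`` are
+        already SCALE-encodable values, as in ``get_payment_info``):
+
+        ```
+        result = await substrate.runtime_call(
+            "TransactionPaymentApi", "query_info", [extrinsic, extrinsic_len]
+        )
+        ```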
+ """
+ await self.init_runtime()
+
+ if params is None:
+ params = {}
+
+ try:
+ runtime_call_def = self.runtime_config.type_registry["runtime_api"][api][
+ "methods"
+ ][method]
+ runtime_api_types = self.runtime_config.type_registry["runtime_api"][
+ api
+ ].get("types", {})
+ except KeyError:
+ raise ValueError(f"Runtime API Call '{api}.{method}' not found in registry")
+
+ if isinstance(params, list) and len(params) != len(runtime_call_def["params"]):
+ raise ValueError(
+ f"Number of parameter provided ({len(params)}) does not "
+ f"match definition {len(runtime_call_def['params'])}"
+ )
+
+ # Add runtime API types to registry
+ self.runtime_config.update_type_registry_types(runtime_api_types)
+ runtime = Runtime(
+ self.chain,
+ self.runtime_config,
+ self.metadata,
+ self.type_registry,
+ )
+
+ # Encode params
+ param_data = ScaleBytes(bytes())
+ for idx, param in enumerate(runtime_call_def["params"]):
+ scale_obj = runtime.runtime_config.create_scale_object(param["type"])
+ if isinstance(params, list):
+ param_data += scale_obj.encode(params[idx])
+ else:
+ if param["name"] not in params:
+ raise ValueError(f"Runtime Call param '{param['name']}' is missing")
+
+ param_data += scale_obj.encode(params[param["name"]])
+
+ # RPC request
+ result_data = await self.rpc_request(
+ "state_call", [f"{api}_{method}", str(param_data), block_hash]
+ )
+
+ # Decode result
+ # TODO update this to use bt-decode
+ result_obj = runtime.runtime_config.create_scale_object(
+ runtime_call_def["type"]
+ )
+ result_obj.decode(
+ ScaleBytes(result_data["result"]),
+ check_remaining=self.config.get("strict_scale_decode"),
+ )
+
+ return result_obj
+
+ async def get_account_nonce(self, account_address: str) -> int:
+ """
+ Returns current nonce for given account address
+
+ :param account_address: SS58 formatted address
+
+ :return: Nonce for given account address
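+
+        Example:
+
+        ```
+        nonce = await substrate.get_account_nonce(keypair.ss58_address)
+        ```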
+ """
+ nonce_obj = await self.runtime_call(
+ "AccountNonceApi", "account_nonce", [account_address]
+ )
+ return nonce_obj.value
+
+ async def get_metadata_constant(self, module_name, constant_name, block_hash=None):
+ """
+        Retrieves the details of a constant for given module name and constant name at the given block_hash
+ (or chaintip if block_hash is omitted)
+
+ Parameters
+ ----------
+ module_name
+ constant_name
+ block_hash
+
+ Returns
+ -------
+ MetadataModuleConstants
+ """
+
+ # await self.init_runtime(block_hash=block_hash)
+
+ for module in self.metadata.pallets:
+ if module_name == module.name and module.constants:
+ for constant in module.constants:
+ if constant_name == constant.value["name"]:
+ return constant
+
+ async def get_constant(
+ self,
+ module_name: str,
+ constant_name: str,
+ block_hash: Optional[str] = None,
+ reuse_block_hash: bool = False,
+ ) -> "ScaleType":
+ """
+        Returns the decoded `ScaleType` object of the constant for given module name and constant name
+ (or chaintip if block_hash is omitted)
+
+ :param module_name: Name of the module to query
+ :param constant_name: Name of the constant to query
+ :param block_hash: Hash of the block at which to make the runtime API call
+ :param reuse_block_hash: Reuse last-used block hash if set to true
+
+        :return: The decoded constant as a `ScaleType`, or None if the constant is not found
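+
+        Example (illustrative; ``ExistentialDeposit`` is a standard Balances constant):
+
+        ```
+        existential_deposit = await substrate.get_constant(
+            "Balances", "ExistentialDeposit"
+        )
+        ```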
+ """
+ block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
+ constant = await self.get_metadata_constant(
+ module_name, constant_name, block_hash=block_hash
+ )
+ if constant:
+ # Decode to ScaleType
+ return await self.decode_scale(
+ constant.type,
+ bytes(constant.constant_value),
+ return_scale_obj=True,
+ )
+ else:
+ return None
+
+ async def get_payment_info(
+ self, call: GenericCall, keypair: Keypair
+ ) -> dict[str, Any]:
+ """
+ Retrieves fee estimation via RPC for given extrinsic
+
+ Parameters
+ ----------
+ call: Call object to estimate fees for
+ keypair: Keypair of the sender, does not have to include private key because no valid signature is required
+
+ Returns
+ -------
+ Dict with payment info
+
+ E.g. `{'class': 'normal', 'partialFee': 151000000, 'weight': {'ref_time': 143322000}}`
+
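+        Example (a sketch; assumes ``call`` and ``keypair`` are already constructed):
+
+        ```
+        payment_info = await substrate.get_payment_info(call=call, keypair=keypair)
+        fee = payment_info["partialFee"]
+        ```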
+ """
+
+ # Check requirements
+ if not isinstance(call, GenericCall):
+ raise TypeError("'call' must be of type Call")
+
+ if not isinstance(keypair, Keypair):
+ raise TypeError("'keypair' must be of type Keypair")
+
+ # No valid signature is required for fee estimation
+ signature = "0x" + "00" * 64
+
+ # Create extrinsic
+ extrinsic = await self.create_signed_extrinsic(
+ call=call, keypair=keypair, signature=signature
+ )
+ extrinsic_len = self.runtime_config.create_scale_object("u32")
+ extrinsic_len.encode(len(extrinsic.data))
+
+ result = await self.runtime_call(
+ "TransactionPaymentApi", "query_info", [extrinsic, extrinsic_len]
+ )
+
+ return result.value
+
+ async def query(
+ self,
+ module: str,
+ storage_function: str,
+ params: Optional[list] = None,
+ block_hash: Optional[str] = None,
+ raw_storage_key: Optional[bytes] = None,
+ subscription_handler=None,
+ reuse_block_hash: bool = False,
+ ) -> Union["ScaleType"]:
+ """
+        Queries the subtensor. This should only be used when making a single request. For multiple requests,
+ you should use ``self.query_multiple``
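+
+        Example (illustrative storage names):
+
+        ```
+        total_stake = await substrate.query(
+            module="SubtensorModule",
+            storage_function="TotalStake",
+        )
+        ```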
+ """
+ block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
+ if block_hash:
+ self.last_block_hash = block_hash
+ runtime = await self.init_runtime(block_hash=block_hash)
+ preprocessed: Preprocessed = await self._preprocess(
+ params, block_hash, storage_function, module
+ )
+ payload = [
+ self.make_payload(
+ preprocessed.queryable, preprocessed.method, preprocessed.params
+ )
+ ]
+ value_scale_type = preprocessed.value_scale_type
+ storage_item = preprocessed.storage_item
+
+ responses = await self._make_rpc_request(
+ payload,
+ value_scale_type,
+ storage_item,
+ runtime,
+ result_handler=subscription_handler,
+ )
+ return responses[preprocessed.queryable][0]
+
+ async def query_map(
+ self,
+ module: str,
+ storage_function: str,
+ params: Optional[list] = None,
+ block_hash: Optional[str] = None,
+ max_results: Optional[int] = None,
+ start_key: Optional[str] = None,
+ page_size: int = 100,
+ ignore_decoding_errors: bool = False,
+ reuse_block_hash: bool = False,
+ ) -> "QueryMapResult":
+ """
+ Iterates over all key-pairs located at the given module and storage_function. The storage
+ item must be a map.
+
+ Example:
+
+ ```
+ result = await substrate.query_map('System', 'Account', max_results=100)
+
+ async for account, account_info in result:
+ print(f"Free balance of account '{account.value}': {account_info.value['data']['free']}")
+ ```
+
+ Note: it is important that you do not use `for x in result.records`, as this will sidestep possible
+ pagination. You must do `async for x in result`.
+
+ :param module: The module name in the metadata, e.g. System or Balances.
+ :param storage_function: The storage function name, e.g. Account or Locks.
+ :param params: The input parameters in case of for example a `DoubleMap` storage function
+ :param block_hash: Optional block hash for result at given block, when left to None the chain tip will be used.
+        :param max_results: the maximum number of results required; if set, the query stops fetching results when
+            the number is reached
+ :param start_key: The storage key used as offset for the results, for pagination purposes
+ :param page_size: The results are fetched from the node RPC in chunks of this size
+        :param ignore_decoding_errors: When set, this will catch all decoding errors, set the item to None, and
+            continue decoding
+        :param reuse_block_hash: set to True to make the query using the last-used block hash. Do not set to True
+            if supplying a block_hash
+
+ :return: QueryMapResult object
+ """
+ params = params or []
+ block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash)
+ if block_hash:
+ self.last_block_hash = block_hash
+ runtime = await self.init_runtime(block_hash=block_hash)
+
+ metadata_pallet = runtime.metadata.get_metadata_pallet(module)
+ if not metadata_pallet:
+ raise ValueError(f'Pallet "{module}" not found')
+ storage_item = metadata_pallet.get_storage_function(storage_function)
+
+        if not storage_item:
+            raise ValueError(
+                f'Storage function "{module}.{storage_function}" not found'
+            )
+
+ value_type = storage_item.get_value_type_string()
+ param_types = storage_item.get_params_type_string()
+ key_hashers = storage_item.get_param_hashers()
+
+ # Check MapType conditions
+ if len(param_types) == 0:
+ raise ValueError("Given storage function is not a map")
+ if len(params) > len(param_types) - 1:
+ raise ValueError(
+ f"Storage function map can accept max {len(param_types) - 1} parameters, {len(params)} given"
+ )
+
+ # Generate storage key prefix
+ storage_key = StorageKey.create_from_storage_function(
+ module,
+ storage_item.value["name"],
+ params,
+ runtime_config=runtime.runtime_config,
+ metadata=runtime.metadata,
+ )
+ prefix = storage_key.to_hex()
+
+ if not start_key:
+ start_key = prefix
+
+        # If max_results is smaller than the page size, shrink the page size to match
+ if max_results is not None and max_results < page_size:
+ page_size = max_results
+
+ # Retrieve storage keys
+ response = await self.rpc_request(
+ method="state_getKeysPaged",
+ params=[prefix, page_size, start_key, block_hash],
+ )
+
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ result_keys = response.get("result")
+
+ result = []
+ last_key = None
+
+ def concat_hash_len(key_hasher: str) -> int:
+ """
+            Returns the byte length of the hash prefix produced by the given key hasher.
+ """
+ if key_hasher == "Blake2_128Concat":
+ return 16
+ elif key_hasher == "Twox64Concat":
+ return 8
+ elif key_hasher == "Identity":
+ return 0
+ else:
+ raise ValueError("Unsupported hash type")
+
+ if len(result_keys) > 0:
+ last_key = result_keys[-1]
+
+ # Retrieve corresponding value
+ response = await self.rpc_request(
+ method="state_queryStorageAt", params=[result_keys, block_hash]
+ )
+
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ for result_group in response["result"]:
+ for item in result_group["changes"]:
+ try:
+ # Determine type string
+ key_type_string = []
+ for n in range(len(params), len(param_types)):
+ key_type_string.append(
+ f"[u8; {concat_hash_len(key_hashers[n])}]"
+ )
+ key_type_string.append(param_types[n])
+
+ item_key_obj = await self.decode_scale(
+ type_string=f"({', '.join(key_type_string)})",
+ scale_bytes=bytes.fromhex(item[0][len(prefix) :]),
+ return_scale_obj=True,
+ )
+
+ # strip key_hashers to use as item key
+ if len(param_types) - len(params) == 1:
+ item_key = item_key_obj[1]
+ else:
+ item_key = tuple(
+ item_key_obj[key + 1]
+ for key in range(len(params), len(param_types) + 1, 2)
+ )
+
+ except Exception as _:
+ if not ignore_decoding_errors:
+ raise
+ item_key = None
+
+ try:
+ try:
+ item_bytes = bytes.fromhex(item[1][2:])
+ except ValueError:
+ item_bytes = bytes.fromhex(item[1])
+
+ item_value = await self.decode_scale(
+ type_string=value_type,
+ scale_bytes=item_bytes,
+ return_scale_obj=True,
+ )
+ except Exception as _:
+ if not ignore_decoding_errors:
+ raise
+ item_value = None
+
+ result.append([item_key, item_value])
+
+ return QueryMapResult(
+ records=result,
+ page_size=page_size,
+ module=module,
+ storage_function=storage_function,
+ params=params,
+ block_hash=block_hash,
+ substrate=self,
+ last_key=last_key,
+ max_results=max_results,
+ ignore_decoding_errors=ignore_decoding_errors,
+ )
+
+ async def submit_extrinsic(
+ self,
+ extrinsic: GenericExtrinsic,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = False,
+ ) -> "ExtrinsicReceipt":
+ """
+        Submit an extrinsic to the connected node, with the possibility to wait until the extrinsic is included
+        in a block and/or the block is finalized. The returned receipt provides information about the block and
+        the triggered events
+
+ Parameters
+ ----------
+        extrinsic: The extrinsic to be sent to the network
+ wait_for_inclusion: wait until extrinsic is included in a block (only works for websocket connections)
+ wait_for_finalization: wait until extrinsic is finalized (only works for websocket connections)
+
+ Returns
+ -------
+ ExtrinsicReceipt
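+
+        Example (a sketch; assumes a signed ``extrinsic`` from ``create_signed_extrinsic``):
+
+        ```
+        receipt = await substrate.submit_extrinsic(extrinsic, wait_for_inclusion=True)
+        print(receipt.block_hash)
+        ```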
+
+ """
+
+ # Check requirements
+ if not isinstance(extrinsic, GenericExtrinsic):
+ raise TypeError("'extrinsic' must be of type Extrinsics")
+
+ async def result_handler(message: dict, subscription_id) -> tuple[dict, bool]:
+ """
+ Result handler function passed as an arg to _make_rpc_request as the result_handler
+ to handle the results of the extrinsic rpc call, which are multipart, and require
+ subscribing to the message
+
+ :param message: message received from the rpc call
+ :param subscription_id: subscription id received from the initial rpc call for the subscription
+
+ :returns: tuple containing the dict of the block info for the subscription, and bool for whether
+ the subscription is completed.
+ """
+ # Check if extrinsic is included and finalized
+ if "params" in message and isinstance(message["params"]["result"], dict):
+ # Convert result enum to lower for backwards compatibility
+ message_result = {
+ k.lower(): v for k, v in message["params"]["result"].items()
+ }
+
+ if "finalized" in message_result and wait_for_finalization:
+ # Created as a task because we don't actually care about the result
+ self._forgettable_task = asyncio.create_task(
+ self.rpc_request("author_unwatchExtrinsic", [subscription_id])
+ )
+ return {
+ "block_hash": message_result["finalized"],
+ "extrinsic_hash": "0x{}".format(extrinsic.extrinsic_hash.hex()),
+ "finalized": True,
+ }, True
+ elif (
+ "inblock" in message_result
+ and wait_for_inclusion
+ and not wait_for_finalization
+ ):
+ # Created as a task because we don't actually care about the result
+ self._forgettable_task = asyncio.create_task(
+ self.rpc_request("author_unwatchExtrinsic", [subscription_id])
+ )
+ return {
+ "block_hash": message_result["inblock"],
+ "extrinsic_hash": "0x{}".format(extrinsic.extrinsic_hash.hex()),
+ "finalized": False,
+ }, True
+ return message, False
+
+ if wait_for_inclusion or wait_for_finalization:
+ responses = (
+ await self._make_rpc_request(
+ [
+ self.make_payload(
+ "rpc_request",
+ "author_submitAndWatchExtrinsic",
+ [str(extrinsic.data)],
+ )
+ ],
+ result_handler=result_handler,
+ )
+ )["rpc_request"]
+ response = next(
+ (r for r in responses if "block_hash" in r and "extrinsic_hash" in r),
+ None,
+ )
+
+ if not response:
+ raise SubstrateRequestException(responses)
+
+            # TODO: this will be a multipart response; should it collect everything after
+            # the first response? The code below assumes a single result message after the
+            # initial subscription id.
+ result = ExtrinsicReceipt(
+ substrate=self,
+ extrinsic_hash=response["extrinsic_hash"],
+ block_hash=response["block_hash"],
+ finalized=response["finalized"],
+ )
+
+ else:
+ response = await self.rpc_request(
+ "author_submitExtrinsic", [str(extrinsic.data)]
+ )
+
+ if "result" not in response:
+ raise SubstrateRequestException(response.get("error"))
+
+ result = ExtrinsicReceipt(substrate=self, extrinsic_hash=response["result"])
+
+ return result
+
+ async def get_metadata_call_function(
+ self,
+ module_name: str,
+ call_function_name: str,
+ block_hash: Optional[str] = None,
+ ) -> Optional[list]:
+ """
+        Retrieves the specified call function from the metadata active at the given block_hash (or chaintip if
+        block_hash is omitted)
+
+ :param module_name: name of the module
+ :param call_function_name: name of the call function
+ :param block_hash: optional block hash
+
+        :return: the call function metadata, or None if not found
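+
+        Example:
+
+        ```
+        call_meta = await substrate.get_metadata_call_function("Balances", "transfer")
+        ```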
+ """
+ runtime = await self.init_runtime(block_hash=block_hash)
+
+ for pallet in runtime.metadata.pallets:
+ if pallet.name == module_name and pallet.calls:
+ for call in pallet.calls:
+ if call.name == call_function_name:
+ return call
+ return None
+
+ async def get_block_number(self, block_hash: Optional[str] = None) -> int:
+ """Async version of `substrateinterface.base.get_block_number` method."""
+ response = await self.rpc_request("chain_getHeader", [block_hash])
+
+ if "error" in response:
+ raise SubstrateRequestException(response["error"]["message"])
+
+ elif "result" in response:
+ if response["result"]:
+ return int(response["result"]["number"], 16)
+
+ async def close(self):
+ """
+ Closes the substrate connection, and the websocket connection.
+ """
+ try:
+ await self.ws.shutdown()
+ except AttributeError:
+ pass
diff --git a/bittensor/utils/balance.py b/bittensor/utils/balance.py
index 016db373a4..112036a95a 100644
--- a/bittensor/utils/balance.py
+++ b/bittensor/utils/balance.py
@@ -72,13 +72,13 @@ def __str__(self):
def __rich__(self):
int_tao, fract_tao = format(float(self.tao), "f").split(".")
- return f"[green]{self.unit}[/green][green]{int_tao}[/green][green].[/green][dim green]{fract_tao}[/dim green]"
+ return f"{self.unit}{int_tao}.{fract_tao}"
def __str_rao__(self):
return f"{self.rao_unit}{int(self.rao)}"
def __rich_rao__(self):
- return f"[green]{self.rao_unit}{int(self.rao)}[/green]"
+ return f"{self.rao_unit}{int(self.rao)}"
def __repr__(self):
return self.__str__()
diff --git a/bittensor/utils/delegates_details.py b/bittensor/utils/delegates_details.py
new file mode 100644
index 0000000000..88a5633e76
--- /dev/null
+++ b/bittensor/utils/delegates_details.py
@@ -0,0 +1,43 @@
+from dataclasses import dataclass
+from typing import Any, Optional
+
+
+@dataclass
+class DelegatesDetails:
+ display: str
+ additional: list[tuple[str, str]]
+ web: str
+ legal: Optional[str] = None
+ riot: Optional[str] = None
+ email: Optional[str] = None
+ pgp_fingerprint: Optional[str] = None
+ image: Optional[str] = None
+ twitter: Optional[str] = None
+
+ @classmethod
+ def from_chain_data(cls, data: dict[str, Any]) -> "DelegatesDetails":
+ def decode(key: str, default: Optional[str] = ""):
+ try:
+ if isinstance(data.get(key), dict):
+                value = next(iter(data.get(key).values()))
+ return bytes(value[0]).decode("utf-8")
+ elif isinstance(data.get(key), int):
+ return data.get(key)
+ elif isinstance(data.get(key), tuple):
+ return bytes(data.get(key)[0]).decode("utf-8")
+ else:
+ return default
+ except (UnicodeDecodeError, TypeError):
+ return default
+
+ return cls(
+ display=decode("display"),
+ additional=decode("additional", []),
+ web=decode("web"),
+ legal=decode("legal"),
+ riot=decode("riot"),
+ email=decode("email"),
+ pgp_fingerprint=decode("pgp_fingerprint", None),
+ image=decode("image"),
+ twitter=decode("twitter"),
+ )
diff --git a/bittensor/utils/deprecated.py b/bittensor/utils/deprecated.py
index 146e8395d0..124c0daac9 100644
--- a/bittensor/utils/deprecated.py
+++ b/bittensor/utils/deprecated.py
@@ -45,6 +45,7 @@
from bittensor_wallet import Keypair # noqa: F401
from bittensor.core import settings
+from bittensor.core.async_subtensor import AsyncSubtensor
from bittensor.core.axon import Axon
from bittensor.core.chain_data import ( # noqa: F401
AxonInfo,
@@ -116,6 +117,7 @@
from bittensor.utils.subnets import SubnetsAPI # noqa: F401
# Backwards compatibility with previous bittensor versions.
+async_subtensor = AsyncSubtensor
axon = Axon
config = Config
dendrite = Dendrite
diff --git a/bittensor/utils/networking.py b/bittensor/utils/networking.py
index 4c0c475851..76686d5fa4 100644
--- a/bittensor/utils/networking.py
+++ b/bittensor/utils/networking.py
@@ -191,7 +191,7 @@ def wrapper(self, *args, **kwargs):
)
!= 0
):
- logging.info("Reconnection substrate...")
+ logging.debug("Reconnecting to substrate...")
self._get_substrate()
# Execute the method if the connection is active or after reconnecting
return func(self, *args, **kwargs)
diff --git a/bittensor/utils/registration.py b/bittensor/utils/registration.py
index 4dd6d8ec67..f190f668da 100644
--- a/bittensor/utils/registration.py
+++ b/bittensor/utils/registration.py
@@ -740,9 +740,7 @@ def _solve_for_difficulty_fast(
@retry(Exception, tries=3, delay=1)
-def _get_block_with_retry(
- subtensor: "Subtensor", netuid: int
-) -> tuple[int, int, bytes]:
+def _get_block_with_retry(subtensor: "Subtensor", netuid: int) -> tuple[int, int, str]:
"""
Gets the current block number, difficulty, and block hash from the substrate node.
diff --git a/bittensor/utils/weight_utils.py b/bittensor/utils/weight_utils.py
index f004af446c..feb281a04c 100644
--- a/bittensor/utils/weight_utils.py
+++ b/bittensor/utils/weight_utils.py
@@ -298,7 +298,7 @@ def process_weights_for_netuid(
if use_torch()
else np.ones((metagraph.n), dtype=np.int64) / metagraph.n
)
- logging.debug("final_weights", final_weights)
+ logging.debug("final_weights", *final_weights)
final_weights_count = (
torch.tensor(list(range(len(final_weights))))
if use_torch()
@@ -356,7 +356,7 @@ def process_weights_for_netuid(
normalized_weights = normalize_max_weight(
x=non_zero_weights, limit=max_weight_limit
)
- logging.debug("final_weights", normalized_weights)
+ logging.debug("final_weights", *normalized_weights)
return non_zero_weight_uids, normalized_weights
diff --git a/requirements/prod.txt b/requirements/prod.txt
index 17c73f6f25..1f3628e1a0 100644
--- a/requirements/prod.txt
+++ b/requirements/prod.txt
@@ -1,8 +1,10 @@
wheel
setuptools~=70.0.0
aiohttp~=3.9
+async-property==0.2.2
+backoff
bittensor-cli
-bt-decode
+bt-decode==0.2.0a0
colorama~=0.4.6
fastapi~=0.110.1
munch~=2.5.0
@@ -22,4 +24,5 @@ python-Levenshtein
scalecodec==1.2.11
substrate-interface~=1.7.9
uvicorn
-bittensor-wallet>=2.0.2
+websockets>12.0
+bittensor-wallet>=2.1.0
diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py
index 59170c9512..4a7b2ccf62 100644
--- a/tests/e2e_tests/conftest.py
+++ b/tests/e2e_tests/conftest.py
@@ -8,7 +8,7 @@
import pytest
from substrateinterface import SubstrateInterface
-from bittensor import logging
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.e2e_test_utils import (
clone_or_update_templates,
install_templates,
diff --git a/tests/e2e_tests/test_axon.py b/tests/e2e_tests/test_axon.py
index 7e7de812b9..a21c4ae532 100644
--- a/tests/e2e_tests/test_axon.py
+++ b/tests/e2e_tests/test_axon.py
@@ -4,8 +4,8 @@
import pytest
import bittensor
-from bittensor import logging
from bittensor.utils import networking
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import register_neuron, register_subnet
from tests.e2e_tests.utils.e2e_test_utils import (
setup_wallet,
diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py
index 909b2e7088..5c03a3788b 100644
--- a/tests/e2e_tests/test_commit_weights.py
+++ b/tests/e2e_tests/test_commit_weights.py
@@ -3,8 +3,9 @@
import numpy as np
import pytest
-import bittensor
-from bittensor import logging
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from bittensor.utils.weight_utils import convert_weights_and_uids_for_emit
from tests.e2e_tests.utils.chain_interactions import (
add_stake,
@@ -48,7 +49,7 @@ async def test_commit_and_reveal_weights(local_chain):
), "Unable to register Alice as a neuron"
# Stake to become to top neuron after the first epoch
- add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000))
+ add_stake(local_chain, alice_wallet, Balance.from_tao(100_000))
# Enable commit_reveal on the subnet
assert sudo_set_hyperparameter_bool(
@@ -59,7 +60,7 @@ async def test_commit_and_reveal_weights(local_chain):
netuid,
), "Unable to enable commit reveal on the subnet"
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
assert subtensor.get_subnet_hyperparameters(
netuid=netuid,
).commit_reveal_weights_enabled, "Failed to enable commit/reveal"
@@ -91,6 +92,7 @@ async def test_commit_and_reveal_weights(local_chain):
call_params={"netuid": netuid, "weights_set_rate_limit": "0"},
return_error_message=True,
)
+
assert (
subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0
), "Failed to set weights_rate_limit"
diff --git a/tests/e2e_tests/test_dendrite.py b/tests/e2e_tests/test_dendrite.py
index daeca22230..24484f68d3 100644
--- a/tests/e2e_tests/test_dendrite.py
+++ b/tests/e2e_tests/test_dendrite.py
@@ -3,20 +3,21 @@
import pytest
-import bittensor
-from bittensor import logging, Subtensor
-
-from tests.e2e_tests.utils.e2e_test_utils import (
- setup_wallet,
- template_path,
- templates_repo,
-)
+from bittensor.core.metagraph import Metagraph
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import (
register_neuron,
register_subnet,
add_stake,
wait_epoch,
)
+from tests.e2e_tests.utils.e2e_test_utils import (
+ setup_wallet,
+ template_path,
+ templates_repo,
+)
@pytest.mark.asyncio
@@ -56,7 +57,7 @@ async def test_dendrite(local_chain):
local_chain, bob_wallet, netuid
), f"Neuron wasn't registered to subnet {netuid}"
- metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945")
+ metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945")
subtensor = Subtensor(network="ws://localhost:9945")
# Assert one neuron is Bob
@@ -69,10 +70,10 @@ async def test_dendrite(local_chain):
assert neuron.stake.tao == 0
# Stake to become to top neuron after the first epoch
- assert add_stake(local_chain, bob_wallet, bittensor.Balance.from_tao(10_000))
+ assert add_stake(local_chain, bob_wallet, Balance.from_tao(10_000))
# Refresh metagraph
- metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945")
+ metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945")
old_neuron = metagraph.neurons[0]
# Assert stake is 10000
@@ -120,7 +121,7 @@ async def test_dendrite(local_chain):
await wait_epoch(subtensor, netuid=netuid)
# Refresh metagraph
- metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945")
+ metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945")
# Refresh validator neuron
updated_neuron = metagraph.neurons[0]
diff --git a/tests/e2e_tests/test_incentive.py b/tests/e2e_tests/test_incentive.py
index a95cf37660..e0c6837dc3 100644
--- a/tests/e2e_tests/test_incentive.py
+++ b/tests/e2e_tests/test_incentive.py
@@ -20,6 +20,9 @@
from bittensor.core.metagraph import Metagraph
+FAST_BLOCKS_SPEEDUP_FACTOR = 5
+
+
@pytest.mark.asyncio
async def test_incentive(local_chain):
"""
@@ -158,6 +161,7 @@ async def test_incentive(local_chain):
version_key=0,
wait_for_inclusion=True,
wait_for_finalization=True,
+ period=5 * FAST_BLOCKS_SPEEDUP_FACTOR,
)
logging.info("Alice neuron set weights successfully")
diff --git a/tests/e2e_tests/test_liquid_alpha.py b/tests/e2e_tests/test_liquid_alpha.py
index d73162fbb4..4725704f61 100644
--- a/tests/e2e_tests/test_liquid_alpha.py
+++ b/tests/e2e_tests/test_liquid_alpha.py
@@ -1,5 +1,6 @@
-import bittensor
-from bittensor import logging
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import (
add_stake,
register_neuron,
@@ -49,10 +50,10 @@ def test_liquid_alpha(local_chain):
), "Unable to register Alice as a neuron"
# Stake to become to top neuron after the first epoch
- add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000))
+ add_stake(local_chain, alice_wallet, Balance.from_tao(100_000))
# Assert liquid alpha is disabled
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
assert (
subtensor.get_subnet_hyperparameters(netuid=netuid).liquid_alpha_enabled
is False
@@ -118,7 +119,7 @@ def test_liquid_alpha(local_chain):
alpha_high_too_high = u16_max + 1 # One more than the max acceptable value
call_params = liquid_alpha_call_params(netuid, f"6553, {alpha_high_too_high}")
try:
- result, error_message = sudo_set_hyperparameter_values(
+ sudo_set_hyperparameter_values(
local_chain,
alice_wallet,
call_function="sudo_set_alpha_values",
diff --git a/tests/e2e_tests/test_metagraph.py b/tests/e2e_tests/test_metagraph.py
index ff16dde369..8999b30358 100644
--- a/tests/e2e_tests/test_metagraph.py
+++ b/tests/e2e_tests/test_metagraph.py
@@ -1,7 +1,8 @@
import time
-import bittensor
-from bittensor import logging
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.balance import Balance
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import (
add_stake,
register_neuron,
@@ -64,7 +65,7 @@ def test_metagraph(local_chain):
).serialize(), "Subnet wasn't created successfully"
# Initialize metagraph
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
metagraph = subtensor.metagraph(netuid=1)
# Assert metagraph is empty
@@ -129,17 +130,17 @@ def test_metagraph(local_chain):
# Test staking with low balance
assert not add_stake(
- local_chain, dave_wallet, bittensor.Balance.from_tao(10_000)
+ local_chain, dave_wallet, Balance.from_tao(10_000)
), "Low balance stake should fail"
# Add stake by Bob
assert add_stake(
- local_chain, bob_wallet, bittensor.Balance.from_tao(10_000)
+ local_chain, bob_wallet, Balance.from_tao(10_000)
), "Failed to add stake for Bob"
# Assert stake is added after updating metagraph
metagraph.sync(subtensor=subtensor)
- assert metagraph.neurons[0].stake == bittensor.Balance.from_tao(
+ assert metagraph.neurons[0].stake == Balance.from_tao(
10_000
), "Bob's stake not updated in metagraph"
diff --git a/tests/e2e_tests/test_subtensor_functions.py b/tests/e2e_tests/test_subtensor_functions.py
index 7a4e1847e6..d00e587fba 100644
--- a/tests/e2e_tests/test_subtensor_functions.py
+++ b/tests/e2e_tests/test_subtensor_functions.py
@@ -3,8 +3,8 @@
import pytest
-import bittensor
-from bittensor import logging
+from bittensor.core.subtensor import Subtensor
+from bittensor.utils.btlogging import logging
from tests.e2e_tests.utils.chain_interactions import (
register_neuron,
register_subnet,
@@ -31,7 +31,7 @@ async def test_subtensor_extrinsics(local_chain):
AssertionError: If any of the checks or verifications fail
"""
netuid = 1
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
# Subnets 0 and 3 are bootstrapped from the start
assert subtensor.get_subnets() == [0, 3]
@@ -138,7 +138,7 @@ async def test_subtensor_extrinsics(local_chain):
await asyncio.sleep(
5
) # wait for 5 seconds for the metagraph and subtensor to refresh with latest data
- subtensor = bittensor.Subtensor(network="ws://localhost:9945")
+ subtensor = Subtensor(network="ws://localhost:9945")
# Verify neuron info is updated after running as a validator
neuron_info = subtensor.get_neuron_for_pubkey_and_subnet(
diff --git a/tests/e2e_tests/utils/chain_interactions.py b/tests/e2e_tests/utils/chain_interactions.py
index aad53812c8..20e4a65dea 100644
--- a/tests/e2e_tests/utils/chain_interactions.py
+++ b/tests/e2e_tests/utils/chain_interactions.py
@@ -6,7 +6,7 @@
import asyncio
from typing import Union, Optional, TYPE_CHECKING
-from bittensor import logging
+from bittensor.utils.btlogging import logging
# for typing purposes
if TYPE_CHECKING:
diff --git a/tests/integration_tests/utils/test_init.py b/tests/integration_tests/utils/test_init.py
new file mode 100644
index 0000000000..000e94b3d8
--- /dev/null
+++ b/tests/integration_tests/utils/test_init.py
@@ -0,0 +1,42 @@
+import os
+import shutil
+
+from bittensor_wallet import Wallet, Keyfile, Keypair
+import pytest
+
+from bittensor import utils
+
+
+def test_unlock_key(monkeypatch):
+ # Ensure path is clean before we run the tests
+ if os.path.exists("/tmp/bittensor-tests-wallets"):
+ shutil.rmtree("/tmp/bittensor-tests-wallets")
+
+ wallet = Wallet(path="/tmp/bittensor-tests-wallets")
+ cold_kf = Keyfile("/tmp/bittensor-tests-wallets/default/coldkey", name="default")
+ kp = Keypair.create_from_mnemonic(
+ "stool feel open east woman high can denial forget screen trust salt"
+ )
+ cold_kf.set_keypair(kp, False, False)
+ cold_kf.encrypt("1234password1234")
+ hot_kf = Keyfile("/tmp/bittensor-tests-wallets/default/hotkey", name="default")
+ hkp = Keypair.create_from_mnemonic(
+ "stool feel open east woman high can denial forget screen trust salt"
+ )
+ hot_kf.set_keypair(hkp, False, False)
+ hot_kf.encrypt("1234hotkey1234")
+ monkeypatch.setattr("getpass.getpass", lambda _: "badpassword1234")
+ result = utils.unlock_key(wallet)
+ assert result.success is False
+ monkeypatch.setattr("getpass.getpass", lambda _: "1234password1234")
+ result = utils.unlock_key(wallet)
+ assert result.success is True
+ monkeypatch.setattr("getpass.getpass", lambda _: "badpassword1234")
+ result = utils.unlock_key(wallet, "hotkey")
+ assert result.success is False
+ with pytest.raises(ValueError):
+ utils.unlock_key(wallet, "mycoldkey")
+
+ # Ensure test wallets path is deleted after running tests
+ if os.path.exists("/tmp/bittensor-tests-wallets"):
+ shutil.rmtree("/tmp/bittensor-tests-wallets")
diff --git a/tests/unit_tests/extrinsics/test_init.py b/tests/unit_tests/extrinsics/test_init.py
index 8a2480a9b9..8ff60d2de6 100644
--- a/tests/unit_tests/extrinsics/test_init.py
+++ b/tests/unit_tests/extrinsics/test_init.py
@@ -1,9 +1,10 @@
"""Tests for bittensor/extrinsics/__ini__ module."""
from bittensor.utils import format_error_message
+from tests.unit_tests.extrinsics.test_commit_weights import subtensor
-def test_format_error_message_with_right_error_message():
+def test_format_error_message_with_right_error_message(mocker):
"""Verify that error message from extrinsic response parses correctly."""
# Prep
fake_error_message = {
@@ -13,7 +14,7 @@ def test_format_error_message_with_right_error_message():
}
# Call
- result = format_error_message(fake_error_message)
+ result = format_error_message(fake_error_message, substrate=mocker.MagicMock())
# Assertions
@@ -22,13 +23,13 @@ def test_format_error_message_with_right_error_message():
assert "Some error description." in result
-def test_format_error_message_with_empty_error_message():
+def test_format_error_message_with_empty_error_message(mocker):
"""Verify that empty error message from extrinsic response parses correctly."""
# Prep
fake_error_message = {}
# Call
- result = format_error_message(fake_error_message)
+ result = format_error_message(fake_error_message, substrate=mocker.MagicMock())
# Assertions
@@ -37,13 +38,13 @@ def test_format_error_message_with_empty_error_message():
assert "Unknown Description" in result
-def test_format_error_message_with_wrong_type_error_message():
+def test_format_error_message_with_wrong_type_error_message(mocker):
"""Verify that error message from extrinsic response with wrong type parses correctly."""
# Prep
fake_error_message = None
# Call
- result = format_error_message(fake_error_message)
+ result = format_error_message(fake_error_message, substrate=mocker.MagicMock())
# Assertions
diff --git a/tests/unit_tests/extrinsics/test_set_weights.py b/tests/unit_tests/extrinsics/test_set_weights.py
index 0cd663f0b7..f447915d2f 100644
--- a/tests/unit_tests/extrinsics/test_set_weights.py
+++ b/tests/unit_tests/extrinsics/test_set_weights.py
@@ -55,7 +55,7 @@ def mock_wallet():
True,
False,
False,
- "Subtensor returned `UnknownError(UnknownType)` error. This means: `Unknown Description`.",
+ "Mock error message",
),
],
ids=[
@@ -204,7 +204,7 @@ def test_do_set_weights_is_not_success(mock_subtensor, mocker):
mock_subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once()
assert result == (
False,
- mock_subtensor.substrate.submit_extrinsic.return_value.error_message,
+ "Subtensor returned `UnknownError(UnknownType)` error. This means: `Unknown Description`.",
)
diff --git a/tests/unit_tests/test_async_subtensor.py b/tests/unit_tests/test_async_subtensor.py
new file mode 100644
index 0000000000..4db725da39
--- /dev/null
+++ b/tests/unit_tests/test_async_subtensor.py
@@ -0,0 +1,2152 @@
+import pytest
+
+from bittensor.core import async_subtensor
+
+
+@pytest.fixture(autouse=True)
+def subtensor(mocker):
+ fake_async_substrate = mocker.AsyncMock(
+ autospec=async_subtensor.AsyncSubstrateInterface
+ )
+ mocker.patch.object(
+ async_subtensor, "AsyncSubstrateInterface", return_value=fake_async_substrate
+ )
+ return async_subtensor.AsyncSubtensor()
+
+
+def test_decode_ss58_tuples_in_proposal_vote_data(mocker):
+ """Tests that ProposalVoteData instance instantiation works properly,"""
+ # Preps
+ mocked_decode_account_id = mocker.patch.object(async_subtensor, "decode_account_id")
+ fake_proposal_dict = {
+ "index": "0",
+ "threshold": 1,
+ "ayes": ("0 line", "1 line"),
+ "nays": ("2 line", "3 line"),
+ "end": 123,
+ }
+
+ # Call
+ async_subtensor.ProposalVoteData(fake_proposal_dict)
+
+ # Asserts
+ assert mocked_decode_account_id.call_count == len(fake_proposal_dict["ayes"]) + len(
+ fake_proposal_dict["nays"]
+ )
+ assert mocked_decode_account_id.mock_calls == [
+ mocker.call("0"),
+ mocker.call("1"),
+ mocker.call("2"),
+ mocker.call("3"),
+ ]
+
+
+def test_decode_hex_identity_dict_with_single_byte_utf8():
+ """Tests _decode_hex_identity_dict when value is a single utf-8 decodable byte."""
+ info_dict = {"name": (b"Neuron",)}
+ result = async_subtensor._decode_hex_identity_dict(info_dict)
+ assert result["name"] == "Neuron"
+
+
+def test_decode_hex_identity_dict_with_non_utf8_data():
+ """Tests _decode_hex_identity_dict when value cannot be decoded as utf-8."""
+ info_dict = {"data": (b"\xff\xfe",)}
+ result = async_subtensor._decode_hex_identity_dict(info_dict)
+ assert result["data"] == (b"\xff\xfe",)
+
+
+def test_decode_hex_identity_dict_with_non_tuple_value():
+ """Tests _decode_hex_identity_dict when value is not a tuple."""
+ info_dict = {"info": "regular_string"}
+ result = async_subtensor._decode_hex_identity_dict(info_dict)
+ assert result["info"] == "regular_string"
+
+
+def test_decode_hex_identity_dict_with_nested_dict():
+ """Tests _decode_hex_identity_dict with a nested dictionary."""
+ info_dict = {"identity": {"rank": (65, 66, 67)}}
+ result = async_subtensor._decode_hex_identity_dict(info_dict)
+ assert result["identity"] == "41 4243"
+
+
+def test__str__return(subtensor):
+ """Simply tests the result if printing subtensor instance."""
+ # Asserts
+ assert (
+ str(subtensor)
+ == "Network: finney, Chain: wss://entrypoint-finney.opentensor.ai:443"
+ )
+
+
+@pytest.mark.asyncio
+async def test_async_subtensor_magic_methods(mocker):
+ """Tests async magic methods of AsyncSubtensor class."""
+ # Preps
+ fake_async_substrate = mocker.AsyncMock(
+ autospec=async_subtensor.AsyncSubstrateInterface
+ )
+ mocker.patch.object(
+ async_subtensor, "AsyncSubstrateInterface", return_value=fake_async_substrate
+ )
+
+ # Call
+ subtensor = async_subtensor.AsyncSubtensor(network="local")
+ async with subtensor:
+ pass
+
+ # Asserts
+ fake_async_substrate.__aenter__.assert_called_once()
+ fake_async_substrate.__aexit__.assert_called_once()
+ fake_async_substrate.close.assert_awaited_once()
+
+
+@pytest.mark.asyncio
+async def test_encode_params(subtensor, mocker):
+ """Tests encode_params happy path."""
+ # Preps
+ subtensor.substrate.create_scale_object = mocker.AsyncMock(
+ autospec=async_subtensor.AsyncSubstrateInterface.create_scale_object
+ )
+ subtensor.substrate.create_scale_object.return_value.encode = mocker.Mock(
+ return_value=b""
+ )
+
+ call_definition = {
+ "params": [
+ {"name": "coldkey", "type": "Vec"},
+ {"name": "uid", "type": "u16"},
+ ]
+ }
+ params = ["coldkey", "uid"]
+
+ # Call
+ decoded_params = await subtensor.encode_params(
+ call_definition=call_definition, params=params
+ )
+
+ # Asserts
+ subtensor.substrate.create_scale_object.call_args(
+ mocker.call("coldkey"),
+ mocker.call("Vec"),
+ mocker.call("uid"),
+ mocker.call("u16"),
+ )
+ assert decoded_params == "0x"
+
+
+@pytest.mark.asyncio
+async def test_encode_params_raises_error(subtensor, mocker):
+ """Tests encode_params with raised error."""
+ # Preps
+ subtensor.substrate.create_scale_object = mocker.AsyncMock(
+ autospec=async_subtensor.AsyncSubstrateInterface.create_scale_object
+ )
+ subtensor.substrate.create_scale_object.return_value.encode = mocker.Mock(
+ return_value=b""
+ )
+
+ call_definition = {
+ "params": [
+ {"name": "coldkey", "type": "Vec"},
+ ]
+ }
+ params = {"undefined param": "some value"}
+
+ # Call and assert
+ with pytest.raises(ValueError):
+ await subtensor.encode_params(call_definition=call_definition, params=params)
+
+ subtensor.substrate.create_scale_object.return_value.encode.assert_not_called()
+
+
+@pytest.mark.asyncio
+async def test_get_current_block(subtensor):
+ """Tests get_current_block method."""
+ # Call
+ result = await subtensor.get_current_block()
+
+ # Asserts
+ subtensor.substrate.get_block_number.assert_called_once()
+ assert result == subtensor.substrate.get_block_number.return_value
+
+
+@pytest.mark.asyncio
+async def test_get_block_hash_without_block_id_aka_none(subtensor):
+ """Tests get_block_hash method without passed block_id."""
+ # Call
+ result = await subtensor.get_block_hash()
+
+ # Asserts
+ assert result == subtensor.substrate.get_chain_head.return_value
+
+
+@pytest.mark.asyncio
+async def test_get_block_hash_with_block_id(subtensor):
+ """Tests get_block_hash method with passed block_id."""
+ # Call
+ result = await subtensor.get_block_hash(block_id=1)
+
+ # Asserts
+ assert result == subtensor.substrate.get_block_hash.return_value
+
+
+@pytest.mark.asyncio
+async def test_is_hotkey_registered_any(subtensor, mocker):
+ """Tests is_hotkey_registered_any method."""
+ # Preps
+ mocked_get_netuids_for_hotkey = mocker.AsyncMock(
+ return_value=[1, 2], autospec=subtensor.get_netuids_for_hotkey
+ )
+ subtensor.get_netuids_for_hotkey = mocked_get_netuids_for_hotkey
+
+ # Call
+ result = await subtensor.is_hotkey_registered_any(
+ hotkey_ss58="hotkey", block_hash="FAKE_HASH"
+ )
+
+ # Asserts
+ assert result is (len(mocked_get_netuids_for_hotkey.return_value) > 0)
+
+
+@pytest.mark.asyncio
+async def test_get_subnet_burn_cost(subtensor, mocker):
+ """Tests get_subnet_burn_cost method."""
+ # Preps
+ mocked_query_runtime_api = mocker.AsyncMock(autospec=subtensor.query_runtime_api)
+ subtensor.query_runtime_api = mocked_query_runtime_api
+ fake_block_hash = None
+
+ # Call
+ result = await subtensor.get_subnet_burn_cost(block_hash=fake_block_hash)
+
+ # Assert
+ assert result == mocked_query_runtime_api.return_value
+ mocked_query_runtime_api.assert_called_once_with(
+ runtime_api="SubnetRegistrationRuntimeApi",
+ method="get_network_registration_cost",
+ params=[],
+ block_hash=fake_block_hash,
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_total_subnets(subtensor, mocker):
+ """Tests get_total_subnets method."""
+ # Preps
+ mocked_substrate_query = mocker.AsyncMock(
+ autospec=async_subtensor.AsyncSubstrateInterface.query
+ )
+ subtensor.substrate.query = mocked_substrate_query
+ fake_block_hash = None
+
+ # Call
+ result = await subtensor.get_total_subnets(block_hash=fake_block_hash)
+
+ # Assert
+ assert result == mocked_substrate_query.return_value
+ mocked_substrate_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="TotalNetworks",
+ params=[],
+ block_hash=fake_block_hash,
+ )
+
+
+@pytest.mark.parametrize(
+ "records, response",
+ [([(0, True), (1, False), (3, False), (3, True)], [0, 3]), ([], [])],
+ ids=["with records", "empty-records"],
+)
+@pytest.mark.asyncio
+async def test_get_subnets(subtensor, mocker, records, response):
+ """Tests get_subnets method with any return."""
+ # Preps
+ fake_result = mocker.AsyncMock(autospec=list)
+ fake_result.records = records
+ fake_result.__aiter__.return_value = iter(records)
+
+ mocked_substrate_query_map = mocker.AsyncMock(
+ autospec=async_subtensor.AsyncSubstrateInterface.query_map,
+ return_value=fake_result,
+ )
+
+ subtensor.substrate.query_map = mocked_substrate_query_map
+ fake_block_hash = None
+
+ # Call
+ result = await subtensor.get_subnets(block_hash=fake_block_hash)
+
+ # Asserts
+ mocked_substrate_query_map.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="NetworksAdded",
+ block_hash=fake_block_hash,
+ reuse_block_hash=True,
+ )
+ assert result == response
+
+
+@pytest.mark.parametrize(
+ "hotkey_ss58_in_result",
+ [True, False],
+ ids=["hotkey-exists", "hotkey-doesnt-exist"],
+)
+@pytest.mark.asyncio
+async def test_is_hotkey_delegate(subtensor, mocker, hotkey_ss58_in_result):
+ """Tests is_hotkey_delegate method with any return."""
+ # Preps
+ fake_hotkey_ss58 = "hotkey_58"
+ mocked_get_delegates = mocker.AsyncMock(
+ return_value=[
+ mocker.Mock(hotkey_ss58=fake_hotkey_ss58 if hotkey_ss58_in_result else "")
+ ]
+ )
+ subtensor.get_delegates = mocked_get_delegates
+
+ # Call
+ result = await subtensor.is_hotkey_delegate(
+ hotkey_ss58=fake_hotkey_ss58, block_hash=None, reuse_block=True
+ )
+
+ # Asserts
+ assert result == hotkey_ss58_in_result
+ mocked_get_delegates.assert_called_once_with(block_hash=None, reuse_block=True)
+
+
+@pytest.mark.parametrize(
+ "fake_hex_bytes_result, response", [(None, []), ("0xaabbccdd", b"\xaa\xbb\xcc\xdd")]
+)
+@pytest.mark.asyncio
+async def test_get_delegates(subtensor, mocker, fake_hex_bytes_result, response):
+ """Tests get_delegates method."""
+ # Preps
+ mocked_query_runtime_api = mocker.AsyncMock(
+ autospec=subtensor.query_runtime_api, return_value=fake_hex_bytes_result
+ )
+ subtensor.query_runtime_api = mocked_query_runtime_api
+ mocked_delegate_info_list_from_vec_u8 = mocker.Mock()
+ async_subtensor.DelegateInfo.list_from_vec_u8 = (
+ mocked_delegate_info_list_from_vec_u8
+ )
+
+ # Call
+ result = await subtensor.get_delegates(block_hash=None, reuse_block=True)
+
+ # Asserts
+ if fake_hex_bytes_result:
+ assert result == mocked_delegate_info_list_from_vec_u8.return_value
+ mocked_delegate_info_list_from_vec_u8.assert_called_once_with(
+ bytes.fromhex(fake_hex_bytes_result[2:])
+ )
+ else:
+ assert result == response
+
+ mocked_query_runtime_api.assert_called_once_with(
+ runtime_api="DelegateInfoRuntimeApi",
+ method="get_delegates",
+ params=[],
+ block_hash=None,
+ reuse_block=True,
+ )
+
+
+@pytest.mark.parametrize(
+ "fake_hex_bytes_result, response", [(None, []), ("zz001122", b"\xaa\xbb\xcc\xdd")]
+)
+@pytest.mark.asyncio
+async def test_get_stake_info_for_coldkey(
+ subtensor, mocker, fake_hex_bytes_result, response
+):
+ """Tests get_stake_info_for_coldkey method."""
+ # Preps
+ fake_coldkey_ss58 = "fake_coldkey_58"
+
+ mocked_ss58_to_vec_u8 = mocker.Mock()
+ async_subtensor.ss58_to_vec_u8 = mocked_ss58_to_vec_u8
+
+ mocked_query_runtime_api = mocker.AsyncMock(
+ autospec=subtensor.query_runtime_api, return_value=fake_hex_bytes_result
+ )
+ subtensor.query_runtime_api = mocked_query_runtime_api
+
+ mocked_stake_info_list_from_vec_u8 = mocker.Mock()
+ async_subtensor.StakeInfo.list_from_vec_u8 = mocked_stake_info_list_from_vec_u8
+
+ # Call
+ result = await subtensor.get_stake_info_for_coldkey(
+ coldkey_ss58=fake_coldkey_ss58, block_hash=None, reuse_block=True
+ )
+
+ # Asserts
+ if fake_hex_bytes_result:
+ assert result == mocked_stake_info_list_from_vec_u8.return_value
+ mocked_stake_info_list_from_vec_u8.assert_called_once_with(
+ bytes.fromhex(fake_hex_bytes_result[2:])
+ )
+ else:
+ assert result == response
+
+ mocked_ss58_to_vec_u8.assert_called_once_with(fake_coldkey_ss58)
+ mocked_query_runtime_api.assert_called_once_with(
+ runtime_api="StakeInfoRuntimeApi",
+ method="get_stake_info_for_coldkey",
+ params=[mocked_ss58_to_vec_u8.return_value],
+ block_hash=None,
+ reuse_block=True,
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_stake_for_coldkey_and_hotkey(subtensor, mocker):
+ """Tests get_stake_for_coldkey_and_hotkey method."""
+ # Preps
+ mocked_substrate_query = mocker.AsyncMock(
+ autospec=async_subtensor.AsyncSubstrateInterface.query
+ )
+ subtensor.substrate.query = mocked_substrate_query
+
+ spy_balance = mocker.spy(async_subtensor, "Balance")
+
+ # Call
+ result = await subtensor.get_stake_for_coldkey_and_hotkey(
+ hotkey_ss58="hotkey", coldkey_ss58="coldkey", block_hash=None
+ )
+
+ # Asserts
+ mocked_substrate_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="Stake",
+ params=["hotkey", "coldkey"],
+ block_hash=None,
+ )
+ assert result == spy_balance.from_rao.return_value
+ spy_balance.from_rao.assert_called_once_with(mocked_substrate_query.return_value)
+
+
+@pytest.mark.asyncio
+async def test_query_runtime_api(subtensor, mocker):
+ """Tests query_runtime_api method."""
+ # Preps
+ fake_runtime_api = "DelegateInfoRuntimeApi"
+ fake_method = "get_delegated"
+ fake_params = [1, 2, 3]
+ fake_block_hash = None
+ reuse_block = False
+
+ mocked_encode_params = mocker.AsyncMock()
+ subtensor.encode_params = mocked_encode_params
+
+ mocked_rpc_request = mocker.AsyncMock(
+ autospec=async_subtensor.AsyncSubstrateInterface.rpc_request
+ )
+ subtensor.substrate.rpc_request = mocked_rpc_request
+
+ mocked_scalecodec = mocker.Mock(autospec=async_subtensor.scalecodec.ScaleBytes)
+ async_subtensor.scalecodec.ScaleBytes = mocked_scalecodec
+
+ mocked_runtime_configuration = mocker.Mock(
+ autospec=async_subtensor.RuntimeConfiguration
+ )
+ async_subtensor.RuntimeConfiguration = mocked_runtime_configuration
+
+ mocked_load_type_registry_preset = mocker.Mock()
+ async_subtensor.load_type_registry_preset = mocked_load_type_registry_preset
+
+ # Call
+ result = await subtensor.query_runtime_api(
+ runtime_api=fake_runtime_api,
+ method=fake_method,
+ params=fake_params,
+ block_hash=fake_block_hash,
+ reuse_block=reuse_block,
+ )
+
+ # Asserts
+ mocked_encode_params.assert_called_once_with(
+ call_definition={
+ "params": [{"name": "coldkey", "type": "Vec"}],
+ "type": "Vec",
+ },
+ params=[1, 2, 3],
+ )
+ mocked_rpc_request.assert_called_once_with(
+ method="state_call",
+ params=[f"{fake_runtime_api}_{fake_method}", mocked_encode_params.return_value],
+ reuse_block_hash=reuse_block,
+ )
+ mocked_runtime_configuration.assert_called_once()
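+    # two registry updates are expected: presumably one for the core type preset
+    # and one for the custom rpc type registry used by async_subtensor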
+ assert (
+ mocked_runtime_configuration.return_value.update_type_registry.call_count == 2
+ )
+
+ mocked_runtime_configuration.return_value.create_scale_object.assert_called_once_with(
+ "Vec", mocked_scalecodec.return_value
+ )
+
+ assert (
+ result
+ == mocked_runtime_configuration.return_value.create_scale_object.return_value.decode.return_value
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_balance(subtensor, mocker):
+ """Tests get_balance method."""
+ # Preps
+ fake_addresses = ("a1", "a2")
+ fake_block_hash = None
+
+ mocked_substrate_create_storage_key = mocker.AsyncMock()
+ subtensor.substrate.create_storage_key = mocked_substrate_create_storage_key
+
+ mocked_batch_0_call = mocker.Mock(
+ params=[
+ 0,
+ ]
+ )
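+    # the storage key mock exposes the account index via .params[0]; judging by
+    # the assertion below, get_balance uses it as the key of the returned dict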
+ mocked_batch_1_call = {"data": {"free": 1000}}
+ mocked_substrate_query_multi = mocker.AsyncMock(
+ return_value=[
+ (mocked_batch_0_call, mocked_batch_1_call),
+ ]
+ )
+
+ subtensor.substrate.query_multi = mocked_substrate_query_multi
+
+ # Call
+ result = await subtensor.get_balance(*fake_addresses, block_hash=fake_block_hash)
+
+ assert mocked_substrate_create_storage_key.call_count == len(fake_addresses)
+ mocked_substrate_query_multi.assert_called_once()
+ assert result == {0: async_subtensor.Balance(1000)}
+
+
+@pytest.mark.parametrize("balance", [100, 100.1])
+@pytest.mark.asyncio
+async def test_get_transfer_fee(subtensor, mocker, balance):
+ """Tests get_transfer_fee method."""
+ # Preps
+ fake_wallet = mocker.Mock(coldkeypub="coldkeypub", autospec=async_subtensor.Wallet)
+ fake_dest = "fake_dest"
+ fake_value = balance
+
+ mocked_compose_call = mocker.AsyncMock()
+ subtensor.substrate.compose_call = mocked_compose_call
+
+ mocked_get_payment_info = mocker.AsyncMock(return_value={"partialFee": 100})
+ subtensor.substrate.get_payment_info = mocked_get_payment_info
+
+ # Call
+ result = await subtensor.get_transfer_fee(
+ wallet=fake_wallet, dest=fake_dest, value=fake_value
+ )
+
+ # Assertions
+ mocked_compose_call.assert_awaited_once()
+ mocked_compose_call.assert_called_once_with(
+ call_module="Balances",
+ call_function="transfer_allow_death",
+ call_params={
+ "dest": fake_dest,
+ "value": async_subtensor.Balance.from_rao(fake_value),
+ },
+ )
+
+ assert isinstance(result, async_subtensor.Balance)
+ mocked_get_payment_info.assert_awaited_once()
+ mocked_get_payment_info.assert_called_once_with(
+ call=mocked_compose_call.return_value, keypair="coldkeypub"
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_transfer_fee_with_non_balance_accepted_value_type(subtensor, mocker):
+ """Tests get_transfer_fee method with non balance accepted value type."""
+ # Preps
+ fake_wallet = mocker.Mock(coldkeypub="coldkeypub", autospec=async_subtensor.Wallet)
+ fake_dest = "fake_dest"
+ fake_value = "1000"
+
+ # Call
+ result = await subtensor.get_transfer_fee(
+ wallet=fake_wallet, dest=fake_dest, value=fake_value
+ )
+
+ # Assertions
+ assert result == async_subtensor.Balance.from_rao(int(2e7))
+
+
+@pytest.mark.asyncio
+async def test_get_transfer_fee_with_exception(subtensor, mocker):
+    """Tests that get_transfer_fee handles exceptions properly."""
+ # Preps
+ fake_value = 123
+
+ mocked_compose_call = mocker.AsyncMock()
+ subtensor.substrate.compose_call = mocked_compose_call
+ subtensor.substrate.get_payment_info.side_effect = Exception
+
+ # Call
+ result = await subtensor.get_transfer_fee(
+ wallet=mocker.Mock(), dest=mocker.Mock(), value=fake_value
+ )
+
+ # Assertions
+ assert result == async_subtensor.Balance.from_rao(int(2e7))
+
+
+@pytest.mark.asyncio
+async def test_get_total_stake_for_coldkey(subtensor, mocker):
+ """Tests get_total_stake_for_coldkey method."""
+ # Preps
+ fake_addresses = ("a1", "a2")
+ fake_block_hash = None
+
+ mocked_substrate_create_storage_key = mocker.AsyncMock()
+ subtensor.substrate.create_storage_key = mocked_substrate_create_storage_key
+
+ mocked_batch_0_call = mocker.Mock(
+ params=[
+ 0,
+ ]
+ )
+ mocked_batch_1_call = 0
+ mocked_substrate_query_multi = mocker.AsyncMock(
+ return_value=[
+ (mocked_batch_0_call, mocked_batch_1_call),
+ ]
+ )
+
+ subtensor.substrate.query_multi = mocked_substrate_query_multi
+
+ # Call
+ result = await subtensor.get_total_stake_for_coldkey(
+ *fake_addresses, block_hash=fake_block_hash
+ )
+
+ assert mocked_substrate_create_storage_key.call_count == len(fake_addresses)
+ mocked_substrate_query_multi.assert_called_once()
+ assert result == {0: async_subtensor.Balance(mocked_batch_1_call)}
+
+
+@pytest.mark.asyncio
+async def test_get_total_stake_for_hotkey(subtensor, mocker):
+ """Tests get_total_stake_for_hotkey method."""
+ # Preps
+ fake_addresses = ("a1", "a2")
+ fake_block_hash = None
+ reuse_block = True
+
+ mocked_substrate_query_multiple = mocker.AsyncMock(return_value={0: 1})
+
+ subtensor.substrate.query_multiple = mocked_substrate_query_multiple
+
+ # Call
+ result = await subtensor.get_total_stake_for_hotkey(
+ *fake_addresses, block_hash=fake_block_hash, reuse_block=reuse_block
+ )
+
+ # Assertions
+ mocked_substrate_query_multiple.assert_called_once_with(
+ params=list(fake_addresses),
+ module="SubtensorModule",
+ storage_function="TotalHotkeyStake",
+ block_hash=fake_block_hash,
+ reuse_block_hash=reuse_block,
+ )
+ mocked_substrate_query_multiple.assert_called_once()
+ assert result == {0: async_subtensor.Balance(1)}
+
+
+@pytest.mark.parametrize(
+ "records, response",
+ [([(0, True), (1, False), (3, False), (3, True)], [0, 3]), ([], [])],
+ ids=["with records", "empty-records"],
+)
+@pytest.mark.asyncio
+async def test_get_netuids_for_hotkey(subtensor, mocker, records, response):
+ """Tests get_netuids_for_hotkey method."""
+ # Preps
+ fake_result = mocker.AsyncMock(autospec=list)
+ fake_result.records = records
+ fake_result.__aiter__.return_value = iter(records)
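+    # the mocked query_map result is async-iterable and yields the
+    # (netuid, is_member) pairs defined in `records`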
+
+ mocked_substrate_query_map = mocker.AsyncMock(
+ autospec=async_subtensor.AsyncSubstrateInterface.query_map,
+ return_value=fake_result,
+ )
+
+ subtensor.substrate.query_map = mocked_substrate_query_map
+ fake_hotkey_ss58 = "hotkey_58"
+ fake_block_hash = None
+
+ # Call
+ result = await subtensor.get_netuids_for_hotkey(
+ hotkey_ss58=fake_hotkey_ss58, block_hash=fake_block_hash, reuse_block=True
+ )
+
+ # Assertions
+ mocked_substrate_query_map.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="IsNetworkMember",
+ params=[fake_hotkey_ss58],
+ block_hash=fake_block_hash,
+ reuse_block_hash=True,
+ )
+ assert result == response
+
+
+@pytest.mark.asyncio
+async def test_subnet_exists(subtensor, mocker):
+ """Tests subnet_exists method ."""
+ # Preps
+ fake_netuid = 1
+ fake_block_hash = "block_hash"
+ fake_reuse_block_hash = True
+
+ mocked_substrate_query = mocker.AsyncMock(
+ autospec=async_subtensor.AsyncSubstrateInterface.query
+ )
+ subtensor.substrate.query = mocked_substrate_query
+
+ # Call
+ result = await subtensor.subnet_exists(
+ netuid=fake_netuid,
+ block_hash=fake_block_hash,
+ reuse_block=fake_reuse_block_hash,
+ )
+
+ # Asserts
+ mocked_substrate_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="NetworksAdded",
+ params=[fake_netuid],
+ block_hash=fake_block_hash,
+ reuse_block_hash=fake_reuse_block_hash,
+ )
+ assert result == mocked_substrate_query.return_value
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_happy_path(subtensor, mocker):
+ """Tests get_hyperparameter method with happy path."""
+ # Preps
+ fake_param_name = "param_name"
+ fake_netuid = 1
+ fake_block_hash = "block_hash"
+ fake_reuse_block_hash = True
+
+    # simulate that the subnet exists
+ mocked_subtensor_subnet_exists = mocker.AsyncMock(return_value=True)
+ subtensor.subnet_exists = mocked_subtensor_subnet_exists
+
+ mocked_substrate_query = mocker.AsyncMock(
+ autospec=async_subtensor.AsyncSubstrateInterface.query
+ )
+ subtensor.substrate.query = mocked_substrate_query
+
+ # Call
+ result = await subtensor.get_hyperparameter(
+ param_name=fake_param_name,
+ netuid=fake_netuid,
+ block_hash=fake_block_hash,
+ reuse_block=fake_reuse_block_hash,
+ )
+
+ # Assertions
+ mocked_subtensor_subnet_exists.assert_called_once()
+ mocked_substrate_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function=fake_param_name,
+ params=[fake_netuid],
+ block_hash=fake_block_hash,
+ reuse_block_hash=fake_reuse_block_hash,
+ )
+ assert result == mocked_substrate_query.return_value
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_if_subnet_does_not_exist(subtensor, mocker):
+ """Tests get_hyperparameter method if subnet does not exist."""
+ # Preps
+    # simulate that the subnet doesn't exist
+ mocked_subtensor_subnet_exists = mocker.AsyncMock(return_value=False)
+ subtensor.subnet_exists = mocked_subtensor_subnet_exists
+
+ mocked_substrate_query = mocker.AsyncMock(
+ autospec=async_subtensor.AsyncSubstrateInterface.query
+ )
+ subtensor.substrate.query = mocked_substrate_query
+
+ # Call
+ result = await subtensor.get_hyperparameter(mocker.Mock(), mocker.Mock())
+
+ # Assertions
+ mocked_subtensor_subnet_exists.assert_called_once()
+ mocked_substrate_query.assert_not_called()
+ assert result is None
+
+
+@pytest.mark.parametrize(
+ "all_netuids, filter_for_netuids, response",
+ [([1, 2], [3, 4], []), ([1, 2], [1, 3], [1]), ([1, 2], None, [1, 2])],
+ ids=[
+ "all arguments -> no comparison",
+ "all arguments -> is comparison",
+ "not filter_for_netuids",
+ ],
+)
+@pytest.mark.asyncio
+async def test_filter_netuids_by_registered_hotkeys(
+ subtensor, mocker, all_netuids, filter_for_netuids, response
+):
+ """Tests filter_netuids_by_registered_hotkeys method."""
+ # Preps
+ fake_wallet_1 = mocker.Mock(autospec=async_subtensor.Wallet)
+ fake_wallet_1.hotkey.ss58_address = "ss58_address_1"
+ fake_wallet_2 = mocker.Mock(autospec=async_subtensor.Wallet)
+ fake_wallet_2.hotkey.ss58_address = "ss58_address_2"
+
+ fake_all_netuids = all_netuids
+ fake_filter_for_netuids = filter_for_netuids
+ fake_all_hotkeys = [fake_wallet_1, fake_wallet_2]
+ fake_block_hash = "fake_block_hash"
+ fake_reuse_block = True
+
+ mocked_get_netuids_for_hotkey = mocker.AsyncMock(
+ # returned subnets list
+ return_value=[1, 2]
+ )
+ subtensor.get_netuids_for_hotkey = mocked_get_netuids_for_hotkey
+
+ # Call
+ result = await subtensor.filter_netuids_by_registered_hotkeys(
+ all_netuids=fake_all_netuids,
+ filter_for_netuids=fake_filter_for_netuids,
+ all_hotkeys=fake_all_hotkeys,
+ block_hash=fake_block_hash,
+ reuse_block=fake_reuse_block,
+ )
+
+ # Asserts
+    assert mocked_get_netuids_for_hotkey.call_count == len(fake_all_hotkeys)
+ assert mocked_get_netuids_for_hotkey.mock_calls == [
+ mocker.call(
+ w.hotkey.ss58_address,
+ block_hash=fake_block_hash,
+ reuse_block=fake_reuse_block,
+ )
+ for w in fake_all_hotkeys
+ ]
+ assert result == response
+
+
+@pytest.mark.asyncio
+async def test_get_existential_deposit_happy_path(subtensor, mocker):
+ """Tests get_existential_deposit method."""
+ # Preps
+ fake_block_hash = "block_hash"
+ fake_reuse_block_hash = True
+
+ mocked_substrate_get_constant = mocker.AsyncMock(return_value=1)
+ subtensor.substrate.get_constant = mocked_substrate_get_constant
+
+ spy_balance_from_rao = mocker.spy(async_subtensor.Balance, "from_rao")
+
+ # Call
+ result = await subtensor.get_existential_deposit(
+ block_hash=fake_block_hash, reuse_block=fake_reuse_block_hash
+ )
+
+ # Asserts
+ mocked_substrate_get_constant.assert_awaited_once()
+ mocked_substrate_get_constant.assert_called_once_with(
+ module_name="Balances",
+ constant_name="ExistentialDeposit",
+ block_hash=fake_block_hash,
+ reuse_block_hash=fake_reuse_block_hash,
+ )
+ spy_balance_from_rao.assert_called_once_with(
+ mocked_substrate_get_constant.return_value
+ )
+ assert result == async_subtensor.Balance(mocked_substrate_get_constant.return_value)
+
+
+@pytest.mark.asyncio
+async def test_get_existential_deposit_raise_exception(subtensor, mocker):
+ """Tests get_existential_deposit method raise Exception."""
+ # Preps
+ fake_block_hash = "block_hash"
+ fake_reuse_block_hash = True
+
+ mocked_substrate_get_constant = mocker.AsyncMock(return_value=None)
+ subtensor.substrate.get_constant = mocked_substrate_get_constant
+
+ spy_balance_from_rao = mocker.spy(async_subtensor.Balance, "from_rao")
+
+ # Call
+ with pytest.raises(Exception):
+ await subtensor.get_existential_deposit(
+ block_hash=fake_block_hash, reuse_block=fake_reuse_block_hash
+ )
+
+ # Asserts
+ mocked_substrate_get_constant.assert_awaited_once()
+ mocked_substrate_get_constant.assert_called_once_with(
+ module_name="Balances",
+ constant_name="ExistentialDeposit",
+ block_hash=fake_block_hash,
+ reuse_block_hash=fake_reuse_block_hash,
+ )
+ spy_balance_from_rao.assert_not_called()
+
+
+@pytest.mark.asyncio
+async def test_neurons(subtensor, mocker):
+ """Tests neurons method."""
+ # Preps
+ fake_netuid = 1
+ fake_block_hash = "block_hash"
+ fake_neurons = [mocker.Mock(), mocker.Mock()]
+ fake_weights = [(1, [(10, 20), (30, 40)]), (2, [(50, 60), (70, 80)])]
+ fake_bonds = [(1, [(10, 20), (30, 40)]), (2, [(50, 60), (70, 80)])]
+
+ mocked_neurons_lite = mocker.AsyncMock(return_value=fake_neurons)
+ subtensor.neurons_lite = mocked_neurons_lite
+
+ mocked_weights = mocker.AsyncMock(return_value=fake_weights)
+ subtensor.weights = mocked_weights
+
+ mocked_bonds = mocker.AsyncMock(return_value=fake_bonds)
+ subtensor.bonds = mocked_bonds
+
+ mocked_neuron_info_method = mocker.Mock()
+ async_subtensor.NeuronInfo.from_weights_bonds_and_neuron_lite = (
+ mocked_neuron_info_method
+ )
+
+ # Call
+ result = await subtensor.neurons(netuid=fake_netuid, block_hash=fake_block_hash)
+
+ # Asserts
+ mocked_neurons_lite.assert_awaited_once()
+ mocked_neurons_lite.assert_called_once_with(
+ netuid=fake_netuid, block_hash=fake_block_hash
+ )
+ mocked_weights.assert_awaited_once()
+ mocked_weights.assert_called_once_with(
+ netuid=fake_netuid, block_hash=fake_block_hash
+ )
+ mocked_bonds.assert_awaited_once()
+ mocked_bonds.assert_called_once_with(netuid=fake_netuid, block_hash=fake_block_hash)
+ assert result == [
+ mocked_neuron_info_method.return_value for _ in range(len(fake_neurons))
+ ]
+
+
+@pytest.mark.parametrize(
+ "fake_hex_bytes_result, response",
+ [(None, []), ("0xaabbccdd", b"\xaa\xbb\xcc\xdd")],
+ ids=["none", "with data"],
+)
+@pytest.mark.asyncio
+async def test_neurons_lite(subtensor, mocker, fake_hex_bytes_result, response):
+ """Tests neurons_lite method."""
+ # Preps
+ fake_netuid = 1
+ fake_block_hash = "block_hash"
+ fake_reuse_block_hash = True
+
+ mocked_query_runtime_api = mocker.AsyncMock(return_value=fake_hex_bytes_result)
+ subtensor.query_runtime_api = mocked_query_runtime_api
+
+ mocked_neuron_info_lite_list_from_vec_u8 = mocker.Mock()
+ async_subtensor.NeuronInfoLite.list_from_vec_u8 = (
+ mocked_neuron_info_lite_list_from_vec_u8
+ )
+
+ # Call
+ result = await subtensor.neurons_lite(
+ netuid=fake_netuid,
+ block_hash=fake_block_hash,
+ reuse_block=fake_reuse_block_hash,
+ )
+
+ # Assertions
+ mocked_query_runtime_api.assert_awaited_once()
+ mocked_query_runtime_api.assert_called_once_with(
+ runtime_api="NeuronInfoRuntimeApi",
+ method="get_neurons_lite",
+ params=[fake_netuid],
+ block_hash=fake_block_hash,
+ reuse_block=fake_reuse_block_hash,
+ )
+ if fake_hex_bytes_result:
+ mocked_neuron_info_lite_list_from_vec_u8.assert_called_once_with(
+ bytes.fromhex(fake_hex_bytes_result[2:])
+ )
+ assert result == mocked_neuron_info_lite_list_from_vec_u8.return_value
+ else:
+ mocked_neuron_info_lite_list_from_vec_u8.assert_not_called()
+ assert result == []
+
+
+@pytest.mark.asyncio
+async def test_neuron_for_uid_happy_path(subtensor, mocker):
+ """Tests neuron_for_uid method with happy path."""
+ # Preps
+ fake_uid = 1
+ fake_netuid = 2
+ fake_block_hash = "block_hash"
+
+ mocked_null_neuron = mocker.Mock()
+ async_subtensor.NeuronInfo.get_null_neuron = mocked_null_neuron
+
+    # the response contains a result
+ mocked_substrate_rpc_request = mocker.AsyncMock(
+ return_value={"result": b"some_result"}
+ )
+ subtensor.substrate.rpc_request = mocked_substrate_rpc_request
+
+ mocked_neuron_info_from_vec_u8 = mocker.Mock()
+ async_subtensor.NeuronInfo.from_vec_u8 = mocked_neuron_info_from_vec_u8
+
+ # Call
+ result = await subtensor.neuron_for_uid(
+ uid=fake_uid, netuid=fake_netuid, block_hash=fake_block_hash
+ )
+
+ # Asserts
+ mocked_null_neuron.assert_not_called()
+ mocked_neuron_info_from_vec_u8.assert_called_once_with(
+ bytes(mocked_substrate_rpc_request.return_value.get("result"))
+ )
+ assert result == mocked_neuron_info_from_vec_u8.return_value
+
+
+@pytest.mark.asyncio
+async def test_neuron_for_uid_with_none_uid(subtensor, mocker):
+ """Tests neuron_for_uid method when uid is None."""
+ # Preps
+ fake_uid = None
+ fake_netuid = 1
+ fake_block_hash = "block_hash"
+
+ mocked_null_neuron = mocker.Mock()
+ async_subtensor.NeuronInfo.get_null_neuron = mocked_null_neuron
+
+ # Call
+ result = await subtensor.neuron_for_uid(
+ uid=fake_uid, netuid=fake_netuid, block_hash=fake_block_hash
+ )
+
+ # Asserts
+ mocked_null_neuron.assert_called_once()
+ assert result == mocked_null_neuron.return_value
+
+
+@pytest.mark.asyncio
+async def test_neuron_for_uid(subtensor, mocker):
+ """Tests neuron_for_uid method."""
+ # Preps
+ fake_uid = 1
+ fake_netuid = 2
+ fake_block_hash = "block_hash"
+
+ mocked_null_neuron = mocker.Mock()
+ async_subtensor.NeuronInfo.get_null_neuron = mocked_null_neuron
+
+ # no result in response
+ mocked_substrate_rpc_request = mocker.AsyncMock(return_value={})
+ subtensor.substrate.rpc_request = mocked_substrate_rpc_request
+
+ mocked_neuron_info_from_vec_u8 = mocker.Mock()
+ async_subtensor.NeuronInfo.from_vec_u8 = mocked_neuron_info_from_vec_u8
+
+ # Call
+ result = await subtensor.neuron_for_uid(
+ uid=fake_uid, netuid=fake_netuid, block_hash=fake_block_hash
+ )
+
+ # Asserts
+ mocked_null_neuron.assert_called_once()
+ mocked_neuron_info_from_vec_u8.assert_not_called()
+ assert result == mocked_null_neuron.return_value
+
+
+@pytest.mark.asyncio
+async def test_get_delegated_no_block_hash_no_reuse(subtensor, mocker):
+ """Tests get_delegated method with no block_hash and reuse_block=False."""
+ # Preps
+ fake_coldkey_ss58 = "fake_ss58_address"
+
+ mocked_ss58_to_vec_u8 = mocker.Mock(return_value=b"encoded_coldkey")
+ mocker.patch.object(async_subtensor, "ss58_to_vec_u8", mocked_ss58_to_vec_u8)
+
+ mocked_rpc_request = mocker.AsyncMock(return_value={"result": b"mocked_result"})
+ subtensor.substrate.rpc_request = mocked_rpc_request
+
+ mocked_delegated_list_from_vec_u8 = mocker.Mock()
+ async_subtensor.DelegateInfo.delegated_list_from_vec_u8 = (
+ mocked_delegated_list_from_vec_u8
+ )
+
+ # Call
+ result = await subtensor.get_delegated(coldkey_ss58=fake_coldkey_ss58)
+
+ # Asserts
+ mocked_ss58_to_vec_u8.assert_called_once_with(fake_coldkey_ss58)
+ mocked_rpc_request.assert_called_once_with(
+ method="delegateInfo_getDelegated", params=[b"encoded_coldkey"]
+ )
+ mocked_delegated_list_from_vec_u8.assert_called_once_with(b"mocked_result")
+ assert result == mocked_delegated_list_from_vec_u8.return_value
+
+
+@pytest.mark.asyncio
+async def test_get_delegated_with_block_hash(subtensor, mocker):
+ """Tests get_delegated method with specified block_hash."""
+ # Preps
+ fake_coldkey_ss58 = "fake_ss58_address"
+ fake_block_hash = "fake_block_hash"
+
+ mocked_ss58_to_vec_u8 = mocker.Mock(return_value=b"encoded_coldkey")
+ mocker.patch.object(async_subtensor, "ss58_to_vec_u8", mocked_ss58_to_vec_u8)
+
+ mocked_rpc_request = mocker.AsyncMock(return_value={"result": b"mocked_result"})
+ subtensor.substrate.rpc_request = mocked_rpc_request
+
+ mocked_delegated_list_from_vec_u8 = mocker.Mock()
+ async_subtensor.DelegateInfo.delegated_list_from_vec_u8 = (
+ mocked_delegated_list_from_vec_u8
+ )
+
+ # Call
+ result = await subtensor.get_delegated(
+ coldkey_ss58=fake_coldkey_ss58, block_hash=fake_block_hash
+ )
+
+ # Asserts
+ mocked_ss58_to_vec_u8.assert_called_once_with(fake_coldkey_ss58)
+ mocked_rpc_request.assert_called_once_with(
+ method="delegateInfo_getDelegated", params=[fake_block_hash, b"encoded_coldkey"]
+ )
+ mocked_delegated_list_from_vec_u8.assert_called_once_with(b"mocked_result")
+ assert result == mocked_delegated_list_from_vec_u8.return_value
+
+
+@pytest.mark.asyncio
+async def test_get_delegated_with_reuse_block(subtensor, mocker):
+ """Tests get_delegated method with reuse_block=True."""
+ # Preps
+ fake_coldkey_ss58 = "fake_ss58_address"
+ subtensor.substrate.last_block_hash = "last_block_hash"
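+    # with reuse_block=True, the cached last_block_hash should be sent as the
+    # first rpc parameter (see the assertion below)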
+
+ mocked_ss58_to_vec_u8 = mocker.Mock(return_value=b"encoded_coldkey")
+ mocker.patch.object(async_subtensor, "ss58_to_vec_u8", mocked_ss58_to_vec_u8)
+
+ mocked_rpc_request = mocker.AsyncMock(return_value={"result": b"mocked_result"})
+ subtensor.substrate.rpc_request = mocked_rpc_request
+
+ mocked_delegated_list_from_vec_u8 = mocker.Mock()
+ async_subtensor.DelegateInfo.delegated_list_from_vec_u8 = (
+ mocked_delegated_list_from_vec_u8
+ )
+
+ # Call
+ result = await subtensor.get_delegated(
+ coldkey_ss58=fake_coldkey_ss58, reuse_block=True
+ )
+
+ # Asserts
+ mocked_ss58_to_vec_u8.assert_called_once_with(fake_coldkey_ss58)
+ mocked_rpc_request.assert_called_once_with(
+ method="delegateInfo_getDelegated",
+ params=["last_block_hash", b"encoded_coldkey"],
+ )
+ mocked_delegated_list_from_vec_u8.assert_called_once_with(b"mocked_result")
+ assert result == mocked_delegated_list_from_vec_u8.return_value
+
+
+@pytest.mark.asyncio
+async def test_get_delegated_with_empty_result(subtensor, mocker):
+ """Tests get_delegated method when RPC request returns an empty result."""
+ # Preps
+ fake_coldkey_ss58 = "fake_ss58_address"
+
+ mocked_ss58_to_vec_u8 = mocker.Mock(return_value=b"encoded_coldkey")
+ mocker.patch.object(async_subtensor, "ss58_to_vec_u8", mocked_ss58_to_vec_u8)
+
+ mocked_rpc_request = mocker.AsyncMock(return_value={})
+ subtensor.substrate.rpc_request = mocked_rpc_request
+
+ # Call
+ result = await subtensor.get_delegated(coldkey_ss58=fake_coldkey_ss58)
+
+ # Asserts
+ mocked_ss58_to_vec_u8.assert_called_once_with(fake_coldkey_ss58)
+ mocked_rpc_request.assert_called_once_with(
+ method="delegateInfo_getDelegated", params=[b"encoded_coldkey"]
+ )
+ assert result == []
+
+
+@pytest.mark.asyncio
+async def test_query_identity_successful(subtensor, mocker):
+ """Tests query_identity method with successful identity query."""
+ # Preps
+ fake_key = "test_key"
+ fake_block_hash = "block_hash"
+ fake_identity_info = {"info": {"stake": (b"\x01\x02",)}}
+
+ mocked_query = mocker.AsyncMock(return_value=fake_identity_info)
+ subtensor.substrate.query = mocked_query
+
+ mocker.patch.object(
+ async_subtensor,
+ "_decode_hex_identity_dict",
+ return_value={"stake": "01 02"},
+ )
+
+ # Call
+ result = await subtensor.query_identity(key=fake_key, block_hash=fake_block_hash)
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="Registry",
+ storage_function="IdentityOf",
+ params=[fake_key],
+ block_hash=fake_block_hash,
+ reuse_block_hash=False,
+ )
+ assert result == {"stake": "01 02"}
+
+
+@pytest.mark.asyncio
+async def test_query_identity_no_info(subtensor, mocker):
+ """Tests query_identity method when no identity info is returned."""
+ # Preps
+ fake_key = "test_key"
+
+ mocked_query = mocker.AsyncMock(return_value=None)
+ subtensor.substrate.query = mocked_query
+
+ # Call
+ result = await subtensor.query_identity(key=fake_key)
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="Registry",
+ storage_function="IdentityOf",
+ params=[fake_key],
+ block_hash=None,
+ reuse_block_hash=False,
+ )
+ assert result == {}
+
+
+@pytest.mark.asyncio
+async def test_query_identity_type_error(subtensor, mocker):
+ """Tests query_identity method when a TypeError occurs during decoding."""
+ # Preps
+ fake_key = "test_key"
+ fake_identity_info = {"info": {"rank": (b"\xff\xfe",)}}
+
+ mocked_query = mocker.AsyncMock(return_value=fake_identity_info)
+ subtensor.substrate.query = mocked_query
+
+ mocker.patch.object(
+ async_subtensor,
+ "_decode_hex_identity_dict",
+ side_effect=TypeError,
+ )
+
+ # Call
+ result = await subtensor.query_identity(key=fake_key)
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="Registry",
+ storage_function="IdentityOf",
+ params=[fake_key],
+ block_hash=None,
+ reuse_block_hash=False,
+ )
+ assert result == {}
+
+
+@pytest.mark.asyncio
+async def test_weights_successful(subtensor, mocker):
+ """Tests weights method with successful weight distribution retrieval."""
+ # Preps
+ fake_netuid = 1
+ fake_block_hash = "block_hash"
+ fake_weights = [
+ (0, [(1, 10), (2, 20)]),
+ (1, [(0, 15), (2, 25)]),
+ ]
+
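+    # replace query_map with an async generator so the implementation can consume
+    # the (uid, weights) pairs with `async for`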
+ async def mock_query_map(**_):
+ for uid, w in fake_weights:
+ yield uid, w
+
+ mocker.patch.object(subtensor.substrate, "query_map", side_effect=mock_query_map)
+
+ # Call
+ result = await subtensor.weights(netuid=fake_netuid, block_hash=fake_block_hash)
+
+ # Asserts
+ subtensor.substrate.query_map.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="Weights",
+ params=[fake_netuid],
+ block_hash=fake_block_hash,
+ )
+ assert result == fake_weights
+
+
+@pytest.mark.asyncio
+async def test_bonds(subtensor, mocker):
+ """Tests bonds method with successful bond distribution retrieval."""
+ # Preps
+ fake_netuid = 1
+ fake_block_hash = "block_hash"
+ fake_bonds = [
+ (0, [(1, 100), (2, 200)]),
+ (1, [(0, 150), (2, 250)]),
+ ]
+
+ async def mock_query_map(**_):
+ for uid, b in fake_bonds:
+ yield uid, b
+
+ mocker.patch.object(subtensor.substrate, "query_map", side_effect=mock_query_map)
+
+ # Call
+ result = await subtensor.bonds(netuid=fake_netuid, block_hash=fake_block_hash)
+
+ # Asserts
+ subtensor.substrate.query_map.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="Bonds",
+ params=[fake_netuid],
+ block_hash=fake_block_hash,
+ )
+ assert result == fake_bonds
+
+
+@pytest.mark.asyncio
+async def test_does_hotkey_exist_true(subtensor, mocker):
+ """Tests does_hotkey_exist method when the hotkey exists and is valid."""
+ # Preps
+ fake_hotkey_ss58 = "valid_hotkey"
+ fake_block_hash = "block_hash"
+ fake_query_result = ["decoded_account_id"]
+
+ mocked_query = mocker.AsyncMock(return_value=fake_query_result)
+ subtensor.substrate.query = mocked_query
+
+ mocked_decode_account_id = mocker.Mock(return_value="another_account_id")
+ mocker.patch.object(async_subtensor, "decode_account_id", mocked_decode_account_id)
+
+ # Call
+ result = await subtensor.does_hotkey_exist(
+ hotkey_ss58=fake_hotkey_ss58, block_hash=fake_block_hash
+ )
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="Owner",
+ params=[fake_hotkey_ss58],
+ block_hash=fake_block_hash,
+ reuse_block_hash=False,
+ )
+ mocked_decode_account_id.assert_called_once_with(fake_query_result[0])
+ assert result is True
+
+
+@pytest.mark.asyncio
+async def test_does_hotkey_exist_false_for_specific_account(subtensor, mocker):
+ """Tests does_hotkey_exist method when the hotkey exists but matches the specific account ID to ignore."""
+ # Preps
+ fake_hotkey_ss58 = "ignored_hotkey"
+ fake_query_result = ["ignored_account_id"]
+
+ mocked_query = mocker.AsyncMock(return_value=fake_query_result)
+ subtensor.substrate.query = mocked_query
+
+ # Mock the decode_account_id function to return the specific account ID that should be ignored
+ mocked_decode_account_id = mocker.Mock(
+ return_value="5C4hrfjw9DjXZTzV3MwzrrAr9P1MJhSrvWGWqi1eSuyUpnhM"
+ )
+ mocker.patch.object(async_subtensor, "decode_account_id", mocked_decode_account_id)
+
+ # Call
+ result = await subtensor.does_hotkey_exist(hotkey_ss58=fake_hotkey_ss58)
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="Owner",
+ params=[fake_hotkey_ss58],
+ block_hash=None,
+ reuse_block_hash=False,
+ )
+ mocked_decode_account_id.assert_called_once_with(fake_query_result[0])
+ assert result is False
+
+
+@pytest.mark.asyncio
+async def test_get_hotkey_owner_successful(subtensor, mocker):
+ """Tests get_hotkey_owner method when the hotkey exists and has an owner."""
+ # Preps
+ fake_hotkey_ss58 = "valid_hotkey"
+ fake_block_hash = "block_hash"
+ fake_owner_account_id = "owner_account_id"
+
+ mocked_query = mocker.AsyncMock(return_value=[fake_owner_account_id])
+ subtensor.substrate.query = mocked_query
+
+ mocked_decode_account_id = mocker.Mock(return_value="decoded_owner_account_id")
+ mocker.patch.object(async_subtensor, "decode_account_id", mocked_decode_account_id)
+
+ mocked_does_hotkey_exist = mocker.AsyncMock(return_value=True)
+ subtensor.does_hotkey_exist = mocked_does_hotkey_exist
+
+ # Call
+ result = await subtensor.get_hotkey_owner(
+ hotkey_ss58=fake_hotkey_ss58, block_hash=fake_block_hash
+ )
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="Owner",
+ params=[fake_hotkey_ss58],
+ block_hash=fake_block_hash,
+ )
+ mocked_decode_account_id.assert_called_once_with(fake_owner_account_id)
+ mocked_does_hotkey_exist.assert_awaited_once_with(
+ fake_hotkey_ss58, block_hash=fake_block_hash
+ )
+ assert result == "decoded_owner_account_id"
+
+
+@pytest.mark.asyncio
+async def test_get_hotkey_owner_non_existent_hotkey(subtensor, mocker):
+ """Tests get_hotkey_owner method when the hotkey does not exist in the query result."""
+ # Preps
+ fake_hotkey_ss58 = "non_existent_hotkey"
+ fake_block_hash = "block_hash"
+
+ mocked_query = mocker.AsyncMock(return_value=[None])
+ subtensor.substrate.query = mocked_query
+
+ mocked_decode_account_id = mocker.Mock(return_value=None)
+ mocker.patch.object(async_subtensor, "decode_account_id", mocked_decode_account_id)
+
+ # Call
+ result = await subtensor.get_hotkey_owner(
+ hotkey_ss58=fake_hotkey_ss58, block_hash=fake_block_hash
+ )
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="Owner",
+ params=[fake_hotkey_ss58],
+ block_hash=fake_block_hash,
+ )
+ mocked_decode_account_id.assert_called_once_with(None)
+ assert result is None
+
+
+@pytest.mark.asyncio
+async def test_get_hotkey_owner_exists_but_does_not_exist_flag_false(subtensor, mocker):
+ """Tests get_hotkey_owner method when decode_account_id returns a value but does_hotkey_exist returns False."""
+ # Preps
+ fake_hotkey_ss58 = "valid_hotkey"
+ fake_block_hash = "block_hash"
+ fake_owner_account_id = "owner_account_id"
+
+ mocked_query = mocker.AsyncMock(return_value=[fake_owner_account_id])
+ subtensor.substrate.query = mocked_query
+
+ mocked_decode_account_id = mocker.Mock(return_value="decoded_owner_account_id")
+ mocker.patch.object(async_subtensor, "decode_account_id", mocked_decode_account_id)
+
+ mocked_does_hotkey_exist = mocker.AsyncMock(return_value=False)
+ subtensor.does_hotkey_exist = mocked_does_hotkey_exist
+
+ # Call
+ result = await subtensor.get_hotkey_owner(
+ hotkey_ss58=fake_hotkey_ss58, block_hash=fake_block_hash
+ )
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="Owner",
+ params=[fake_hotkey_ss58],
+ block_hash=fake_block_hash,
+ )
+ mocked_decode_account_id.assert_called_once_with(fake_owner_account_id)
+ mocked_does_hotkey_exist.assert_awaited_once_with(
+ fake_hotkey_ss58, block_hash=fake_block_hash
+ )
+ assert result is None
+
+
+@pytest.mark.asyncio
+async def test_sign_and_send_extrinsic_success_finalization(subtensor, mocker):
+ """Tests sign_and_send_extrinsic when the extrinsic is successfully finalized."""
+ # Preps
+ fake_call = mocker.Mock()
+ fake_wallet = mocker.Mock()
+ fake_extrinsic = mocker.Mock()
+ fake_response = mocker.Mock()
+
+ mocked_create_signed_extrinsic = mocker.AsyncMock(return_value=fake_extrinsic)
+ subtensor.substrate.create_signed_extrinsic = mocked_create_signed_extrinsic
+
+ mocked_submit_extrinsic = mocker.AsyncMock(return_value=fake_response)
+ subtensor.substrate.submit_extrinsic = mocked_submit_extrinsic
+
+ fake_response.process_events = mocker.AsyncMock()
+
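+    # is_success is awaited by the code under test, so assign a coroutine object
+    # rather than a plain bool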
+ async def fake_is_success():
+ return True
+
+ fake_response.is_success = fake_is_success()
+
+ # Call
+ result = await subtensor.sign_and_send_extrinsic(
+ call=fake_call,
+ wallet=fake_wallet,
+ wait_for_inclusion=True,
+ wait_for_finalization=True,
+ )
+
+ # Asserts
+ mocked_create_signed_extrinsic.assert_called_once_with(
+ call=fake_call, keypair=fake_wallet.coldkey
+ )
+ mocked_submit_extrinsic.assert_called_once_with(
+ fake_extrinsic,
+ wait_for_inclusion=True,
+ wait_for_finalization=True,
+ )
+ fake_response.process_events.assert_awaited_once()
+ assert result == (True, "")
+
+
+@pytest.mark.asyncio
+async def test_sign_and_send_extrinsic_error_finalization(subtensor, mocker):
+ """Tests sign_and_send_extrinsic when the extrinsic is error finalized."""
+ # Preps
+ fake_call = mocker.Mock()
+ fake_wallet = mocker.Mock()
+ fake_extrinsic = mocker.Mock()
+ fake_response = mocker.Mock()
+
+ mocked_create_signed_extrinsic = mocker.AsyncMock(return_value=fake_extrinsic)
+ subtensor.substrate.create_signed_extrinsic = mocked_create_signed_extrinsic
+
+ mocked_submit_extrinsic = mocker.AsyncMock(return_value=fake_response)
+ subtensor.substrate.submit_extrinsic = mocked_submit_extrinsic
+
+ fake_response.process_events = mocker.AsyncMock()
+
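+    # is_success and error_message are awaited by the code under test, so assign
+    # coroutine objects rather than plain values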
+ async def fake_is_success():
+ return False
+
+ fake_response.is_success = fake_is_success()
+
+ async def fake_error_message():
+ return {"some error": "message"}
+
+ fake_response.error_message = fake_error_message()
+
+ mocked_format_error_message = mocker.Mock()
+ async_subtensor.format_error_message = mocked_format_error_message
+
+ # Call
+ result = await subtensor.sign_and_send_extrinsic(
+ call=fake_call,
+ wallet=fake_wallet,
+ wait_for_inclusion=True,
+ wait_for_finalization=True,
+ )
+
+ # Asserts
+ mocked_create_signed_extrinsic.assert_called_once_with(
+ call=fake_call, keypair=fake_wallet.coldkey
+ )
+ mocked_submit_extrinsic.assert_called_once_with(
+ fake_extrinsic,
+ wait_for_inclusion=True,
+ wait_for_finalization=True,
+ )
+ fake_response.process_events.assert_awaited_once()
+ assert result == (False, mocked_format_error_message.return_value)
+
+
+@pytest.mark.asyncio
+async def test_sign_and_send_extrinsic_success_without_inclusion_finalization(
+ subtensor, mocker
+):
+ """Tests sign_and_send_extrinsic when extrinsic is submitted without waiting for inclusion or finalization."""
+ # Preps
+ fake_call = mocker.Mock()
+ fake_wallet = mocker.Mock()
+ fake_extrinsic = mocker.Mock()
+
+ mocked_create_signed_extrinsic = mocker.AsyncMock(return_value=fake_extrinsic)
+ subtensor.substrate.create_signed_extrinsic = mocked_create_signed_extrinsic
+
+ mocked_submit_extrinsic = mocker.AsyncMock()
+ subtensor.substrate.submit_extrinsic = mocked_submit_extrinsic
+
+ # Call
+ result = await subtensor.sign_and_send_extrinsic(
+ call=fake_call,
+ wallet=fake_wallet,
+ wait_for_inclusion=False,
+ wait_for_finalization=False,
+ )
+
+ # Asserts
+ mocked_create_signed_extrinsic.assert_awaited_once()
+ mocked_create_signed_extrinsic.assert_called_once_with(
+ call=fake_call, keypair=fake_wallet.coldkey
+ )
+ mocked_submit_extrinsic.assert_awaited_once()
+ mocked_submit_extrinsic.assert_called_once_with(
+ fake_extrinsic,
+ wait_for_inclusion=False,
+ wait_for_finalization=False,
+ )
+ assert result == (True, "")
+
+
+@pytest.mark.asyncio
+async def test_sign_and_send_extrinsic_substrate_request_exception(subtensor, mocker):
+ """Tests sign_and_send_extrinsic when SubstrateRequestException is raised."""
+ # Preps
+ fake_call = mocker.Mock()
+ fake_wallet = mocker.Mock()
+ fake_extrinsic = mocker.Mock()
+ fake_exception = async_subtensor.SubstrateRequestException("Test Exception")
+
+ mocked_create_signed_extrinsic = mocker.AsyncMock(return_value=fake_extrinsic)
+ subtensor.substrate.create_signed_extrinsic = mocked_create_signed_extrinsic
+
+ mocked_submit_extrinsic = mocker.AsyncMock(side_effect=fake_exception)
+ subtensor.substrate.submit_extrinsic = mocked_submit_extrinsic
+
+ mocker.patch.object(
+ async_subtensor,
+ "format_error_message",
+ return_value=str(fake_exception),
+ )
+
+ # Call
+ result = await subtensor.sign_and_send_extrinsic(
+ call=fake_call,
+ wallet=fake_wallet,
+ wait_for_inclusion=True,
+ wait_for_finalization=True,
+ )
+
+ # Asserts
+ assert result == (False, str(fake_exception))
+
+
+@pytest.mark.asyncio
+async def test_get_children_success(subtensor, mocker):
+ """Tests get_children when children are successfully retrieved and formatted."""
+ # Preps
+ fake_hotkey = "valid_hotkey"
+ fake_netuid = 1
+ fake_children = [
+ (1000, ["child_key_1"]),
+ (2000, ["child_key_2"]),
+ ]
+
+ mocked_query = mocker.AsyncMock(return_value=fake_children)
+ subtensor.substrate.query = mocked_query
+
+ mocked_decode_account_id = mocker.Mock(
+ side_effect=["decoded_child_key_1", "decoded_child_key_2"]
+ )
+ mocker.patch.object(async_subtensor, "decode_account_id", mocked_decode_account_id)
+
+ expected_formatted_children = [
+ (1000, "decoded_child_key_1"),
+ (2000, "decoded_child_key_2"),
+ ]
+
+ # Call
+ result = await subtensor.get_children(hotkey=fake_hotkey, netuid=fake_netuid)
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="ChildKeys",
+ params=[fake_hotkey, fake_netuid],
+ )
+ mocked_decode_account_id.assert_has_calls(
+ [mocker.call("child_key_1"), mocker.call("child_key_2")]
+ )
+ assert result == (True, expected_formatted_children, "")
+
+
+@pytest.mark.asyncio
+async def test_get_children_no_children(subtensor, mocker):
+ """Tests get_children when there are no children to retrieve."""
+ # Preps
+ fake_hotkey = "valid_hotkey"
+ fake_netuid = 1
+ fake_children = []
+
+ mocked_query = mocker.AsyncMock(return_value=fake_children)
+ subtensor.substrate.query = mocked_query
+
+ # Call
+ result = await subtensor.get_children(hotkey=fake_hotkey, netuid=fake_netuid)
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="ChildKeys",
+ params=[fake_hotkey, fake_netuid],
+ )
+ assert result == (True, [], "")
+
+
+@pytest.mark.asyncio
+async def test_get_children_substrate_request_exception(subtensor, mocker):
+ """Tests get_children when SubstrateRequestException is raised."""
+ # Preps
+ fake_hotkey = "valid_hotkey"
+ fake_netuid = 1
+ fake_exception = async_subtensor.SubstrateRequestException("Test Exception")
+
+ mocked_query = mocker.AsyncMock(side_effect=fake_exception)
+ subtensor.substrate.query = mocked_query
+
+ mocked_format_error_message = mocker.Mock(return_value="Formatted error message")
+ mocker.patch.object(
+ async_subtensor, "format_error_message", mocked_format_error_message
+ )
+
+ # Call
+ result = await subtensor.get_children(hotkey=fake_hotkey, netuid=fake_netuid)
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="ChildKeys",
+ params=[fake_hotkey, fake_netuid],
+ )
+ mocked_format_error_message.assert_called_once_with(
+ fake_exception, subtensor.substrate
+ )
+ assert result == (False, [], "Formatted error message")
+
+
+@pytest.mark.asyncio
+async def test_get_subnet_hyperparameters_success(subtensor, mocker):
+ """Tests get_subnet_hyperparameters with successful hyperparameter retrieval."""
+ # Preps
+ fake_netuid = 1
+ fake_block_hash = "block_hash"
+ fake_hex_bytes_result = "0xaabbccdd"
+
+ mocked_query_runtime_api = mocker.AsyncMock(return_value=fake_hex_bytes_result)
+ subtensor.query_runtime_api = mocked_query_runtime_api
+
+ mocked_from_vec_u8 = mocker.Mock()
+ mocker.patch.object(
+ async_subtensor.SubnetHyperparameters, "from_vec_u8", mocked_from_vec_u8
+ )
+
+ # Call
+ result = await subtensor.get_subnet_hyperparameters(
+ netuid=fake_netuid, block_hash=fake_block_hash
+ )
+
+ # Asserts
+ mocked_query_runtime_api.assert_called_once_with(
+ runtime_api="SubnetInfoRuntimeApi",
+ method="get_subnet_hyperparams",
+ params=[fake_netuid],
+ block_hash=fake_block_hash,
+ )
+ bytes_result = bytes.fromhex(fake_hex_bytes_result[2:])
+ mocked_from_vec_u8.assert_called_once_with(bytes_result)
+ assert result == mocked_from_vec_u8.return_value
+
+
+@pytest.mark.asyncio
+async def test_get_subnet_hyperparameters_no_data(subtensor, mocker):
+ """Tests get_subnet_hyperparameters when no hyperparameters data is returned."""
+ # Preps
+ fake_netuid = 1
+
+ mocked_query_runtime_api = mocker.AsyncMock(return_value=None)
+ subtensor.query_runtime_api = mocked_query_runtime_api
+
+ # Call
+ result = await subtensor.get_subnet_hyperparameters(netuid=fake_netuid)
+
+ # Asserts
+ mocked_query_runtime_api.assert_called_once_with(
+ runtime_api="SubnetInfoRuntimeApi",
+ method="get_subnet_hyperparams",
+ params=[fake_netuid],
+ block_hash=None,
+ )
+ assert result == []
+
+
+@pytest.mark.asyncio
+async def test_get_subnet_hyperparameters_without_0x_prefix(subtensor, mocker):
+ """Tests get_subnet_hyperparameters when hex_bytes_result is without 0x prefix."""
+ # Preps
+ fake_netuid = 1
+ fake_hex_bytes_result = "aabbccdd" # without "0x" prefix
+
+ mocked_query_runtime_api = mocker.AsyncMock(return_value=fake_hex_bytes_result)
+ subtensor.query_runtime_api = mocked_query_runtime_api
+
+ mocked_from_vec_u8 = mocker.Mock()
+ mocker.patch.object(
+ async_subtensor.SubnetHyperparameters, "from_vec_u8", mocked_from_vec_u8
+ )
+
+ # Call
+ result = await subtensor.get_subnet_hyperparameters(netuid=fake_netuid)
+
+ # Asserts
+ mocked_query_runtime_api.assert_called_once_with(
+ runtime_api="SubnetInfoRuntimeApi",
+ method="get_subnet_hyperparams",
+ params=[fake_netuid],
+ block_hash=None,
+ )
+ bytes_result = bytes.fromhex(fake_hex_bytes_result)
+ mocked_from_vec_u8.assert_called_once_with(bytes_result)
+ assert result == mocked_from_vec_u8.return_value
+
+
+@pytest.mark.asyncio
+async def test_get_vote_data_success(subtensor, mocker):
+ """Tests get_vote_data when voting data is successfully retrieved."""
+ # Preps
+ fake_proposal_hash = "valid_proposal_hash"
+ fake_block_hash = "block_hash"
+ fake_vote_data = {"ayes": ["senate_member_1"], "nays": ["senate_member_2"]}
+
+ mocked_query = mocker.AsyncMock(return_value=fake_vote_data)
+ subtensor.substrate.query = mocked_query
+
+ mocked_proposal_vote_data = mocker.Mock()
+ mocker.patch.object(
+ async_subtensor, "ProposalVoteData", return_value=mocked_proposal_vote_data
+ )
+
+ # Call
+ result = await subtensor.get_vote_data(
+ proposal_hash=fake_proposal_hash, block_hash=fake_block_hash
+ )
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="Triumvirate",
+ storage_function="Voting",
+ params=[fake_proposal_hash],
+ block_hash=fake_block_hash,
+ reuse_block_hash=False,
+ )
+ assert result == mocked_proposal_vote_data
+
+
+@pytest.mark.asyncio
+async def test_get_vote_data_no_data(subtensor, mocker):
+ """Tests get_vote_data when no voting data is available."""
+ # Preps
+ fake_proposal_hash = "invalid_proposal_hash"
+ fake_block_hash = "block_hash"
+
+ mocked_query = mocker.AsyncMock(return_value=None)
+ subtensor.substrate.query = mocked_query
+
+ # Call
+ result = await subtensor.get_vote_data(
+ proposal_hash=fake_proposal_hash, block_hash=fake_block_hash
+ )
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="Triumvirate",
+ storage_function="Voting",
+ params=[fake_proposal_hash],
+ block_hash=fake_block_hash,
+ reuse_block_hash=False,
+ )
+ assert result is None
+
+
+@pytest.mark.asyncio
+async def test_get_delegate_identities(subtensor, mocker):
+ """Tests get_delegate_identities with successful data retrieval from both chain and GitHub."""
+ # Preps
+ fake_block_hash = "block_hash"
+ fake_chain_data = [
+ (["delegate1_ss58"], {"info": {"name": "Chain Delegate 1"}}),
+ (["delegate2_ss58"], {"info": {"name": "Chain Delegate 2"}}),
+ ]
+ fake_github_data = {
+ "delegate1_ss58": {
+ "name": "GitHub Delegate 1",
+ "url": "https://delegate1.com",
+ "description": "GitHub description 1",
+ "fingerprint": "fingerprint1",
+ },
+ "delegate3_ss58": {
+ "name": "GitHub Delegate 3",
+ "url": "https://delegate3.com",
+ "description": "GitHub description 3",
+ "fingerprint": "fingerprint3",
+ },
+ }
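+    # delegate1 appears on-chain and on GitHub, delegate2 only on-chain, and
+    # delegate3 only on GitHub; all three should be present in the merged result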
+
+ mocked_query_map = mocker.AsyncMock(return_value=fake_chain_data)
+ subtensor.substrate.query_map = mocked_query_map
+
+ mocked_decode_account_id = mocker.Mock(side_effect=lambda ss58: ss58)
+ mocker.patch.object(async_subtensor, "decode_account_id", mocked_decode_account_id)
+
+ mocked_decode_hex_identity_dict = mocker.Mock(side_effect=lambda data: data)
+ mocker.patch.object(
+ async_subtensor, "decode_hex_identity_dict", mocked_decode_hex_identity_dict
+ )
+
+ mock_response = mocker.Mock()
+ mock_response.ok = True
+ mock_response.json = mocker.AsyncMock(return_value=fake_github_data)
+
+ mock_session_get = mocker.AsyncMock(return_value=mock_response)
+ mocker.patch("aiohttp.ClientSession.get", mock_session_get)
+
+ # Call
+ result = await subtensor.get_delegate_identities(block_hash=fake_block_hash)
+
+ # Asserts
+ mocked_query_map.assert_called_once_with(
+ module="Registry",
+ storage_function="IdentityOf",
+ block_hash=fake_block_hash,
+ )
+ mock_session_get.assert_called_once_with(async_subtensor.DELEGATES_DETAILS_URL)
+
+ assert result["delegate1_ss58"].display == "GitHub Delegate 1"
+ assert result["delegate2_ss58"].display == ""
+ assert result["delegate3_ss58"].display == "GitHub Delegate 3"
+
+
+@pytest.mark.asyncio
+async def test_is_hotkey_registered_true(subtensor, mocker):
+ """Tests is_hotkey_registered when the hotkey is registered on the netuid."""
+ # Preps
+ fake_netuid = 1
+ fake_hotkey_ss58 = "registered_hotkey"
+ fake_result = "some_value"
+ mocked_query = mocker.AsyncMock(return_value=fake_result)
+ subtensor.substrate.query = mocked_query
+
+ # Call
+ result = await subtensor.is_hotkey_registered(
+ netuid=fake_netuid, hotkey_ss58=fake_hotkey_ss58
+ )
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[fake_netuid, fake_hotkey_ss58],
+ )
+ assert result is True
+
+
+@pytest.mark.asyncio
+async def test_is_hotkey_registered_false(subtensor, mocker):
+ """Tests is_hotkey_registered when the hotkey is not registered on the netuid."""
+ # Preps
+ fake_netuid = 1
+ fake_hotkey_ss58 = "unregistered_hotkey"
+ fake_result = None
+
+ mocked_query = mocker.AsyncMock(return_value=fake_result)
+ subtensor.substrate.query = mocked_query
+
+ # Call
+ result = await subtensor.is_hotkey_registered(
+ netuid=fake_netuid, hotkey_ss58=fake_hotkey_ss58
+ )
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[fake_netuid, fake_hotkey_ss58],
+ )
+ assert result is False
+
+
+@pytest.mark.asyncio
+async def test_get_uid_for_hotkey_on_subnet_registered(subtensor, mocker):
+ """Tests get_uid_for_hotkey_on_subnet when the hotkey is registered and has a UID."""
+ # Preps
+ fake_hotkey_ss58 = "registered_hotkey"
+ fake_netuid = 1
+ fake_block_hash = "block_hash"
+ fake_uid = 123
+
+ mocked_query = mocker.AsyncMock(return_value=fake_uid)
+ subtensor.substrate.query = mocked_query
+
+ # Call
+ result = await subtensor.get_uid_for_hotkey_on_subnet(
+ hotkey_ss58=fake_hotkey_ss58, netuid=fake_netuid, block_hash=fake_block_hash
+ )
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[fake_netuid, fake_hotkey_ss58],
+ block_hash=fake_block_hash,
+ )
+ assert result == fake_uid
+
+
+@pytest.mark.asyncio
+async def test_get_uid_for_hotkey_on_subnet_not_registered(subtensor, mocker):
+ """Tests get_uid_for_hotkey_on_subnet when the hotkey is not registered on the subnet."""
+ # Preps
+ fake_hotkey_ss58 = "unregistered_hotkey"
+ fake_netuid = 1
+ fake_block_hash = "block_hash"
+ fake_result = None
+
+ mocked_query = mocker.AsyncMock(return_value=fake_result)
+ subtensor.substrate.query = mocked_query
+
+ # Call
+ result = await subtensor.get_uid_for_hotkey_on_subnet(
+ hotkey_ss58=fake_hotkey_ss58, netuid=fake_netuid, block_hash=fake_block_hash
+ )
+
+ # Asserts
+ mocked_query.assert_called_once_with(
+ module="SubtensorModule",
+ storage_function="Uids",
+ params=[fake_netuid, fake_hotkey_ss58],
+ block_hash=fake_block_hash,
+ )
+ assert result is None
+
+
+@pytest.mark.asyncio
+async def test_weights_rate_limit_success(subtensor, mocker):
+ """Tests weights_rate_limit when the hyperparameter value is successfully retrieved."""
+ # Preps
+ fake_netuid = 1
+ fake_rate_limit = 10
+
+ mocked_get_hyperparameter = mocker.AsyncMock(return_value=fake_rate_limit)
+ subtensor.get_hyperparameter = mocked_get_hyperparameter
+
+ # Call
+ result = await subtensor.weights_rate_limit(netuid=fake_netuid)
+
+ # Asserts
+ mocked_get_hyperparameter.assert_called_once_with(
+ param_name="WeightsSetRateLimit", netuid=fake_netuid
+ )
+ assert result == fake_rate_limit
+
+
+@pytest.mark.asyncio
+async def test_weights_rate_limit_none(subtensor, mocker):
+ """Tests weights_rate_limit when the hyperparameter value is not found."""
+ # Preps
+ fake_netuid = 1
+ fake_result = None
+
+ mocked_get_hyperparameter = mocker.AsyncMock(return_value=fake_result)
+ subtensor.get_hyperparameter = mocked_get_hyperparameter
+
+ # Call
+ result = await subtensor.weights_rate_limit(netuid=fake_netuid)
+
+ # Asserts
+ mocked_get_hyperparameter.assert_called_once_with(
+ param_name="WeightsSetRateLimit", netuid=fake_netuid
+ )
+ assert result is None
+
+
+@pytest.mark.asyncio
+async def test_blocks_since_last_update_success(subtensor, mocker):
+ """Tests blocks_since_last_update when the data is successfully retrieved."""
+ # Preps
+ fake_netuid = 1
+ fake_uid = 5
+ last_update_block = 50
+ current_block = 100
+ fake_blocks_since_update = current_block - last_update_block
+
+ mocked_get_hyperparameter = mocker.AsyncMock(
+ return_value={fake_uid: last_update_block}
+ )
+ subtensor.get_hyperparameter = mocked_get_hyperparameter
+
+ mocked_get_current_block = mocker.AsyncMock(return_value=current_block)
+ subtensor.get_current_block = mocked_get_current_block
+
+ # Call
+ result = await subtensor.blocks_since_last_update(netuid=fake_netuid, uid=fake_uid)
+
+ # Asserts
+ mocked_get_hyperparameter.assert_called_once_with(
+ param_name="LastUpdate", netuid=fake_netuid
+ )
+ mocked_get_current_block.assert_called_once()
+ assert result == fake_blocks_since_update
+
+
+@pytest.mark.asyncio
+async def test_blocks_since_last_update_no_last_update(subtensor, mocker):
+ """Tests blocks_since_last_update when the last update data is not found."""
+ # Preps
+ fake_netuid = 1
+ fake_uid = 5
+ fake_result = None
+
+ mocked_get_hyperparameter = mocker.AsyncMock(return_value=fake_result)
+ subtensor.get_hyperparameter = mocked_get_hyperparameter
+
+ # Call
+ result = await subtensor.blocks_since_last_update(netuid=fake_netuid, uid=fake_uid)
+
+ # Asserts
+ mocked_get_hyperparameter.assert_called_once_with(
+ param_name="LastUpdate", netuid=fake_netuid
+ )
+ assert result is None
diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py
index 765e093ddb..e3f573c67f 100644
--- a/tests/unit_tests/test_subtensor.py
+++ b/tests/unit_tests/test_subtensor.py
@@ -2069,34 +2069,6 @@ def test_get_all_subnets_info_no_data(mocker, subtensor, result_):
subtensor_module.SubnetInfo.list_from_vec_u8.assert_not_called()
-def test_get_all_subnets_info_retry(mocker, subtensor):
- """Test get_all_subnets_info retries on failure."""
- # Prep
- block = 123
- subnet_data = [1, 2, 3]
- mocker.patch.object(
- subtensor.substrate, "get_block_hash", return_value="mock_block_hash"
- )
- mock_response = {"result": subnet_data}
- mock_rpc_request = mocker.patch.object(
- subtensor.substrate,
- "rpc_request",
- side_effect=[Exception, Exception, mock_response],
- )
- mocker.patch.object(
- subtensor_module.SubnetInfo, "list_from_vec_u8", return_value=["some_data"]
- )
-
- # Call
- result = subtensor.get_all_subnets_info(block)
-
- # Asserts
- subtensor.substrate.get_block_hash.assert_called_with(block)
- assert mock_rpc_request.call_count == 3
- subtensor_module.SubnetInfo.list_from_vec_u8.assert_called_once_with(subnet_data)
- assert result == ["some_data"]
-
-
def test_get_delegate_take_success(subtensor, mocker):
"""Verify `get_delegate_take` method successful path."""
# Preps
diff --git a/tests/unit_tests/utils/test_utils.py b/tests/unit_tests/utils/test_utils.py
index 8ed28c0670..a01b42f31d 100644
--- a/tests/unit_tests/utils/test_utils.py
+++ b/tests/unit_tests/utils/test_utils.py
@@ -15,9 +15,11 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
+from bittensor_wallet import Wallet
+import pytest
+
from bittensor import utils
from bittensor.core.settings import SS58_FORMAT
-import pytest
def test_ss58_to_vec_u8(mocker):
@@ -167,3 +169,57 @@ def test_is_valid_bittensor_address_or_public_key(mocker, test_input, expected_r
if isinstance(test_input, str) and not test_input.startswith("0x"):
assert mocked_ss58_is_valid_ss58_address.call_count == 2
assert result == expected_result
+
+
+@pytest.mark.parametrize(
+ "unlock_type, wallet_method",
+ [
+ ("coldkey", "unlock_coldkey"),
+ ("hotkey", "unlock_hotkey"),
+ ],
+)
+def test_unlock_key(mocker, unlock_type, wallet_method):
+ """Test the unlock key function."""
+ # Preps
+ mock_wallet = mocker.Mock(autospec=Wallet)
+
+ # Call
+ result = utils.unlock_key(mock_wallet, unlock_type=unlock_type)
+
+ # Asserts
+ getattr(mock_wallet, wallet_method).assert_called_once()
+ assert result == utils.UnlockStatus(True, "")
+
+
+def test_unlock_key_raise_value_error(mocker):
+ """Test the unlock key function raises ValueError."""
+ with pytest.raises(ValueError):
+ utils.unlock_key(wallet=mocker.Mock(autospec=Wallet), unlock_type="coldkeypub")
+
+
+@pytest.mark.parametrize(
+ "side_effect, response",
+ [
+ (
+ utils.KeyFileError("Simulated KeyFileError exception"),
+ utils.UnlockStatus(
+ False,
+ "Coldkey keyfile is corrupt, non-writable, or non-readable, or non-existent.",
+ ),
+ ),
+ (
+ utils.PasswordError("Simulated PasswordError exception"),
+ utils.UnlockStatus(
+ False, "The password used to decrypt your Coldkey keyfile is invalid."
+ ),
+ ),
+ ],
+ ids=["PasswordError", "KeyFileError"],
+)
+def test_unlock_key_errors(mocker, side_effect, response):
+ """Test the unlock key function handles the errors."""
+ mock_wallet = mocker.Mock(autospec=Wallet)
+ mock_wallet.unlock_coldkey.side_effect = side_effect
+ result = utils.unlock_key(wallet=mock_wallet)
+
+ assert result == response