diff --git a/.github/workflows/check-devnet.yml b/.github/workflows/check-devnet.yml new file mode 100644 index 000000000..d8f2be391 --- /dev/null +++ b/.github/workflows/check-devnet.yml @@ -0,0 +1,39 @@ +name: Devnet Deploy Check + +on: + pull_request: + branches: [devnet] + +env: + CARGO_TERM_COLOR: always + +jobs: + check-spec-version: + name: Check spec_version bump + runs-on: SubtensorCI + steps: + - name: Dependencies + run: | + sudo apt-get update && + sudo apt-get install -y curl clang curl libssl-dev llvm \ + libudev-dev protobuf-compiler + + - name: Set up Rust Toolchain + run: curl https://sh.rustup.rs -sSf | sh -s -- -y + + - name: Install substrate-spec-version + run: cargo install substrate-spec-version + + - name: Check-out repository under $GITHUB_WORKSPACE + uses: actions/checkout@v4 + + - name: Check that spec_version has been bumped + run: | + spec_version=$(PATH=$PATH:$HOME/.cargo/bin substrate-spec-version wss://dev.chain.opentensor.ai:443 | tr -d '\n') + echo "network spec_version: $spec_version" + : ${spec_version:?bad spec version} + local_spec_version=$(cargo run -p node-subtensor-runtime --bin spec_version | tr -d '\n') + echo "local spec_version: $local_spec_version" + echo "network spec_version: $spec_version" + if (( $(echo "$local_spec_version <= $spec_version" | bc -l) )); then echo "$local_spec_version ≯ $spec_version ❌"; exit 1; fi + echo "$local_spec_version > $spec_version ✅" diff --git a/.github/workflows/check-finney.yml b/.github/workflows/check-finney.yml new file mode 100644 index 000000000..c74980bcd --- /dev/null +++ b/.github/workflows/check-finney.yml @@ -0,0 +1,39 @@ +name: Finney Deploy Check + +on: + pull_request: + branches: [finney] + +env: + CARGO_TERM_COLOR: always + +jobs: + check-spec-version: + name: Check spec_version bump + runs-on: SubtensorCI + steps: + - name: Dependencies + run: | + sudo apt-get update && + sudo apt-get install -y curl clang curl libssl-dev llvm \ + libudev-dev protobuf-compiler + + - 
name: Set up Rust Toolchain + run: curl https://sh.rustup.rs -sSf | sh -s -- -y + + - name: Install substrate-spec-version + run: cargo install substrate-spec-version + + - name: Check-out repository under $GITHUB_WORKSPACE + uses: actions/checkout@v4 + + - name: Check that spec_version has been bumped + run: | + spec_version=$(PATH=$PATH:$HOME/.cargo/bin substrate-spec-version wss://entrypoint-finney.opentensor.ai:443 | tr -d '\n') + echo "network spec_version: $spec_version" + : ${spec_version:?bad spec version} + local_spec_version=$(cargo run -p node-subtensor-runtime --bin spec_version | tr -d '\n') + echo "local spec_version: $local_spec_version" + echo "network spec_version: $spec_version" + if (( $(echo "$local_spec_version <= $spec_version" | bc -l) )); then echo "$local_spec_version ≯ $spec_version ❌"; exit 1; fi + echo "$local_spec_version > $spec_version ✅" diff --git a/.github/workflows/check-rust.yml b/.github/workflows/check-rust.yml index 61f7fb920..e7be25ed7 100644 --- a/.github/workflows/check-rust.yml +++ b/.github/workflows/check-rust.yml @@ -5,16 +5,10 @@ concurrency: cancel-in-progress: true on: - ## Run automatically for all PRs against main, regardless of what the changes are - ## to be safe and so we can more easily force re-run the CI when github is being - ## weird by using a blank commit push: - branches: [main, development, staging] + branches: [main, devnet-ready, devnet, testnet, finney] - ## - # Run automatically for PRs against default/main branch if Rust files change pull_request: - branches: [main, development, staging] ## Allow running workflow manually from the Actions tab workflow_dispatch: @@ -56,7 +50,7 @@ jobs: TARGET: ${{ matrix.rust-target }} steps: - name: Check-out repository under $GITHUB_WORKSPACE - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Install dependencies run: sudo apt-get update && sudo apt-get install -y build-essential @@ -77,7 +71,7 @@ jobs: strategy: matrix: rust-branch: - 
nightly-2024-03-05 + - stable rust-target: - x86_64-unknown-linux-gnu # - x86_64-apple-darwin @@ -97,7 +91,7 @@ jobs: TARGET: ${{ matrix.rust-target }} steps: - name: Check-out repository under $GITHUB_WORKSPACE - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Install dependencies run: | @@ -125,7 +119,7 @@ jobs: strategy: matrix: rust-branch: - - nightly-2024-03-05 + - stable rust-target: - x86_64-unknown-linux-gnu # - x86_64-apple-darwin @@ -166,7 +160,6 @@ jobs: - name: cargo clippy --workspace --all-targets --all-features -- -D warnings run: cargo clippy --workspace --all-targets --all-features -- -D warnings - # runs cargo test --workspace cargo-test: name: cargo test @@ -174,7 +167,7 @@ jobs: strategy: matrix: rust-branch: - - nightly-2024-03-05 + - stable rust-target: - x86_64-unknown-linux-gnu # - x86_64-apple-darwin @@ -194,7 +187,7 @@ jobs: TARGET: ${{ matrix.rust-target }} steps: - name: Check-out repository under $GITHUB_WORKSPACE - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Install dependencies run: | @@ -223,7 +216,7 @@ jobs: strategy: matrix: rust-branch: - - nightly-2024-03-05 + - stable rust-target: - x86_64-unknown-linux-gnu # - x86_64-apple-darwin @@ -243,7 +236,7 @@ jobs: TARGET: ${{ matrix.rust-target }} steps: - name: Check-out repository under $GITHUB_WORKSPACE - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Install dependencies run: | @@ -272,7 +265,7 @@ jobs: strategy: matrix: rust-branch: - - nightly-2024-03-05 + - stable rust-target: - x86_64-unknown-linux-gnu # - x86_64-apple-darwin @@ -292,7 +285,7 @@ jobs: TARGET: ${{ matrix.rust-target }} steps: - name: Check-out repository under $GITHUB_WORKSPACE - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Install dependencies run: | @@ -339,7 +332,7 @@ jobs: run: cargo install --locked -q zepter && zepter --version - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Dont clone historic 
commits. @@ -351,7 +344,7 @@ jobs: runs-on: SubtensorCI steps: - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Run Try Runtime Checks uses: "paritytech/try-runtime-gha@v0.1.0" @@ -389,4 +382,4 @@ jobs: runtime-package: "node-subtensor-runtime" node-uri: "wss://test.chain.opentensor.ai:443" checks: "pre-and-post" - extra-args: "--disable-spec-version-check --no-weight-warnings" + extra-args: "--disable-spec-version-check --no-weight-warnings" \ No newline at end of file diff --git a/.github/workflows/check-testnet.yml b/.github/workflows/check-testnet.yml new file mode 100644 index 000000000..5fe046b06 --- /dev/null +++ b/.github/workflows/check-testnet.yml @@ -0,0 +1,39 @@ +name: Testnet Deploy Check + +on: + pull_request: + branches: [testnet] + +env: + CARGO_TERM_COLOR: always + +jobs: + check-spec-version: + name: Check spec_version bump + runs-on: SubtensorCI + steps: + - name: Dependencies + run: | + sudo apt-get update && + sudo apt-get install -y curl clang curl libssl-dev llvm \ + libudev-dev protobuf-compiler + + - name: Set up Rust Toolchain + run: curl https://sh.rustup.rs -sSf | sh -s -- -y + + - name: Install substrate-spec-version + run: cargo install substrate-spec-version + + - name: Check-out repository under $GITHUB_WORKSPACE + uses: actions/checkout@v4 + + - name: Check that spec_version has been bumped + run: | + spec_version=$(PATH=$PATH:$HOME/.cargo/bin substrate-spec-version wss://test.finney.opentensor.ai:443 | tr -d '\n') + echo "network spec_version: $spec_version" + : ${spec_version:?bad spec version} + local_spec_version=$(cargo run -p node-subtensor-runtime --bin spec_version | tr -d '\n') + echo "local spec_version: $local_spec_version" + echo "network spec_version: $spec_version" + if (( $(echo "$local_spec_version <= $spec_version" | bc -l) )); then echo "$local_spec_version ≯ $spec_version ❌"; exit 1; fi + echo "$local_spec_version > $spec_version ✅" diff --git 
a/.github/workflows/devnet-labels.yml b/.github/workflows/devnet-labels.yml new file mode 100644 index 000000000..0fe409d1d --- /dev/null +++ b/.github/workflows/devnet-labels.yml @@ -0,0 +1,18 @@ +name: Tested on Devnet +on: + pull_request: + types: [opened, labeled, unlabeled, synchronize] +jobs: + check-labels: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: mheap/github-action-required-labels@v5 + with: + mode: minimum + count: 1 + labels: | + devnet-pass + devnet-skip diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 000000000..19bda7463 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,55 @@ +name: Publish Docker Image + +on: + push: + branches: + - main + tags: + - '*' + pull_request: + branches: + - main + workflow_dispatch: + +permissions: + contents: read + packages: write + actions: read + security-events: write + +jobs: + publish: + runs-on: SubtensorCI + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GHCR + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v4 + with: + images: ghcr.io/${{ github.repository }} + + - name: Build and push Docker image + uses: docker/build-push-action@v4 + with: + context: . 
+ push: true + tags: | + ${{ steps.meta.outputs.tags }} + ghcr.io/${{ github.repository }}:latest + labels: ${{ steps.meta.outputs.labels }} \ No newline at end of file diff --git a/.github/workflows/e2e-bittensor-tests.yml b/.github/workflows/e2e-bittensor-tests.yml index 40da2f67a..773edb566 100644 --- a/.github/workflows/e2e-bittensor-tests.yml +++ b/.github/workflows/e2e-bittensor-tests.yml @@ -75,10 +75,11 @@ jobs: git checkout staging python3 -m pip install -e . python3 -m pip install torch + python3 -m pip install pytest - name: Run tests working-directory: ${{ github.workspace }}/bittensor run: | pwd ls - LOCALNET_SH_PATH="../scripts/localnet.sh" pytest tests/e2e_tests/ -s + LOCALNET_SH_PATH="${{ github.workspace }}/scripts/localnet.sh" pytest tests/e2e_tests/ -s diff --git a/.github/workflows/label-triggers.yml b/.github/workflows/label-triggers.yml new file mode 100644 index 000000000..d32396e07 --- /dev/null +++ b/.github/workflows/label-triggers.yml @@ -0,0 +1,25 @@ +name: Label Triggers +on: + pull_request: + types: + - labeled + +permissions: + issues: write + pull-requests: write + +jobs: + comment_on_breaking_change: + runs-on: ubuntu-latest + steps: + - name: Check if 'breaking change' label is added + if: github.event.label.name == 'breaking-change' + uses: actions/github-script@v6 + with: + script: | + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: '@opentensor/cerebrum / @opentensor/gyrus / @opentensor/cortex breaking change detected! Please prepare accordingly!' 
+ }) diff --git a/.github/workflows/testnet-labels.yml b/.github/workflows/testnet-labels.yml new file mode 100644 index 000000000..f471f9991 --- /dev/null +++ b/.github/workflows/testnet-labels.yml @@ -0,0 +1,18 @@ +name: Tested on Testnet +on: + pull_request: + types: [opened, labeled, unlabeled, synchronize] +jobs: + check-labels: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: mheap/github-action-required-labels@v5 + with: + mode: minimum + count: 1 + labels: | + testnet-pass + testnet-skip diff --git a/.gitignore b/.gitignore index e02967ccd..9cad6e792 100644 --- a/.gitignore +++ b/.gitignore @@ -37,4 +37,7 @@ specs/*.json .vscode # IntelliJ IDEA configuration -.idea \ No newline at end of file +.idea + +# Runtime upgrade snapshot +bt.snap \ No newline at end of file diff --git a/.rustfmt.toml b/.rustfmt.toml index 14c1023f6..24876acd9 100644 --- a/.rustfmt.toml +++ b/.rustfmt.toml @@ -75,7 +75,7 @@ # required_version = "1.5.1" # unstable_features = false # disable_all_formatting = false -skip_children = true +# skip_children = true # hide_parse_errors = false # error_on_line_overflow = false # error_on_unformatted = false diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..d3616041b --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,205 @@ +# Subtensor Contributor Guide + +## Lifecycle of a Pull Request + +1. Individuals wishing to contribute to subtensor should develop their change/feature/fix in a + [Pull Request](https://github.com/opentensor/subtensor/compare) (PR) targeting the `main` + branch of the subtensor GitHub repository. It is recommended to start your pull request as a + draft initially until you are ready to have other developers actively look at it. Any + changes to pallet/runtime code should be accompanied by integration and/or unit tests fully + testing all the edge cases of this functionality, if applicable. +2. 
Once you have finished developing your change/feature/fix and the Rust portion of the CI is + passing for your PR (everything prefixed with "CI"), you should mark your PR as "Ready for + Review" and request review from "Nucleus". +3. Core Nucleus team members will review your PR, possibly requesting changes, and will also + add appropriate labels to your PR as shown below. Three positive reviews are required. +4. Once the required passing reviews have been obtained, you are ready to request that your PR + be included in the next `devnet` deploy. To do this, you should open a companion PR merging + your branch into the `devnet-ready` branch. You must include a link to the parent PR in the + description and preface your PR title with "(Devnet Ready)" or the PR will be + closed/ignored. +5. A core team administrator will review your "(Devnet Ready)" PR, verifying that it logically + matches the changes introduced in the parent PR (there will sometimes be minor differences + due to merge conflicts) and will either request changes or approve the PR and merge it. Once + your companion PR is merged, the administrator will add the `devnet-ready` label to the + parent PR, indicating that the PR is on the `devnet-ready` branch and will be included in + the next deploy to `devnet`. +6. At some point, a core team administrator will open a PR merging the current `devnet-ready` + branch into `devnet`, and the CI will enforce some additional safety checks on this PR + including a requirement that the new `spec_version` be greater than the current on-chain + `spec_version`. The PR should include a bulleted list of all PRs included in the deploy so + they can be easily found after the fact (TODO: automate this). This PR will require two + reviews from the core team as a sanity check. After merging, the administrator will then + need to update all PRs with the `devnet-ready` label to instead have the `on-devnet` label + (TODO: automate this upon merge). 
The administrator will then deploy `devnet`. +7. Once the `on-devnet` label appears on your PR, if you are a core team member it is your + responsibility to verify that the features/changes/fixes introduced by your PR are + functioning properly on `devnet` by interacting with the live network. If you are an + external contributor, a core team member will be assigned to test this for you. +8. If your feature/change/fix is confirmed working on `devnet`, the `devnet-pass` label should + be added. Otherwise if there are issues, the `devnet-fail` label should be added and you + will need to make changes to your PR and repeat the previous steps in this process. In some + cases a revert PR will need to be created reverting your changes from the `devnet-ready` and + `devnet` branches, respectively. +9. Once `devnet-pass` has been added to your PR, it is eligible for inclusion in the next + `testnet` deploy. We typically run `testnet` deploys every other Wednesday. +10. On the appropriate date, an administrator will open a PR merging the current `devnet` + branch into `testnet`. This PR should include a bulleted list of all PRs included in the + deploy so they can be easily found after the fact (TODO: automate this). The PR should + exclude any PRs that currently have the `devnet-fail` label via a revert (TODO: enforce via + CI). This PR will require two reviews from the core team as a sanity check. After merging + into `testnet`, the administrator will then need to run the deploy and update all PRs + included in the deploy with the `on-testnet` label (TODO: automate this upon merge). Next + the administrator must cut a (pre-release) release in GitHub for `testnet` (TODO: github + action to generate the release and release notes). +11. 
Once the `on-testnet` label appears on your PR, if you are a core team member it is your + responsibility to once again verify that the features/changes/fixes introduced by your PR + are functioning properly on `testnet` by interacting with the live network, if applicable. + If you are an external contributor, a core team member may be assigned to do this testing + for you but otherwise it will be your responsibility to show evidence on the PR that the + testing is successful. Once this has been verified, the `testnet-pass` label should be + added. If testing fails, the `testnet-fail` label should be added and PRs should be opened + reverting the change from `devnet-ready`, and then a PR should be opened merging the + modified `devnet` into `testnet`. These revert PRs, if they occur, _must_ be merged before + a new deploy can be run (TODO: enforce this via CI). +12. After the SOP period (1 week on `testnet`) has passed and the `testnet-pass` label has been + added, the CI checks on your PR should now turn all green and a core team member will be + able to merge your PR into `main`. At this point your PR is done and is eligible to be + included in the next `finney` deploy (TODO: track and enforce SOP compliance on a per-PR + basis in CI based on the timestamps of label changes). We typically run `finney` deploys + every other Wednesday, so this will typically happen the Wednesday following the Wednesday + your PR was deployed to `testnet`. An administrator will run this deploy. The process the + administrator follows is to open a PR merging `main` into the `finney` branch, which will + always track the current state of `finney`. This PR automatically has some additional + checks on it such as asserting that the spec_version gets bumped properly and other sanity + checks designed to stop a bad deploy. Once the PR is reviewed and merged, the administrator + will run the actual deploy. 
Once that is successful, the administrator will cut a new + GitHub release tagged off of the latest `main` branch commit that was included in the + deploy, and announcements will be made regarding the release. + +## PR Labels + +| Name | Description | Automations | +| ----- | ----------- | ----------- | +| `red-team` | PR is focused on feature additions/changes | none | +| `blue-team` | PR is focused on preventative/safety measures and/or dev UX improvements | none | +| `runtime` | PR contains substantive changes to runtime / pallet code | none | +| `breaking-change` | PR requires synchronized changes with bittensor | Triggers an automatic bot message so the relevant teams are made aware of the change well in advance | +| `migration` | PR contains one or more migrations | none | +| `devnet-ready` | PR's branch has been merged into the `devnet-ready` branch and will be included in the next `devnet` deploy | none | +| `on-devnet` | PR has been deployed to `devnet` | Removes `devnet-ready` | +| `devnet-pass` | PR has passed manual testing on `devnet` | `devnet-pass` or `devnet-skip` required | +| `devnet-skip` | Allows a critical hotfix PR to skip required testing on `devnet` | `devnet-pass` or `devnet-skip` required | +| `devnet-fail` | PR has failed manual testing on `devnet` and requires modification | none | +| `on-testnet` | PR has been deployed to `testnet` | none | +| `testnet-pass` | PR has passed manual testing on `testnet` | `testnet-pass` or `testnet-skip` required | +| `testnet-skip` | Allows a critical hotfix PR to skip required manual testing and SOP on `testnet` | `testnet-pass` or `testnet-skip` required | +| `testnet-fail` | PR has failed manual testing on `testnet` and requires modification | none | + + +## Branches + + +### `devnet-ready` + +Companion PRs merge into this branch, eventually accumulating into a merge of `devnet-ready` +into `devnet`, coinciding with a deploy of `devnet`. 
+ +#### Restrictions +* no deleting the branch +* no force pushes +* no direct pushes +* require 1 positive review from an administrator +* new code changes invalidate existing reviews +* only merge commit style merging allowed + +#### CI-Enforced Restrictions +* `check-rust.yml` must pass +* TODO: parent PR must be linked to in description +* TODO: parent PR must have the required number of positive reviews + + +### `devnet` + +Tracks the current state of what is deployed to `devnet`. Modified by an administrator via a PR +merging `devnet-ready` into `devnet`, in concert with a deploy of `devnet`. + +#### Restrictions +* no deleting the branch +* no force pushes +* no direct pushes +* require 2 positive reviews from core team members +* new code changes invalidate existing reviews +* only merge commit style merging allowed + +#### CI-Enforced Restrictions +* `check-rust.yml` must pass +* `check-devnet.yml` must pass +* spec_version must be greater than what is currently on live `devnet` +* TODO: other pre-deploy sanity checks here + + +### `testnet` + +Tracks the current state of what is deployed to `testnet`. Administrator will open a PR merging +current `devnet` into `testnet` and merge it in concert with a deploy to `testnet`. Contains +tags for `testnet` releases. + +#### Restrictions +* no deleting the branch +* no force pushes +* no direct pushes +* require 2 positive reviews from core team members +* new code changes invalidate existing reviews +* only merge commit style merging allowed + +#### CI-Enforced Restrictions +* `check-rust.yml` must pass +* `check-testnet.yml` must pass +* spec_version must be greater than what is currently on live `testnet` +* TODO: other pre-deploy sanity checks here + + +### `main` + +Default branch for all new PRs. Slightly ahead of what is currently on `finney`. When a PR is all +green and "done", meaning it has been tested on `devnet` and `testnet`, it can be merged into +`main`. Contains tags for `finney` releases. 
+ +#### Restrictions +* no deleting the branch +* no force pushes +* no direct pushes +* require 3 positive reviews from core team members +* new code changes invalidate existing reviews +* all conversations must be resolved +* only merge commit style merging allowed + +#### CI-Enforced Restrictions +* `check-rust.yml` must pass +* `check-labels.yml` must pass +* must have `devnet-skip` or `devnet-pass` label +* must have `testnet-skip` or `testnet-pass` label +* if `breaking-change` label is present, bot will message the appropriate teams +* TODO: when we get auditing, presence of `needs-audit` label = require a review from auditor +* TODO: track SOP on PR based on label age + + +### `finney` + +Tracks the current state of what is deployed to `finney` (mainnet). Updated via an +administrator-submitted PR merging `main` into `finney` in concert with a `finney` deploy. + +#### Restrictions +* no deleting the branch +* no force pushes +* no direct pushes +* require 3 positive reviews from core team members +* new code changes invalidate existing reviews +* only merge commit style merging allowed + +#### CI-Enforced Restrictions +* `check-rust.yml` must pass +* `check-finney.yml` must pass +* spec_version must be greater than what is currently on live `finney` +* TODO: other pre-deploy sanity checks here diff --git a/Cargo.lock b/Cargo.lock index d5fc4c0ba..f5d934c4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4804,9 +4804,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -4906,6 +4906,7 @@ dependencies = [ "sp-runtime", "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", "sp-weights", + "substrate-fixed", ] [[package]] @@ 
-5157,6 +5158,7 @@ dependencies = [ "hex-literal", "log", "ndarray", + "num-traits", "pallet-balances", "pallet-collective", "pallet-membership", diff --git a/bt.snap b/bt.snap new file mode 100644 index 000000000..b71074ba4 Binary files /dev/null and b/bt.snap differ diff --git a/docker-compose.yml b/docker-compose.yml index b714d378c..7b76cd053 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,7 +8,7 @@ volumes: services: common: &common - image: opentensor/subtensor:latest + image: ghcr.io/opentensor/subtensor:latest build: context: . dockerfile: Dockerfile @@ -40,8 +40,8 @@ services: --base-path /tmp/blockchain \ --chain raw_spec.json \ --rpc-external --rpc-cors all \ - --ws-external --no-mdns \ - --ws-max-connections 10000 --in-peers 500 --out-peers 500 \ + --no-mdns \ + --in-peers 500 --out-peers 500 \ --bootnodes /dns/bootnode.finney.chain.opentensor.ai/tcp/30333/ws/p2p/12D3KooWRwbMb85RWnT8DSXSYMWQtuDwh4LJzndoRrTDotTR5gDC \ --sync warp @@ -58,8 +58,8 @@ services: --base-path /tmp/blockchain \ --chain raw_spec.json \ --rpc-external --rpc-cors all \ - --ws-external --no-mdns \ - --ws-max-connections 10000 --in-peers 500 --out-peers 500 \ + --no-mdns \ + --in-peers 500 --out-peers 500 \ --bootnodes /dns/bootnode.finney.chain.opentensor.ai/tcp/30333/ws/p2p/12D3KooWRwbMb85RWnT8DSXSYMWQtuDwh4LJzndoRrTDotTR5gDC \ --pruning=archive @@ -76,8 +76,8 @@ services: --base-path /tmp/blockchain \ --chain raw_testspec.json \ --rpc-external --rpc-cors all \ - --ws-external --no-mdns \ - --ws-max-connections 10000 --in-peers 500 --out-peers 500 \ + --no-mdns \ + --in-peers 500 --out-peers 500 \ --bootnodes /dns/bootnode.test.finney.opentensor.ai/tcp/30333/p2p/12D3KooWPM4mLcKJGtyVtkggqdG84zWrd7Rij6PGQDoijh1X86Vr \ --sync warp --reserved-nodes /dns/bootnode.test.finney.opentensor.ai/tcp/30333/p2p/12D3KooWPM4mLcKJGtyVtkggqdG84zWrd7Rij6PGQDoijh1X86Vr \ @@ -96,8 +96,8 @@ services: --base-path /tmp/blockchain \ --chain raw_testspec.json \ --rpc-external --rpc-cors all \ 
- --ws-external --no-mdns \ - --ws-max-connections 10000 --in-peers 500 --out-peers 500 \ + --no-mdns \ + --in-peers 500 --out-peers 500 \ --bootnodes /dns/bootnode.test.finney.opentensor.ai/tcp/30333/p2p/12D3KooWPM4mLcKJGtyVtkggqdG84zWrd7Rij6PGQDoijh1X86Vr \ --pruning=archive --reserved-nodes /dns/bootnode.test.finney.opentensor.ai/tcp/30333/p2p/12D3KooWPM4mLcKJGtyVtkggqdG84zWrd7Rij6PGQDoijh1X86Vr \ diff --git a/duplicate_stakes.csv b/duplicate_stakes.csv new file mode 100644 index 000000000..c951507c4 --- /dev/null +++ b/duplicate_stakes.csv @@ -0,0 +1 @@ +Hotkey,Coldkey,Occurrences diff --git a/node/src/rpc.rs b/node/src/rpc.rs index 511fb74c3..54f82447f 100644 --- a/node/src/rpc.rs +++ b/node/src/rpc.rs @@ -42,7 +42,7 @@ pub struct FullDeps { /// Grandpa block import setup. pub grandpa: GrandpaDeps, /// Backend used by the node. - pub backend: Arc, + pub _backend: Arc, } /// Instantiate all full RPC extensions. @@ -74,7 +74,7 @@ where pool, deny_unsafe, grandpa, - backend: _, + _backend: _, } = deps; // Custom RPC methods for Paratensor diff --git a/node/src/service.rs b/node/src/service.rs index 284edb52a..9a19ae354 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -266,7 +266,7 @@ pub fn new_full(config: Configuration) -> Result { subscription_executor: subscription_executor.clone(), finality_provider: finality_proof_provider.clone(), }, - backend: rpc_backend.clone(), + _backend: rpc_backend.clone(), }; crate::rpc::create_full(deps).map_err(Into::into) }, diff --git a/pallets/admin-utils/Cargo.toml b/pallets/admin-utils/Cargo.toml index 31c62c7da..894db2694 100644 --- a/pallets/admin-utils/Cargo.toml +++ b/pallets/admin-utils/Cargo.toml @@ -35,6 +35,7 @@ sp-io = { workspace = true } sp-tracing = { workspace = true } sp-consensus-aura = { workspace = true } pallet-balances = { workspace = true, features = ["std"] } +substrate-fixed = { git = 'https://github.com/encointer/substrate-fixed.git', tag = "v0.5.9" } [features] diff --git 
a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 61c29efff..174c861bb 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -7,7 +7,6 @@ pub use weights::WeightInfo; use sp_runtime::DispatchError; use sp_runtime::{traits::Member, RuntimeAppPublic}; -#[cfg(feature = "runtime-benchmarks")] mod benchmarking; #[deny(missing_docs)] @@ -997,6 +996,81 @@ pub mod pallet { log::info!("ToggleSetWeightsCommitReveal( netuid: {:?} ) ", netuid); Ok(()) } + /// Sets the lower bound for the alpha parameter for a given subnet. + /// + /// # Parameters + /// - `origin`: The origin of the call, which must be the root account or subnet owner. + /// - `netuid`: The unique identifier for the subnet. + /// - `alpha_low`: The new lower bound value for the alpha parameter. + /// + /// # Weight + /// This function has a fixed weight of 0 and is classified as an operational transaction that does not incur any fees. + #[pallet::call_index(50)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_alpha_low( + origin: OriginFor, + netuid: u16, + alpha_low: u16, + ) -> DispatchResult { + T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + T::Subtensor::set_alpha_low(netuid, alpha_low)?; + log::info!( + "AlphaLowSet( netuid: {:?}, alpha_low: {:?} ) ", + netuid, + alpha_low + ); + Ok(()) + } + /// Sets the upper bound for the alpha parameter for a given subnet. + /// + /// # Parameters + /// - `origin`: The origin of the call, which must be the root account or subnet owner. + /// - `netuid`: The unique identifier for the subnet. + /// - `alpha_high`: The new upper bound value for the alpha parameter. + /// + /// # Weight + /// This function has a fixed weight of 0 and is classified as an operational transaction that does not incur any fees. 
+ #[pallet::call_index(51)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_alpha_high( + origin: OriginFor, + netuid: u16, + alpha_high: u16, + ) -> DispatchResult { + T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + T::Subtensor::set_alpha_high(netuid, alpha_high)?; + log::info!( + "AlphaHighSet( netuid: {:?}, alpha_high: {:?} ) ", + netuid, + alpha_high + ); + Ok(()) + } + /// Enables or disables Liquid Alpha for a given subnet. + /// + /// # Parameters + /// - `origin`: The origin of the call, which must be the root account or subnet owner. + /// - `netuid`: The unique identifier for the subnet. + /// - `enabled`: A boolean flag to enable or disable Liquid Alpha. + /// + /// # Weight + /// This function has a fixed weight of 0 and is classified as an operational transaction that does not incur any fees. + #[pallet::call_index(52)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_liquid_alpha_enabled( + origin: OriginFor, + netuid: u16, + enabled: bool, + ) -> DispatchResult { + T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + T::Subtensor::set_liquid_alpha_enabled(netuid, enabled); + log::info!( + "LiquidAlphaEnableToggled( netuid: {:?}, Enabled: {:?} ) ", + netuid, + enabled + ); + Ok(()) + } } } @@ -1092,4 +1166,7 @@ pub trait SubtensorInterface { fn set_target_stakes_per_interval(target_stakes_per_interval: u64); fn set_commit_reveal_weights_interval(netuid: u16, interval: u64); fn set_commit_reveal_weights_enabled(netuid: u16, enabled: bool); + fn set_alpha_high(netuid: u16, alpha_high: u16) -> Result<(), DispatchError>; + fn set_alpha_low(netuid: u16, alpha_low: u16) -> Result<(), DispatchError>; + fn set_liquid_alpha_enabled(netuid: u16, enabled: bool); } diff --git a/pallets/admin-utils/src/weights.rs b/pallets/admin-utils/src/weights.rs index ace123b14..774e9742a 100644 --- a/pallets/admin-utils/src/weights.rs +++ b/pallets/admin-utils/src/weights.rs @@ -2,9 +2,9 
@@ //! Autogenerated weights for `pallet_admin_utils` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-06-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `morpheus`, CPU: `AMD EPYC 7513 32-Core Processor` +//! HOSTNAME: `Samuels-MacBook-Pro`, CPU: `` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("local")`, DB CACHE: `1024` // Executed Command: @@ -12,7 +12,6 @@ // benchmark // pallet // --chain=local -// --execution=wasm // --wasm-execution=compiled // --pallet=pallet_admin_utils // --extrinsic=* @@ -34,8 +33,7 @@ use core::marker::PhantomData; /// Weight functions needed for `pallet_admin_utils`. pub trait WeightInfo { fn swap_authorities(a: u32, ) -> Weight; - fn sudo_set_min_delegate_take() -> Weight; - fn sudo_set_default_take() -> Weight; + fn sudo_set_default_take() -> Weight; fn sudo_set_serving_rate_limit() -> Weight; fn sudo_set_max_difficulty() -> Weight; fn sudo_set_min_difficulty() -> Weight; @@ -51,8 +49,6 @@ pub trait WeightInfo { fn sudo_set_kappa() -> Weight; fn sudo_set_max_allowed_uids() -> Weight; fn sudo_set_min_allowed_weights() -> Weight; - fn sudo_set_validator_prune_len() -> Weight; - fn sudo_set_scaling_law_power() -> Weight; fn sudo_set_immunity_period() -> Weight; fn sudo_set_max_weight_limit() -> Weight; fn sudo_set_max_registrations_per_block() -> Weight; @@ -68,349 +64,313 @@ pub trait WeightInfo { /// Weights for `pallet_admin_utils` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight<T>(PhantomData<T>); impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Aura Authorities (r:0 w:1) - /// Proof: Aura Authorities (max_values: Some(1), max_size: Some(1025), added: 1520, mode: MaxEncodedLen) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Aura::Authorities` (r:0 w:1) + /// Proof: `Aura::Authorities` (`max_values`: Some(1), `max_size`: Some(1025), added: 1520, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 32]`. fn swap_authorities(a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `632` - // Estimated: `1127` - // Minimum execution time: 11_490_000 picoseconds. - Weight::from_parts(20_410_228, 1127) - // Standard Error: 8_309 - .saturating_add(Weight::from_parts(199_399, 0).saturating_mul(a.into())) + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(6_454_507, 1485) + // Standard Error: 3_660 + .saturating_add(Weight::from_parts(124_709, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: SubtensorModule DefaultTake (r:0 w:1) - /// Proof Skipped: SubtensorModule DefaultTake (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `SubtensorModule::MaxTake` (r:0 w:1) + /// Proof: `SubtensorModule::MaxTake` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn sudo_set_default_take() -> Weight { // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 26_770_000 picoseconds. - Weight::from_parts(27_199_000, 655) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_000_000 picoseconds. 
+ Weight::from_parts(8_000_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule DefaultTake (r:0 w:1) - /// Proof Skipped: SubtensorModule DefaultTake (max_values: Some(1), max_size: None, mode: Measured) - fn sudo_set_min_delegate_take() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 26_770_000 picoseconds. - Weight::from_parts(27_199_000, 655) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule ServingRateLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule ServingRateLimit (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::ServingRateLimit` (r:0 w:1) + /// Proof: `SubtensorModule::ServingRateLimit` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_serving_rate_limit() -> Weight { // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 27_700_000 picoseconds. - Weight::from_parts(28_290_000, 655) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. 
+ Weight::from_parts(9_000_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxDifficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxDifficulty (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxDifficulty` (r:0 w:1) + /// Proof: `SubtensorModule::MaxDifficulty` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_max_difficulty() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_450_000 picoseconds. - Weight::from_parts(47_279_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(15_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinDifficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule MinDifficulty (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MinDifficulty` (r:0 w:1) + /// Proof: `SubtensorModule::MinDifficulty` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_min_difficulty() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_110_000 picoseconds. 
- Weight::from_parts(46_909_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(15_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule WeightsSetRateLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule WeightsSetRateLimit (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::WeightsSetRateLimit` (r:0 w:1) + /// Proof: `SubtensorModule::WeightsSetRateLimit` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_weights_set_rate_limit() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_349_000 picoseconds. - Weight::from_parts(46_970_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. 
+ Weight::from_parts(16_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule WeightsVersionKey (r:0 w:1) - /// Proof Skipped: SubtensorModule WeightsVersionKey (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::WeightsVersionKey` (r:0 w:1) + /// Proof: `SubtensorModule::WeightsVersionKey` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_weights_version_key() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_940_000 picoseconds. - Weight::from_parts(47_460_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule BondsMovingAverage (r:0 w:1) - /// Proof Skipped: SubtensorModule BondsMovingAverage (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::BondsMovingAverage` (r:0 w:1) + /// Proof: `SubtensorModule::BondsMovingAverage` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_bonds_moving_average() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_099_000 picoseconds. - Weight::from_parts(47_510_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedUids (r:1 w:0) - /// Proof Skipped: SubtensorModule MaxAllowedUids (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedValidators (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxAllowedValidators (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxAllowedUids` (r:1 w:0) + /// Proof: `SubtensorModule::MaxAllowedUids` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxAllowedValidators` (r:0 w:1) + /// Proof: `SubtensorModule::MaxAllowedValidators` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_max_allowed_validators() -> Weight { // Proof Size summary in bytes: - // Measured: `1154` - // Estimated: `8412` - // Minimum execution time: 52_599_000 picoseconds. - Weight::from_parts(53_640_000, 8412) + // Measured: `499` + // Estimated: `3964` + // Minimum execution time: 19_000_000 picoseconds. 
+ Weight::from_parts(21_000_000, 3964) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Difficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule Difficulty (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::Difficulty` (r:0 w:1) + /// Proof: `SubtensorModule::Difficulty` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_difficulty() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_240_000 picoseconds. - Weight::from_parts(47_130_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule AdjustmentInterval (r:0 w:1) - /// Proof Skipped: SubtensorModule AdjustmentInterval (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::AdjustmentInterval` (r:0 w:1) + /// Proof: `SubtensorModule::AdjustmentInterval` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_adjustment_interval() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_430_000 picoseconds. - Weight::from_parts(46_790_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule TargetRegistrationsPerInterval (r:0 w:1) - /// Proof Skipped: SubtensorModule TargetRegistrationsPerInterval (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::TargetRegistrationsPerInterval` (r:0 w:1) + /// Proof: `SubtensorModule::TargetRegistrationsPerInterval` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_target_registrations_per_interval() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_899_000 picoseconds. - Weight::from_parts(47_099_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ActivityCutoff (r:0 w:1) - /// Proof Skipped: SubtensorModule ActivityCutoff (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::ActivityCutoff` (r:0 w:1) + /// Proof: `SubtensorModule::ActivityCutoff` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_activity_cutoff() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_029_000 picoseconds. - Weight::from_parts(46_759_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(16_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Rho (r:0 w:1) - /// Proof Skipped: SubtensorModule Rho (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::Rho` (r:0 w:1) + /// Proof: `SubtensorModule::Rho` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_rho() -> Weight { // Proof Size summary in bytes: - // Measured: `903` - // Estimated: `4281` - // Minimum execution time: 30_980_000 picoseconds. 
- Weight::from_parts(31_820_000, 4281) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 11_000_000 picoseconds. + Weight::from_parts(11_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Kappa (r:0 w:1) - /// Proof Skipped: SubtensorModule Kappa (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::Kappa` (r:0 w:1) + /// Proof: `SubtensorModule::Kappa` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_kappa() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_620_000 picoseconds. - Weight::from_parts(46_440_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule SubnetworkN (r:1 w:0) - /// Proof Skipped: SubtensorModule SubnetworkN (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedUids (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxAllowedUids (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::SubnetworkN` (r:1 w:0) + /// Proof: `SubtensorModule::SubnetworkN` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxAllowedUids` (r:0 w:1) + /// Proof: `SubtensorModule::MaxAllowedUids` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_max_allowed_uids() -> Weight { // Proof Size summary in bytes: - // Measured: `1117` - // Estimated: `8301` - // Minimum execution time: 50_270_000 picoseconds. - Weight::from_parts(51_149_000, 8301) + // Measured: `462` + // Estimated: `3927` + // Minimum execution time: 18_000_000 picoseconds. 
+ Weight::from_parts(19_000_000, 3927) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinAllowedWeights (r:0 w:1) - /// Proof Skipped: SubtensorModule MinAllowedWeights (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MinAllowedWeights` (r:0 w:1) + /// Proof: `SubtensorModule::MinAllowedWeights` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_min_allowed_weights() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_990_000 picoseconds. - Weight::from_parts(47_390_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ValidatorPruneLen (r:0 w:1) - /// Proof Skipped: SubtensorModule ValidatorPruneLen (max_values: None, max_size: None, mode: Measured) - fn sudo_set_validator_prune_len() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_939_000 picoseconds. 
- Weight::from_parts(46_960_000, 4697) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ScalingLawPower (r:0 w:1) - /// Proof Skipped: SubtensorModule ScalingLawPower (max_values: None, max_size: None, mode: Measured) - fn sudo_set_scaling_law_power() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_480_000 picoseconds. - Weight::from_parts(46_590_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(15_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ImmunityPeriod (r:0 w:1) - /// Proof Skipped: SubtensorModule ImmunityPeriod (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::ImmunityPeriod` (r:0 w:1) + /// Proof: `SubtensorModule::ImmunityPeriod` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_immunity_period() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_289_000 picoseconds. - Weight::from_parts(46_679_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxWeightsLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxWeightsLimit (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxWeightsLimit` (r:0 w:1) + /// Proof: `SubtensorModule::MaxWeightsLimit` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_max_weight_limit() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_850_000 picoseconds. - Weight::from_parts(46_589_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(16_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxRegistrationsPerBlock (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxRegistrationsPerBlock (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxRegistrationsPerBlock` (r:0 w:1) + /// Proof: `SubtensorModule::MaxRegistrationsPerBlock` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_max_registrations_per_block() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_330_000 picoseconds. - Weight::from_parts(46_490_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. 
+ Weight::from_parts(16_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxBurn (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxBurn (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxBurn` (r:0 w:1) + /// Proof: `SubtensorModule::MaxBurn` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_max_burn() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_390_000 picoseconds. - Weight::from_parts(46_339_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(15_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinBurn (r:0 w:1) - /// Proof Skipped: SubtensorModule MinBurn (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MinBurn` (r:0 w:1) + /// Proof: `SubtensorModule::MinBurn` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_min_burn() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_189_000 picoseconds. 
- Weight::from_parts(46_109_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(15_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworkPowRegistrationAllowed (r:0 w:1) - /// Proof Skipped: SubtensorModule NetworkPowRegistrationAllowed (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworkRegistrationAllowed` (r:0 w:1) + /// Proof: `SubtensorModule::NetworkRegistrationAllowed` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_network_registration_allowed() -> Weight { // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 33_600_000 picoseconds. - Weight::from_parts(34_599_000, 655) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Tempo (r:0 w:1) - /// Proof Skipped: SubtensorModule Tempo (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::Tempo` (r:0 w:1) + /// Proof: `SubtensorModule::Tempo` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_tempo() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 44_739_000 picoseconds. - Weight::from_parts(45_489_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. 
+ Weight::from_parts(16_000_000, 3921) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -436,349 +396,313 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Aura Authorities (r:0 w:1) - /// Proof: Aura Authorities (max_values: Some(1), max_size: Some(1025), added: 1520, mode: MaxEncodedLen) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Aura::Authorities` (r:0 w:1) + /// Proof: `Aura::Authorities` (`max_values`: Some(1), `max_size`: Some(1025), added: 1520, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 32]`. fn swap_authorities(a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `632` - // Estimated: `1127` - // Minimum execution time: 11_490_000 picoseconds. - Weight::from_parts(20_410_228, 1127) - // Standard Error: 8_309 - .saturating_add(Weight::from_parts(199_399, 0).saturating_mul(a.into())) + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 4_000_000 picoseconds. 
+ Weight::from_parts(6_454_507, 1485) + // Standard Error: 3_660 + .saturating_add(Weight::from_parts(124_709, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: SubtensorModule DefaultTake (r:0 w:1) - /// Proof Skipped: SubtensorModule DefaultTake (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `SubtensorModule::MaxTake` (r:0 w:1) + /// Proof: `SubtensorModule::MaxTake` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn sudo_set_default_take() -> Weight { // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 26_770_000 picoseconds. - Weight::from_parts(27_199_000, 655) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(8_000_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule DefaultTake (r:0 w:1) - /// Proof Skipped: SubtensorModule DefaultTake (max_values: Some(1), max_size: None, mode: Measured) - fn sudo_set_min_delegate_take() -> Weight { - // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 26_770_000 picoseconds. - Weight::from_parts(27_199_000, 655) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule ServingRateLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule ServingRateLimit (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::ServingRateLimit` (r:0 w:1) + /// Proof: `SubtensorModule::ServingRateLimit` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_serving_rate_limit() -> Weight { // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 27_700_000 picoseconds. - Weight::from_parts(28_290_000, 655) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. 
+ Weight::from_parts(9_000_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxDifficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxDifficulty (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxDifficulty` (r:0 w:1) + /// Proof: `SubtensorModule::MaxDifficulty` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_max_difficulty() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_450_000 picoseconds. - Weight::from_parts(47_279_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(15_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinDifficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule MinDifficulty (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MinDifficulty` (r:0 w:1) + /// Proof: `SubtensorModule::MinDifficulty` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_min_difficulty() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_110_000 picoseconds. 
- Weight::from_parts(46_909_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(15_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule WeightsSetRateLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule WeightsSetRateLimit (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::WeightsSetRateLimit` (r:0 w:1) + /// Proof: `SubtensorModule::WeightsSetRateLimit` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_weights_set_rate_limit() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_349_000 picoseconds. - Weight::from_parts(46_970_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. 
+ Weight::from_parts(16_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule WeightsVersionKey (r:0 w:1) - /// Proof Skipped: SubtensorModule WeightsVersionKey (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::WeightsVersionKey` (r:0 w:1) + /// Proof: `SubtensorModule::WeightsVersionKey` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_weights_version_key() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_940_000 picoseconds. - Weight::from_parts(47_460_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule BondsMovingAverage (r:0 w:1) - /// Proof Skipped: SubtensorModule BondsMovingAverage (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::BondsMovingAverage` (r:0 w:1) + /// Proof: `SubtensorModule::BondsMovingAverage` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_bonds_moving_average() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_099_000 picoseconds. - Weight::from_parts(47_510_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedUids (r:1 w:0) - /// Proof Skipped: SubtensorModule MaxAllowedUids (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedValidators (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxAllowedValidators (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxAllowedUids` (r:1 w:0) + /// Proof: `SubtensorModule::MaxAllowedUids` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxAllowedValidators` (r:0 w:1) + /// Proof: `SubtensorModule::MaxAllowedValidators` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_max_allowed_validators() -> Weight { // Proof Size summary in bytes: - // Measured: `1154` - // Estimated: `8412` - // Minimum execution time: 52_599_000 picoseconds. - Weight::from_parts(53_640_000, 8412) + // Measured: `499` + // Estimated: `3964` + // Minimum execution time: 19_000_000 picoseconds. 
+ Weight::from_parts(21_000_000, 3964) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Difficulty (r:0 w:1) - /// Proof Skipped: SubtensorModule Difficulty (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::Difficulty` (r:0 w:1) + /// Proof: `SubtensorModule::Difficulty` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_difficulty() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_240_000 picoseconds. - Weight::from_parts(47_130_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule AdjustmentInterval (r:0 w:1) - /// Proof Skipped: SubtensorModule AdjustmentInterval (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::AdjustmentInterval` (r:0 w:1) + /// Proof: `SubtensorModule::AdjustmentInterval` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_adjustment_interval() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_430_000 picoseconds. - Weight::from_parts(46_790_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule TargetRegistrationsPerInterval (r:0 w:1) - /// Proof Skipped: SubtensorModule TargetRegistrationsPerInterval (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::TargetRegistrationsPerInterval` (r:0 w:1) + /// Proof: `SubtensorModule::TargetRegistrationsPerInterval` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_target_registrations_per_interval() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_899_000 picoseconds. - Weight::from_parts(47_099_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ActivityCutoff (r:0 w:1) - /// Proof Skipped: SubtensorModule ActivityCutoff (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::ActivityCutoff` (r:0 w:1) + /// Proof: `SubtensorModule::ActivityCutoff` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_activity_cutoff() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_029_000 picoseconds. - Weight::from_parts(46_759_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(16_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Rho (r:0 w:1) - /// Proof Skipped: SubtensorModule Rho (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::Rho` (r:0 w:1) + /// Proof: `SubtensorModule::Rho` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_rho() -> Weight { // Proof Size summary in bytes: - // Measured: `903` - // Estimated: `4281` - // Minimum execution time: 30_980_000 picoseconds. 
- Weight::from_parts(31_820_000, 4281) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 11_000_000 picoseconds. + Weight::from_parts(11_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Kappa (r:0 w:1) - /// Proof Skipped: SubtensorModule Kappa (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::Kappa` (r:0 w:1) + /// Proof: `SubtensorModule::Kappa` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_kappa() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_620_000 picoseconds. - Weight::from_parts(46_440_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule SubnetworkN (r:1 w:0) - /// Proof Skipped: SubtensorModule SubnetworkN (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxAllowedUids (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxAllowedUids (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::SubnetworkN` (r:1 w:0) + /// Proof: `SubtensorModule::SubnetworkN` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxAllowedUids` (r:0 w:1) + /// Proof: `SubtensorModule::MaxAllowedUids` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_max_allowed_uids() -> Weight { // Proof Size summary in bytes: - // Measured: `1117` - // Estimated: `8301` - // Minimum execution time: 50_270_000 picoseconds. - Weight::from_parts(51_149_000, 8301) + // Measured: `462` + // Estimated: `3927` + // Minimum execution time: 18_000_000 picoseconds. 
+ Weight::from_parts(19_000_000, 3927) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinAllowedWeights (r:0 w:1) - /// Proof Skipped: SubtensorModule MinAllowedWeights (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MinAllowedWeights` (r:0 w:1) + /// Proof: `SubtensorModule::MinAllowedWeights` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_min_allowed_weights() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_990_000 picoseconds. - Weight::from_parts(47_390_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ValidatorPruneLen (r:0 w:1) - /// Proof Skipped: SubtensorModule ValidatorPruneLen (max_values: None, max_size: None, mode: Measured) - fn sudo_set_validator_prune_len() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_939_000 picoseconds. 
- Weight::from_parts(46_960_000, 4697) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ScalingLawPower (r:0 w:1) - /// Proof Skipped: SubtensorModule ScalingLawPower (max_values: None, max_size: None, mode: Measured) - fn sudo_set_scaling_law_power() -> Weight { - // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_480_000 picoseconds. - Weight::from_parts(46_590_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(15_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule ImmunityPeriod (r:0 w:1) - /// Proof Skipped: SubtensorModule ImmunityPeriod (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::ImmunityPeriod` (r:0 w:1) + /// Proof: `SubtensorModule::ImmunityPeriod` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_immunity_period() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_289_000 picoseconds. - Weight::from_parts(46_679_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(15_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxWeightsLimit (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxWeightsLimit (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxWeightsLimit` (r:0 w:1) + /// Proof: `SubtensorModule::MaxWeightsLimit` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_max_weight_limit() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_850_000 picoseconds. - Weight::from_parts(46_589_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. 
+ Weight::from_parts(16_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxRegistrationsPerBlock (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxRegistrationsPerBlock (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxRegistrationsPerBlock` (r:0 w:1) + /// Proof: `SubtensorModule::MaxRegistrationsPerBlock` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_max_registrations_per_block() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_330_000 picoseconds. - Weight::from_parts(46_490_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. 
+ Weight::from_parts(16_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MaxBurn (r:0 w:1) - /// Proof Skipped: SubtensorModule MaxBurn (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MaxBurn` (r:0 w:1) + /// Proof: `SubtensorModule::MaxBurn` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_max_burn() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_390_000 picoseconds. - Weight::from_parts(46_339_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(15_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule MinBurn (r:0 w:1) - /// Proof Skipped: SubtensorModule MinBurn (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::MinBurn` (r:0 w:1) + /// Proof: `SubtensorModule::MinBurn` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_min_burn() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 45_189_000 picoseconds. 
- Weight::from_parts(46_109_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(15_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworkPowRegistrationAllowed (r:0 w:1) - /// Proof Skipped: SubtensorModule NetworkPowRegistrationAllowed (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworkRegistrationAllowed` (r:0 w:1) + /// Proof: `SubtensorModule::NetworkRegistrationAllowed` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_network_registration_allowed() -> Weight { // Proof Size summary in bytes: - // Measured: `655` - // Estimated: `655` - // Minimum execution time: 33_600_000 picoseconds. - Weight::from_parts(34_599_000, 655) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: SubtensorModule NetworksAdded (r:1 w:0) - /// Proof Skipped: SubtensorModule NetworksAdded (max_values: None, max_size: None, mode: Measured) - /// Storage: SubtensorModule Tempo (r:0 w:1) - /// Proof Skipped: SubtensorModule Tempo (max_values: None, max_size: None, mode: Measured) + /// Storage: `SubtensorModule::NetworksAdded` (r:1 w:0) + /// Proof: `SubtensorModule::NetworksAdded` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SubtensorModule::Tempo` (r:0 w:1) + /// Proof: `SubtensorModule::Tempo` (`max_values`: None, `max_size`: None, mode: `Measured`) fn sudo_set_tempo() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 44_739_000 picoseconds. - Weight::from_parts(45_489_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 15_000_000 picoseconds. 
+ Weight::from_parts(16_000_000, 3921) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/admin-utils/tests/mock.rs b/pallets/admin-utils/tests/mock.rs index c0985b010..48d41de75 100644 --- a/pallets/admin-utils/tests/mock.rs +++ b/pallets/admin-utils/tests/mock.rs @@ -108,7 +108,9 @@ parameter_types! { pub const InitialSubnetLimit: u16 = 10; // Max 10 subnets. pub const InitialNetworkRateLimit: u64 = 0; pub const InitialTargetStakesPerInterval: u16 = 1; - + pub const InitialAlphaHigh: u16 = 900; // Represents 0.9 as per the production default + pub const InitialAlphaLow: u16 = 700; // Represents 0.7 as per the production default + pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn } impl pallet_subtensor::Config for Test { @@ -160,6 +162,9 @@ impl pallet_subtensor::Config for Test { type InitialSubnetLimit = InitialSubnetLimit; type InitialNetworkRateLimit = InitialNetworkRateLimit; type InitialTargetStakesPerInterval = InitialTargetStakesPerInterval; + type AlphaHigh = InitialAlphaHigh; + type AlphaLow = InitialAlphaLow; + type LiquidAlphaOn = InitialLiquidAlphaOn; } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] @@ -462,6 +467,18 @@ impl pallet_admin_utils::SubtensorInterface f fn set_commit_reveal_weights_enabled(netuid: u16, enabled: bool) { SubtensorModule::set_commit_reveal_weights_enabled(netuid, enabled); } + + fn set_alpha_high(netuid: u16, alpha_high: u16) -> Result<(), DispatchError> { + SubtensorModule::set_alpha_high(netuid, alpha_high) + } + + fn set_alpha_low(netuid: u16, alpha_low: u16) -> Result<(), DispatchError> { + SubtensorModule::set_alpha_low(netuid, alpha_low) + } + + fn set_liquid_alpha_enabled(netuid: u16, enabled: bool) { + SubtensorModule::set_liquid_alpha_enabled(netuid, enabled); + } } impl pallet_admin_utils::Config for Test { diff --git a/pallets/admin-utils/tests/tests.rs b/pallets/admin-utils/tests/tests.rs 
index f87b43e74..a71ce6002 100644 --- a/pallets/admin-utils/tests/tests.rs +++ b/pallets/admin-utils/tests/tests.rs @@ -4,6 +4,7 @@ use frame_system::Config; use pallet_admin_utils::Error; use pallet_subtensor::Event; use sp_core::U256; +use substrate_fixed::types::I32F32; mod mock; use mock::*; @@ -1178,3 +1179,80 @@ fn test_sudo_set_target_stakes_per_interval() { assert_eq!(SubtensorModule::get_target_stakes_per_interval(), to_be_set); }); } + +#[test] +fn test_sudo_set_alpha_high() { + new_test_ext().execute_with(|| { + let netuid: u16 = 1; + let to_be_set: u16 = 10; + let init_value = SubtensorModule::get_alpha_high(netuid); + assert_eq!( + AdminUtils::sudo_set_alpha_high( + <::RuntimeOrigin>::signed(U256::from(1)), + netuid, + to_be_set + ), + Err(DispatchError::BadOrigin.into()) + ); + assert_eq!(SubtensorModule::get_alpha_high(netuid), init_value); + assert_ok!(AdminUtils::sudo_set_liquid_alpha_enabled( + <::RuntimeOrigin>::root(), + netuid, + true, + )); + assert_ok!(AdminUtils::sudo_set_alpha_high( + <::RuntimeOrigin>::root(), + netuid, + to_be_set + )); + let expected_value: I32F32 = I32F32::from_num(to_be_set as f64 / 1000.0); + assert_eq!(SubtensorModule::get_alpha_high(netuid), expected_value); + }); +} + +#[test] +fn test_sudo_set_alpha_low() { + new_test_ext().execute_with(|| { + let netuid: u16 = 1; + let to_be_set: u16 = 10; + let init_value = SubtensorModule::get_alpha_low(netuid); + assert_eq!( + AdminUtils::sudo_set_alpha_low( + <::RuntimeOrigin>::signed(U256::from(1)), + netuid, + to_be_set + ), + Err(DispatchError::BadOrigin.into()) + ); + assert_eq!(SubtensorModule::get_alpha_low(netuid), init_value); + assert_ok!(AdminUtils::sudo_set_liquid_alpha_enabled( + <::RuntimeOrigin>::root(), + netuid, + true, + )); + assert_ok!(AdminUtils::sudo_set_alpha_low( + <::RuntimeOrigin>::root(), + netuid, + to_be_set + )); + let expected_value: I32F32 = I32F32::from_num(to_be_set as f64 / 1000.0); + assert_eq!(SubtensorModule::get_alpha_low(netuid), 
expected_value); + }); +} + +#[test] +fn test_sudo_set_liquid_alpha_enabled() { + new_test_ext().execute_with(|| { + let netuid: u16 = 1; + let enabled: bool = true; + assert_eq!(!enabled, SubtensorModule::get_liquid_alpha_enabled(netuid)); + + assert_ok!(AdminUtils::sudo_set_liquid_alpha_enabled( + <::RuntimeOrigin>::root(), + netuid, + enabled + )); + + assert_eq!(enabled, SubtensorModule::get_liquid_alpha_enabled(netuid)); + }); +} diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 6e252ecea..dec9177d2 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -1,11 +1,9 @@ #![cfg_attr(not(feature = "std"), no_std)] +mod benchmarking; #[cfg(test)] mod tests; -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; - pub mod types; pub mod weights; @@ -234,7 +232,7 @@ where pub fn get_priority_vanilla() -> u64 { // Return high priority so that every extrinsic except set_weights function will // have a higher priority than the set_weights call - u64::max_value() + u64::MAX } } diff --git a/pallets/registry/src/lib.rs b/pallets/registry/src/lib.rs index 026c03260..e32b1fc5f 100644 --- a/pallets/registry/src/lib.rs +++ b/pallets/registry/src/lib.rs @@ -3,7 +3,6 @@ #[cfg(test)] mod tests; -#[cfg(feature = "runtime-benchmarks")] mod benchmarking; pub mod types; pub mod weights; diff --git a/pallets/subtensor/Cargo.toml b/pallets/subtensor/Cargo.toml index cbcf76c82..7b9f6bc37 100644 --- a/pallets/subtensor/Cargo.toml +++ b/pallets/subtensor/Cargo.toml @@ -43,6 +43,7 @@ hex = { workspace = true } pallet-collective = { version = "4.0.0-dev", default-features = false, path = "../collective" } pallet-membership = { workspace = true } hex-literal = { workspace = true } +num-traits = { version = "0.2.19", default-features = false, features = ["libm"] } [dev-dependencies] pallet-balances = { workspace = true, features = ["std"] } diff --git a/pallets/subtensor/src/epoch.rs b/pallets/subtensor/src/epoch.rs index 
cc146dd59..40673c02b 100644 --- a/pallets/subtensor/src/epoch.rs +++ b/pallets/subtensor/src/epoch.rs @@ -118,19 +118,19 @@ impl Pallet { // Mask weights that are not from permitted validators. inplace_mask_rows(&validator_forbids, &mut weights); - // log::trace!( "W (permit): {:?}", &weights ); + log::trace!("W (permit): {:?}", &weights); // Remove self-weight by masking diagonal. inplace_mask_diag(&mut weights); - // log::trace!( "W (permit+diag):\n{:?}\n", &weights ); + log::trace!("W (permit+diag):\n{:?}\n", &weights); // Mask outdated weights: remove weights referring to deregistered neurons. inplace_mask_matrix(&outdated, &mut weights); - // log::trace!( "W (permit+diag+outdate):\n{:?}\n", &weights ); + log::trace!("W (permit+diag+outdate):\n{:?}\n", &weights); // Normalize remaining weights. inplace_row_normalize(&mut weights); - // log::trace!( "W (mask+norm):\n{:?}\n", &weights ); + log::trace!("W (mask+norm):\n{:?}\n", &weights); // ================================ // == Consensus, Validator Trust == @@ -167,20 +167,17 @@ impl Pallet { let mut bonds: Vec> = Self::get_bonds(netuid); inplace_mask_matrix(&outdated, &mut bonds); // mask outdated bonds inplace_col_normalize(&mut bonds); // sum_i b_ij = 1 - // log::trace!( "B:\n{:?}\n", &bonds ); + log::trace!("B:\n{:?}\n", &bonds); // Compute bonds delta column normalized. let mut bonds_delta: Vec> = row_hadamard(&weights, &active_stake); // ΔB = W◦S inplace_col_normalize(&mut bonds_delta); // sum_i b_ij = 1 - // log::trace!( "ΔB:\n{:?}\n", &bonds_delta ); + log::trace!("ΔB:\n{:?}\n", &bonds_delta); + // Compute the Exponential Moving Average (EMA) of bonds. + let mut ema_bonds = Self::compute_ema_bonds(netuid, consensus.clone(), bonds_delta, bonds); - // Compute bonds moving average. 
- let bonds_moving_average: I64F64 = - I64F64::from_num(Self::get_bonds_moving_average(netuid)) / I64F64::from_num(1_000_000); - let alpha: I32F32 = I32F32::from_num(1) - I32F32::from_num(bonds_moving_average); - let mut ema_bonds: Vec> = mat_ema(&bonds_delta, &bonds, alpha); inplace_col_normalize(&mut ema_bonds); // sum_i b_ij = 1 - // log::trace!( "emaB:\n{:?}\n", &ema_bonds ); + log::trace!("emaB:\n{:?}\n", &ema_bonds); // Compute dividends: d_i = SUM(j) b_ij * inc_j let mut dividends: Vec = matmul_transpose(&ema_bonds, &incentive); @@ -355,7 +352,7 @@ impl Pallet { pub fn epoch(netuid: u16, rao_emission: u64) -> Vec<(T::AccountId, u64, u64)> { // Get subnetwork size. let n: u16 = Self::get_subnetwork_n(netuid); - log::trace!("n: {:?}", n); + log::trace!("Number of Neurons in Network: {:?}", n); // ====================== // == Active & updated == @@ -401,10 +398,11 @@ impl Pallet { for (uid_i, hotkey) in &hotkeys { stake_64[*uid_i as usize] = I64F64::from_num(Self::get_total_stake_for_hotkey(hotkey)); } + log::trace!("Stake : {:?}", &stake_64); inplace_normalize_64(&mut stake_64); let stake: Vec = vec_fixed64_to_fixed32(stake_64); // range: I32F32(0, 1) - log::trace!("S: {:?}", &stake); + log::trace!("Normalised Stake: {:?}", &stake); // ======================= // == Validator permits == @@ -439,7 +437,7 @@ impl Pallet { // Normalize active stake. inplace_normalize(&mut active_stake); - log::trace!("S:\n{:?}\n", &active_stake); + log::trace!("Active Stake:\n{:?}\n", &active_stake); // ============= // == Weights == @@ -447,15 +445,15 @@ impl Pallet { // Access network weights row unnormalized. let mut weights: Vec> = Self::get_weights_sparse(netuid); - // log::trace!( "W: {:?}", &weights ); + log::trace!("Weights: {:?}", &weights); // Mask weights that are not from permitted validators. 
weights = mask_rows_sparse(&validator_forbids, &weights); - // log::trace!( "W (permit): {:?}", &weights ); + log::trace!("Weights (permit): {:?}", &weights); // Remove self-weight by masking diagonal. weights = mask_diag_sparse(&weights); - // log::trace!( "W (permit+diag): {:?}", &weights ); + log::trace!("Weights (permit+diag): {:?}", &weights); // Remove weights referring to deregistered neurons. weights = vec_mask_sparse_matrix( @@ -464,11 +462,11 @@ impl Pallet { &block_at_registration, &|updated, registered| updated <= registered, ); - // log::trace!( "W (permit+diag+outdate): {:?}", &weights ); + log::trace!("Weights (permit+diag+outdate): {:?}", &weights); // Normalize remaining weights. inplace_row_normalize_sparse(&mut weights); - // log::trace!( "W (mask+norm): {:?}", &weights ); + log::trace!("Weights (mask+norm): {:?}", &weights); // ================================ // == Consensus, Validator Trust == @@ -476,18 +474,18 @@ impl Pallet { // Compute preranks: r_j = SUM(i) w_ij * s_i let preranks: Vec = matmul_sparse(&weights, &active_stake, n); - // log::trace!( "R (before): {:?}", &preranks ); + log::trace!("Ranks (before): {:?}", &preranks); // Clip weights at majority consensus let kappa: I32F32 = Self::get_float_kappa(netuid); // consensus majority ratio, e.g. 51%. let consensus: Vec = weighted_median_col_sparse(&active_stake, &weights, n, kappa); - log::trace!("C: {:?}", &consensus); + log::trace!("Consensus: {:?}", &consensus); weights = col_clip_sparse(&weights, &consensus); - // log::trace!( "W: {:?}", &weights ); + log::trace!("Weights: {:?}", &weights); let validator_trust: Vec = row_sum_sparse(&weights); - log::trace!("Tv: {:?}", &validator_trust); + log::trace!("Validator Trust: {:?}", &validator_trust); // ============================= // == Ranks, Trust, Incentive == @@ -495,7 +493,7 @@ impl Pallet { // Compute ranks: r_j = SUM(i) w_ij * s_i. 
let mut ranks: Vec = matmul_sparse(&weights, &active_stake, n); - // log::trace!( "R (after): {:?}", &ranks ); + log::trace!("Ranks (after): {:?}", &ranks); // Compute server trust: ratio of rank after vs. rank before. let trust: Vec = vecdiv(&ranks, &preranks); // range: I32F32(0, 1) @@ -503,7 +501,7 @@ impl Pallet { inplace_normalize(&mut ranks); // range: I32F32(0, 1) let incentive: Vec = ranks.clone(); - log::trace!("I (=R): {:?}", &incentive); + log::trace!("Incentive (=Rank): {:?}", &incentive); // ========================= // == Bonds and Dividends == @@ -511,7 +509,7 @@ impl Pallet { // Access network bonds. let mut bonds: Vec> = Self::get_bonds_sparse(netuid); - // log::trace!( "B: {:?}", &bonds ); + log::trace!("B: {:?}", &bonds); // Remove bonds referring to deregistered neurons. bonds = vec_mask_sparse_matrix( @@ -520,35 +518,32 @@ impl Pallet { &block_at_registration, &|updated, registered| updated <= registered, ); - // log::trace!( "B (outdatedmask): {:?}", &bonds ); + log::trace!("B (outdatedmask): {:?}", &bonds); // Normalize remaining bonds: sum_i b_ij = 1. inplace_col_normalize_sparse(&mut bonds, n); - // log::trace!( "B (mask+norm): {:?}", &bonds ); + log::trace!("B (mask+norm): {:?}", &bonds); // Compute bonds delta column normalized. let mut bonds_delta: Vec> = row_hadamard_sparse(&weights, &active_stake); // ΔB = W◦S (outdated W masked) - // log::trace!( "ΔB: {:?}", &bonds_delta ); + log::trace!("ΔB: {:?}", &bonds_delta); // Normalize bonds delta. inplace_col_normalize_sparse(&mut bonds_delta, n); // sum_i b_ij = 1 - // log::trace!( "ΔB (norm): {:?}", &bonds_delta ); - - // Compute bonds moving average. 
- let bonds_moving_average: I64F64 = - I64F64::from_num(Self::get_bonds_moving_average(netuid)) / I64F64::from_num(1_000_000); - let alpha: I32F32 = I32F32::from_num(1) - I32F32::from_num(bonds_moving_average); - let mut ema_bonds: Vec> = mat_ema_sparse(&bonds_delta, &bonds, alpha); + log::trace!("ΔB (norm): {:?}", &bonds_delta); + // Compute the Exponential Moving Average (EMA) of bonds. + let mut ema_bonds = + Self::compute_ema_bonds_sparse(netuid, consensus.clone(), bonds_delta, bonds); // Normalize EMA bonds. inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1 - // log::trace!( "emaB: {:?}", &ema_bonds ); + log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds); // Compute dividends: d_i = SUM(j) b_ij * inc_j. // range: I32F32(0, 1) let mut dividends: Vec = matmul_transpose_sparse(&ema_bonds, &incentive); inplace_normalize(&mut dividends); - log::trace!("D: {:?}", ÷nds); + log::trace!("Dividends: {:?}", ÷nds); // ================================= // == Emission and Pruning scores == @@ -614,16 +609,25 @@ impl Pallet { .map(|e: &I96F32| e.to_num::()) .collect(); - log::trace!("nSE: {:?}", &normalized_server_emission); - log::trace!("SE: {:?}", &server_emission); - log::trace!("nVE: {:?}", &normalized_validator_emission); - log::trace!("VE: {:?}", &validator_emission); - log::trace!("nCE: {:?}", &normalized_combined_emission); - log::trace!("CE: {:?}", &combined_emission); + log::trace!( + "Normalized Server Emission: {:?}", + &normalized_server_emission + ); + log::trace!("Server Emission: {:?}", &server_emission); + log::trace!( + "Normalized Validator Emission: {:?}", + &normalized_validator_emission + ); + log::trace!("Validator Emission: {:?}", &validator_emission); + log::trace!( + "Normalized Combined Emission: {:?}", + &normalized_combined_emission + ); + log::trace!("Combined Emission: {:?}", &combined_emission); // Set pruning scores using combined emission scores. 
let pruning_scores: Vec = normalized_combined_emission.clone(); - log::trace!("P: {:?}", &pruning_scores); + log::trace!("Pruning Scores: {:?}", &pruning_scores); // =================== // == Value storage == @@ -811,4 +815,386 @@ impl Pallet { } bonds } + + /// Calculate the logistic function parameters 'a' and 'b' based on alpha and consensus values. + /// + /// # Args: + /// * `alpha_high` - The high alpha value. + /// * `alpha_low` - The low alpha value. + /// * `consensus_high` - The high consensus value. + /// * `consensus_low` - The low consensus value. + /// + /// # Returns: + /// A tuple containing the slope 'a' and intercept 'b' for the logistic function. + pub fn calculate_logistic_params( + alpha_high: I32F32, + alpha_low: I32F32, + consensus_high: I32F32, + consensus_low: I32F32, + ) -> (I32F32, I32F32) { + log::info!("alpha_high: {:?}", alpha_high); + log::info!("alpha_low: {:?}", alpha_low); + log::trace!("consensus_high: {:?}", consensus_high); + log::trace!("consensus_low: {:?}", consensus_low); + // Check for division by zero + // extra caution to ensure we never divide by zero + if consensus_high == consensus_low + || consensus_high < consensus_low + || alpha_low == 0 + || alpha_high == 0 + { + // Return 0 for both 'a' and 'b' when consensus values are equal + return (I32F32::from_num(0.0), I32F32::from_num(0.0)); + } + + // Calculate the slope 'a' of the logistic function. + // a = (ln((1 / alpha_high - 1)) - ln((1 / alpha_low - 1))) / (consensus_low - consensus_high) + let a = (safe_ln( + I32F32::from_num(1.0) + .saturating_div(alpha_high) + .saturating_sub(I32F32::from_num(1.0)), + ) + .saturating_sub(safe_ln( + I32F32::from_num(1.0) + .saturating_div(alpha_low) + .saturating_sub(I32F32::from_num(1.0)), + ))) + .saturating_div(consensus_low.saturating_sub(consensus_high)); + log::trace!("a: {:?}", a); + + // Calculate the intercept 'b' of the logistic function. 
+ // b = ln((1 / alpha_low - 1)) + a * consensus_low + let b = safe_ln( + I32F32::from_num(1.0) + .saturating_div(alpha_low) + .saturating_sub(I32F32::from_num(1.0)), + ) + .saturating_add(a.saturating_mul(consensus_low)); + log::trace!("b: {:?}", b); + + // Return the calculated slope 'a' and intercept 'b'. + (a, b) + } + + /// Compute the alpha values using the logistic function parameters 'a' and 'b'. + /// + /// # Args: + /// * `consensus` - A vector of consensus values. + /// * `a` - The slope of the logistic function. + /// * `b` - The intercept of the logistic function. + /// + /// # Returns: + /// A vector of computed alpha values. + pub fn compute_alpha_values(consensus: &Vec, a: I32F32, b: I32F32) -> Vec { + // Compute the alpha values for each consensus value. + let alpha: Vec = consensus + .iter() + .map(|c| { + // Calculate the exponent value for the logistic function. + // exp_val = exp(b - a * c) + let exp_val = safe_exp(b.saturating_sub(a.saturating_mul(*c))); + + // Compute the alpha value using the logistic function formula. + // alpha = 1 / (1 + exp_val) + I32F32::from_num(1.0).saturating_div(I32F32::from_num(1.0).saturating_add(exp_val)) + }) + .collect(); + + // Log the computed alpha values for debugging purposes. + log::trace!("alpha: {:?}", alpha); + + // Return the computed alpha values. + alpha + } + + /// Clamp the alpha values between alpha_high and alpha_low. + /// + /// # Args: + /// * `alpha` - A vector of alpha values. + /// * `alpha_high` - The high alpha value. + /// * `alpha_low` - The low alpha value. + /// + /// # Returns: + /// A vector of clamped alpha values. + pub fn clamp_alpha_values( + alpha: Vec, + alpha_high: I32F32, + alpha_low: I32F32, + ) -> Vec { + let clamped_alpha: Vec = alpha + .iter() + .map(|a| { + // First, clamp the value to ensure it does not exceed the upper bound (alpha_high). + // If 'a' is greater than 'alpha_high', it will be set to 'alpha_high'. 
+ // If 'a' is less than or equal to 'alpha_high', it remains unchanged. + let clamped_a = a + .min(&alpha_high) + // Next, clamp the value to ensure it does not go below the lower bound (alpha_low). + // If the value (after the first clamping) is less than 'alpha_low', it will be set to 'alpha_low'. + // If the value is greater than or equal to 'alpha_low', it remains unchanged. + .max(&alpha_low); + // Return the clamped value. + *clamped_a + }) + .collect(); + log::trace!("alpha_clamped: {:?}", clamped_alpha); + clamped_alpha + } + + /// Compute the Exponential Moving Average (EMA) of bonds using the clamped alpha values for a sparse matrix. + /// + /// # Args: + /// * `bonds_delta` - A vector of bond deltas. + /// * `bonds` - A vector of bonds. + /// * `alpha` - A vector of clamped alpha values. + /// + /// # Returns: + /// A vector of EMA bonds. + pub fn compute_ema_bonds_with_liquid_alpha_sparse( + bonds_delta: &Vec>, + bonds: &Vec>, + alpha: Vec, + ) -> Vec> { + // Compute the Exponential Moving Average (EMA) of bonds using the provided clamped alpha values. + let ema_bonds = mat_ema_alpha_vec_sparse(bonds_delta, bonds, &alpha); + + // Log the computed EMA bonds for debugging purposes. + log::trace!( + "Exponential Moving Average Bonds Liquid Alpha: {:?}", + ema_bonds + ); + + // Return the computed EMA bonds. + ema_bonds + } + + /// Compute the Exponential Moving Average (EMA) of bonds using the clamped alpha values. + /// + /// # Args: + /// * `bonds_delta` - A vector of bond deltas. + /// * `bonds` - A vector of bonds. + /// * `alpha` - A vector of clamped alpha values. + /// + /// # Returns: + /// A vector of EMA bonds. + pub fn compute_ema_bonds_with_liquid_alpha( + bonds_delta: &Vec>, + bonds: &Vec>, + alpha: Vec, + ) -> Vec> { + // Compute the Exponential Moving Average (EMA) of bonds using the provided clamped alpha values. + let ema_bonds = mat_ema_alpha_vec(bonds_delta, bonds, &alpha); + + // Log the computed EMA bonds for debugging purposes. 
+ log::trace!( + "Exponential Moving Average Bonds Liquid Alpha: {:?}", + ema_bonds + ); + + // Return the computed EMA bonds. + ema_bonds + } + + /// Compute the Exponential Moving Average (EMA) of bonds using a normal alpha value for a sparse matrix. + /// + /// # Args: + /// * `bonds_delta` - A vector of bond deltas. + /// * `bonds` - A vector of bonds. + /// * `netuid` - The network ID. + /// + /// # Returns: + /// A vector of EMA bonds. + pub fn compute_ema_bonds_normal_sparse( + bonds_delta: &Vec>, + bonds: &Vec>, + netuid: u16, + ) -> Vec> { + // Retrieve the bonds moving average for the given network ID and scale it down. + let bonds_moving_average: I64F64 = I64F64::from_num(Self::get_bonds_moving_average(netuid)) + .saturating_div(I64F64::from_num(1_000_000)); + + // Calculate the alpha value for the EMA calculation. + // Alpha is derived by subtracting the scaled bonds moving average from 1. + let alpha: I32F32 = + I32F32::from_num(1).saturating_sub(I32F32::from_num(bonds_moving_average)); + + // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. + let ema_bonds = mat_ema_sparse(bonds_delta, bonds, alpha); + + // Log the computed EMA bonds for debugging purposes. + log::trace!("Exponential Moving Average Bonds Normal: {:?}", ema_bonds); + + // Return the computed EMA bonds. + ema_bonds + } + + /// Compute the Exponential Moving Average (EMA) of bonds using a normal alpha value. + /// + /// # Args: + /// * `bonds_delta` - A vector of bond deltas. + /// * `bonds` - A vector of bonds. + /// * `netuid` - The network ID. + /// + /// # Returns: + /// A vector of EMA bonds. + pub fn compute_ema_bonds_normal( + bonds_delta: &Vec>, + bonds: &Vec>, + netuid: u16, + ) -> Vec> { + // Retrieve the bonds moving average for the given network ID and scale it down. 
+ let bonds_moving_average: I64F64 = I64F64::from_num(Self::get_bonds_moving_average(netuid)) + .saturating_div(I64F64::from_num(1_000_000)); + + // Calculate the alpha value for the EMA calculation. + // Alpha is derived by subtracting the scaled bonds moving average from 1. + let alpha: I32F32 = + I32F32::from_num(1).saturating_sub(I32F32::from_num(bonds_moving_average)); + + // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. + let ema_bonds = mat_ema(bonds_delta, bonds, alpha); + + // Log the computed EMA bonds for debugging purposes. + log::trace!("Exponential Moving Average Bonds Normal: {:?}", ema_bonds); + + // Return the computed EMA bonds. + ema_bonds + } + + /// Compute the Exponential Moving Average (EMA) of bonds based on the Liquid Alpha setting for a sparse matrix. + /// + /// # Args: + /// * `netuid` - The network ID. + /// * `consensus` - A vector of consensus values. + /// * `bonds_delta` - A vector of bond deltas. + /// * `bonds` - A vector of bonds. + /// + /// # Returns: + /// A vector of EMA bonds. + pub fn compute_ema_bonds_sparse( + netuid: u16, + consensus: Vec<I32F32>, + bonds_delta: Vec<Vec<(u16, I32F32)>>, + bonds: Vec<Vec<(u16, I32F32)>>, + ) -> Vec<Vec<(u16, I32F32)>> { + // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. + // This way we avoid the quantile function panic. + if LiquidAlphaOn::<T>::get(netuid) + && !consensus.is_empty() + && consensus.iter().any(|&c| c != I32F32::from_num(0)) + { + // Calculate the 75th percentile (high) and 25th percentile (low) of the consensus values. + let consensus_high = quantile(&consensus, 0.75); + let consensus_low = quantile(&consensus, 0.25); + + // Further check if the high and low consensus values meet the required conditions. 
+ if (consensus_high > consensus_low) && consensus_high != 0 && consensus_low != 0 { + // if (consensus_high > consensus_low) || consensus_high != 0) || consensus_low != 0 { + // if (consensus_high > consensus_low) || consensus_low != 0 { + log::trace!("Using Liquid Alpha"); + + // Get the high and low alpha values for the network. + let alpha_high = Self::get_alpha_high(netuid); + log::trace!("alpha_high: {:?}", alpha_high); + let alpha_low = Self::get_alpha_low(netuid); + log::trace!("alpha_low: {:?}", alpha_low); + + // Calculate the logistic function parameters 'a' and 'b' based on alpha and consensus values. + let (a, b) = Self::calculate_logistic_params( + alpha_high, + alpha_low, + consensus_high, + consensus_low, + ); + + // Compute the alpha values using the logistic function parameters. + let alpha = Self::compute_alpha_values(&consensus, a, b); + + // Clamp the alpha values between alpha_high and alpha_low. + let clamped_alpha = Self::clamp_alpha_values(alpha, alpha_high, alpha_low); + + // Compute the Exponential Moving Average (EMA) of bonds using the clamped alpha values. + Self::compute_ema_bonds_with_liquid_alpha_sparse( + &bonds_delta, + &bonds, + clamped_alpha, + ) + } else { + log::trace!("Using Bonds Moving Average"); + + // Compute the EMA of bonds using a normal alpha value. + Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid) + } + } else { + log::trace!("Using Bonds Moving Average"); + + // Compute the EMA of bonds using a normal alpha value. + Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid) + } + } + + /// Compute the Exponential Moving Average (EMA) of bonds based on the Liquid Alpha setting. + /// + /// # Args: + /// * `netuid` - The network ID. + /// * `consensus` - A vector of consensus values. + /// * `bonds_delta` - A vector of bond deltas. + /// * `bonds` - A vector of bonds. + /// + /// # Returns: + /// A vector of EMA bonds. 
+ pub fn compute_ema_bonds( + netuid: u16, + consensus: Vec<I32F32>, + bonds_delta: Vec<Vec<I32F32>>, + bonds: Vec<Vec<I32F32>>, + ) -> Vec<Vec<I32F32>> { + // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. + if LiquidAlphaOn::<T>::get(netuid) + && !consensus.is_empty() + && consensus.iter().any(|&c| c != I32F32::from_num(0)) + { + // Calculate the 75th percentile (high) and 25th percentile (low) of the consensus values. + let consensus_high = quantile(&consensus, 0.75); + let consensus_low = quantile(&consensus, 0.25); + + // Further check if the high and low consensus values meet the required conditions. + if (consensus_high > consensus_low) && consensus_high != 0 && consensus_low != 0 { + log::trace!("Using Liquid Alpha"); + + // Get the high and low alpha values for the network. + let alpha_high = Self::get_alpha_high(netuid); + log::trace!("alpha_high: {:?}", alpha_high); + let alpha_low = Self::get_alpha_low(netuid); + log::trace!("alpha_low: {:?}", alpha_low); + + // Calculate the logistic function parameters 'a' and 'b' based on alpha and consensus values. + let (a, b) = Self::calculate_logistic_params( + alpha_high, + alpha_low, + consensus_high, + consensus_low, + ); + + // Compute the alpha values using the logistic function parameters. + let alpha = Self::compute_alpha_values(&consensus, a, b); + + // Clamp the alpha values between alpha_high and alpha_low. + let clamped_alpha = Self::clamp_alpha_values(alpha, alpha_high, alpha_low); + + // Compute the Exponential Moving Average (EMA) of bonds using the clamped alpha values. + Self::compute_ema_bonds_with_liquid_alpha(&bonds_delta, &bonds, clamped_alpha) + } else { + log::trace!("Using Bonds Moving Average"); + + // Compute the EMA of bonds using a normal alpha value. + Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid) + } + } else { + log::trace!("Using Bonds Moving Average"); + + // Compute the EMA of bonds using a normal alpha value. 
+ Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid) + } + } } diff --git a/pallets/subtensor/src/errors.rs b/pallets/subtensor/src/errors.rs index 62c1d799b..3f432079a 100644 --- a/pallets/subtensor/src/errors.rs +++ b/pallets/subtensor/src/errors.rs @@ -126,5 +126,7 @@ mod errors { CommitRevealEnabled, /// Attemtping to commit/reveal weights when disabled. CommitRevealDisabled, + /// Attempting to set alpha high/low while disabled + LiquidAlphaDisabled, } } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 61eb17f4a..f5d1e1f8e 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -29,7 +29,6 @@ use sp_std::marker::PhantomData; // ============================ // ==== Benchmark Imports ===== // ============================ -#[cfg(feature = "runtime-benchmarks")] mod benchmarks; // ========================= @@ -39,7 +38,7 @@ mod block_step; mod epoch; mod errors; mod events; -mod math; +pub mod math; mod registration; mod root; mod serving; @@ -239,6 +238,15 @@ pub mod pallet { /// Initial target stakes per interval issuance. #[pallet::constant] type InitialTargetStakesPerInterval: Get; + /// The upper bound for the alpha parameter. Used for Liquid Alpha. + #[pallet::constant] + type AlphaHigh: Get; + /// The lower bound for the alpha parameter. Used for Liquid Alpha. + #[pallet::constant] + type AlphaLow: Get; + /// A flag to indicate if Liquid Alpha is enabled. + #[pallet::constant] + type LiquidAlphaOn: Get; } /// Alias for the account ID. 
@@ -365,6 +373,14 @@ pub mod pallet { ValueQuery, DefaultAccountTake, >; + /// -- ITEM (switches liquid alpha on) + #[pallet::type_value] + pub fn DefaultLiquidAlpha() -> bool { + return false; + } + #[pallet::storage] // --- MAP ( netuid ) --> Whether or not Liquid Alpha is enabled + pub type LiquidAlphaOn = + StorageMap<_, Blake2_128Concat, u16, bool, ValueQuery, DefaultLiquidAlpha>; /// ===================================== /// ==== Difficulty / Registrations ===== @@ -847,6 +863,17 @@ pub mod pallet { pub fn DefaultWeightsMinStake() -> u64 { 0 } + /// Provides the default value for the upper bound of the alpha parameter. + + #[pallet::type_value] + pub fn DefaultAlphaHigh() -> u16 { + 900 // Represents 0.9 + } + /// Provides the default value for the lower bound of the alpha parameter. + #[pallet::type_value] + pub fn DefaultAlphaLow() -> u16 { + 700 // Represents 0.7 + } #[pallet::storage] // ITEM( weights_min_stake ) pub type WeightsMinStake = StorageValue<_, u64, ValueQuery, DefaultWeightsMinStake>; @@ -917,6 +944,12 @@ pub mod pallet { #[pallet::storage] // --- DMAP ( netuid ) --> adjustment_alpha pub type AdjustmentAlpha = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultAdjustmentAlpha>; + // MAP ( netuid ) --> alpha_high + #[pallet::storage] + pub type AlphaHigh = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultAlphaHigh>; + // MAP ( netuid ) --> alpha_low + #[pallet::storage] + pub type AlphaLow = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultAlphaLow>; #[pallet::storage] // --- MAP (netuid, who) --> (hash, weight) | Returns the hash and weight committed by an account for a given netuid. 
pub type WeightCommits = StorageDoubleMap< @@ -2063,7 +2096,7 @@ pub mod pallet { let current_block_number: u64 = Self::get_current_block_as_u64(); let default_priority: u64 = current_block_number - Self::get_last_update_for_uid(netuid, uid); - return default_priority + u32::max_value() as u64; + return default_priority + u32::MAX as u64; } 0 } @@ -2141,7 +2174,7 @@ where pub fn get_priority_vanilla() -> u64 { // Return high priority so that every extrinsic except set_weights function will // have a higher priority than the set_weights call - u64::max_value() + u64::MAX } pub fn get_priority_set_weights(who: &T::AccountId, netuid: u16) -> u64 { @@ -2219,9 +2252,9 @@ where Err(InvalidTransaction::Call.into()) } } - Some(Call::set_root_weights { netuid, .. }) => { - if Self::check_weights_min_stake(who) { - let priority: u64 = Self::get_priority_set_weights(who, *netuid); + Some(Call::set_root_weights { netuid, hotkey, .. }) => { + if Self::check_weights_min_stake(hotkey) { + let priority: u64 = Self::get_priority_set_weights(hotkey, *netuid); Ok(ValidTransaction { priority, longevity: 1, diff --git a/pallets/subtensor/src/math.rs b/pallets/subtensor/src/math.rs index e10cc0001..aa78d65b8 100644 --- a/pallets/subtensor/src/math.rs +++ b/pallets/subtensor/src/math.rs @@ -1,6 +1,8 @@ +use num_traits::float::Float; use sp_runtime::traits::CheckedAdd; +use sp_std::cmp::Ordering; use sp_std::vec; -use substrate_fixed::transcendental::exp; +use substrate_fixed::transcendental::{exp, ln}; use substrate_fixed::types::{I32F32, I64F64}; // TODO: figure out what cfg gate this needs to not be a warning in rustc @@ -167,8 +169,8 @@ pub fn is_zero(vector: &[I32F32]) -> bool { // Exp safe function with I32F32 output of I32F32 input. 
#[allow(dead_code)] pub fn exp_safe(input: I32F32) -> I32F32 { - let min_input: I32F32 = I32F32::from_num(-20); // <= 1/exp(-20) = 485 165 195,4097903 - let max_input: I32F32 = I32F32::from_num(20); // <= exp(20) = 485 165 195,4097903 + let min_input: I32F32 = I32F32::from_num(-20); // <= 1/exp(-20) = 485 165 195,4097903 + let max_input: I32F32 = I32F32::from_num(20); // <= exp(20) = 485 165 195,4097903 let mut safe_input: I32F32 = input; if input < min_input { safe_input = min_input; @@ -1113,2146 +1115,194 @@ pub fn sparse_threshold(w: &[Vec<(u16, I32F32)>], threshold: I32F32) -> Vec>, +// old: &Vec>, +// alpha: &Vec, +// ) -> Vec> { +// assert!(new.len() == old.len()); +// let n = new.len(); // assume square matrix, rows=cols +// let zero: I32F32 = I32F32::from_num(0.0); +// let mut result: Vec> = vec![vec![]; n]; +// for i in 0..new.len() { +// let mut row: Vec = vec![zero; n]; +// for (j, value) in new[i].iter() { +// let alpha_val: I32F32 = alpha[*j as usize]; +// row[*j as usize] += alpha_val * value; +// } +// for (j, value) in old[i].iter() { +// let one_minus_alpha: I32F32 = I32F32::from_num(1.0) - alpha[*j as usize]; +// row[*j as usize] += one_minus_alpha * value; +// } +// for (j, value) in row.iter().enumerate() { +// if *value > zero { +// result[i].push((j as u16, *value)) +// } +// } +// } +// result +// } + +/// Calculates the exponential moving average (EMA) for a sparse matrix using dynamic alpha values. +#[allow(dead_code)] +pub fn mat_ema_alpha_vec_sparse( + new: &[Vec<(u16, I32F32)>], + old: &[Vec<(u16, I32F32)>], + alpha: &[I32F32], +) -> Vec> { + // Ensure the new and old matrices have the same number of rows. 
+ assert!(new.len() == old.len()); + let n = new.len(); // Assume square matrix, rows=cols + let zero: I32F32 = I32F32::from_num(0.0); + let mut result: Vec> = vec![vec![]; n]; - fn assert_mat_compare(ma: &[Vec], mb: &[Vec], epsilon: I32F32) { - assert!(ma.len() == mb.len()); - for row in 0..ma.len() { - assert!(ma[row].len() == mb[row].len()); - for col in 0..ma[row].len() { - assert_float_compare(ma[row][col], mb[row][col], epsilon) - } - } - } + // Iterate over each row of the matrices. + for i in 0..new.len() { + // Initialize a row of zeros for the result matrix. + let mut row: Vec = vec![zero; n]; - fn assert_sparse_mat_compare( - ma: &[Vec<(u16, I32F32)>], - mb: &[Vec<(u16, I32F32)>], - epsilon: I32F32, - ) { - assert!(ma.len() == mb.len()); - for row in 0..ma.len() { - assert!(ma[row].len() == mb[row].len()); - for j in 0..ma[row].len() { - assert!(ma[row][j].0 == mb[row][j].0); // u16 - assert_float_compare(ma[row][j].1, mb[row][j].1, epsilon) // I32F32 - } + // Process the new matrix values. + for (j, value) in new[i].iter() { + // Retrieve the alpha value for the current column. + let alpha_val: I32F32 = alpha[*j as usize]; + // Compute the EMA component for the new value. 
+ row[*j as usize] = alpha_val * value; + log::trace!( + "new[{}][{}] * alpha[{}] = {} * {} = {}", + i, + j, + j, + value, + alpha_val, + row[*j as usize] + ); } - } - - fn vec_to_fixed(vector: &[f32]) -> Vec { - vector.iter().map(|x| I32F32::from_num(*x)).collect() - } - - #[test] - fn test_vec_max_upscale_to_u16() { - let vector: Vec = vec_to_fixed(&[]); - let target: Vec = vec![]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec_to_fixed(&[0.]); - let target: Vec = vec![0]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec_to_fixed(&[0., 0.]); - let target: Vec = vec![0, 0]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec_to_fixed(&[0., 1.]); - let target: Vec = vec![0, 65535]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec_to_fixed(&[0., 0.000000001]); - let target: Vec = vec![0, 65535]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec_to_fixed(&[0., 0.000016, 1.]); - let target: Vec = vec![0, 1, 65535]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec_to_fixed(&[0.000000001, 0.000000001]); - let target: Vec = vec![65535, 65535]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec_to_fixed(&[ - 0.000001, 0.000006, 0.000007, 0.0001, 0.001, 0.01, 0.1, 0.2, 0.3, 0.4, - ]); - let target: Vec = vec![0, 1, 1, 16, 164, 1638, 16384, 32768, 49151, 65535]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec![I32F32::from_num(16384)]; - let target: Vec = vec![65535]; - let result: Vec = 
vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec![I32F32::from_num(32768)]; - let target: Vec = vec![65535]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec![I32F32::from_num(32769)]; - let target: Vec = vec![65535]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec![I32F32::from_num(65535)]; - let target: Vec = vec![65535]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec![I32F32::max_value()]; - let target: Vec = vec![65535]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec_to_fixed(&[0., 1., 65535.]); - let target: Vec = vec![0, 1, 65535]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec_to_fixed(&[0., 0.5, 1., 1.5, 2., 32768.]); - let target: Vec = vec![0, 1, 2, 3, 4, 65535]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec_to_fixed(&[0., 0.5, 1., 1.5, 2., 32768., 32769.]); - let target: Vec = vec![0, 1, 2, 3, 4, 65533, 65535]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec![ - I32F32::from_num(0), - I32F32::from_num(1), - I32F32::from_num(32768), - I32F32::from_num(32769), - I32F32::max_value(), - ]; - let target: Vec = vec![0, 0, 1, 1, 65535]; - let result: Vec = vec_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - } - - #[test] - fn test_vec_u16_max_upscale_to_u16() { - let vector: Vec = vec![]; - let result: Vec = vec_u16_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &vector); - let vector: Vec = vec![0]; - let result: Vec = vec_u16_max_upscale_to_u16(&vector); - 
assert_vec_compare_u16(&result, &vector); - let vector: Vec = vec![0, 0]; - let result: Vec = vec_u16_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &vector); - let vector: Vec = vec![1]; - let target: Vec = vec![65535]; - let result: Vec = vec_u16_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec![0, 1]; - let target: Vec = vec![0, 65535]; - let result: Vec = vec_u16_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec![65534]; - let target: Vec = vec![65535]; - let result: Vec = vec_u16_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec![65535]; - let target: Vec = vec![65535]; - let result: Vec = vec_u16_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec![65535, 65535]; - let target: Vec = vec![65535, 65535]; - let result: Vec = vec_u16_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec![0, 1, 65534]; - let target: Vec = vec![0, 1, 65535]; - let result: Vec = vec_u16_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &target); - let vector: Vec = vec![0, 1, 2, 3, 4, 65533, 65535]; - let result: Vec = vec_u16_max_upscale_to_u16(&vector); - assert_vec_compare_u16(&result, &vector); - } - #[test] - fn test_check_vec_max_limited() { - let vector: Vec = vec![]; - let max_limit: u16 = 0; - assert!(check_vec_max_limited(&vector, max_limit)); - let vector: Vec = vec![]; - let max_limit: u16 = u16::MAX; - assert!(check_vec_max_limited(&vector, max_limit)); - let vector: Vec = vec![u16::MAX]; - let max_limit: u16 = u16::MAX; - assert!(check_vec_max_limited(&vector, max_limit)); - let vector: Vec = vec![u16::MAX]; - let max_limit: u16 = u16::MAX - 1; - assert!(!check_vec_max_limited(&vector, max_limit)); - let vector: Vec = vec![u16::MAX]; - let max_limit: u16 = 0; - assert!(!check_vec_max_limited(&vector, max_limit)); - 
let vector: Vec = vec![0]; - let max_limit: u16 = u16::MAX; - assert!(check_vec_max_limited(&vector, max_limit)); - let vector: Vec = vec![0, u16::MAX]; - let max_limit: u16 = u16::MAX; - assert!(check_vec_max_limited(&vector, max_limit)); - let vector: Vec = vec![0, u16::MAX, u16::MAX]; - let max_limit: u16 = u16::MAX / 2; - assert!(!check_vec_max_limited(&vector, max_limit)); - let vector: Vec = vec![0, u16::MAX, u16::MAX]; - let max_limit: u16 = u16::MAX / 2 + 1; - assert!(check_vec_max_limited(&vector, max_limit)); - let vector: Vec = vec![0, u16::MAX, u16::MAX, u16::MAX]; - let max_limit: u16 = u16::MAX / 3 - 1; - assert!(!check_vec_max_limited(&vector, max_limit)); - let vector: Vec = vec![0, u16::MAX, u16::MAX, u16::MAX]; - let max_limit: u16 = u16::MAX / 3; - assert!(check_vec_max_limited(&vector, max_limit)); - } - - #[test] - fn test_math_fixed_overflow() { - let max_32: I32F32 = I32F32::max_value(); - let max_u64: u64 = u64::MAX; - let _prod_96: I96F32 = I96F32::from_num(max_32) * I96F32::from_num(max_u64); - // let one: I96F32 = I96F32::from_num(1); - // let prod_96: I96F32 = (I96F32::from_num(max_32) + one) * I96F32::from_num(max_u64); // overflows - let _prod_110: I110F18 = I110F18::from_num(max_32) * I110F18::from_num(max_u64); - - let bonds_moving_average_val: u64 = 900_000_u64; - let bonds_moving_average: I64F64 = - I64F64::from_num(bonds_moving_average_val) / I64F64::from_num(1_000_000); - let alpha: I32F32 = I32F32::from_num(1) - I32F32::from_num(bonds_moving_average); - assert_eq!(I32F32::from_num(0.1), alpha); - - let bonds_moving_average: I64F64 = I64F64::from_num(max_32) / I64F64::from_num(max_32); - let alpha: I32F32 = I32F32::from_num(1) - I32F32::from_num(bonds_moving_average); - assert_eq!(I32F32::from_num(0), alpha); - } - - #[test] - fn test_math_u64_normalization() { - let min: u64 = 1; - let min32: u64 = 4_889_444; // 21_000_000_000_000_000 / 4_294_967_296 - let mid: u64 = 10_500_000_000_000_000; - let max: u64 = 
21_000_000_000_000_000; - let min_64: I64F64 = I64F64::from_num(min); - let min32_64: I64F64 = I64F64::from_num(min32); - let mid_64: I64F64 = I64F64::from_num(mid); - let max_64: I64F64 = I64F64::from_num(max); - let max_sum: I64F64 = I64F64::from_num(max); - let min_frac: I64F64 = min_64 / max_sum; - assert_eq!(min_frac, I64F64::from_num(0.0000000000000000476)); - let min_frac_32: I32F32 = I32F32::from_num(min_frac); - assert_eq!(min_frac_32, I32F32::from_num(0)); - let min32_frac: I64F64 = min32_64 / max_sum; - assert_eq!(min32_frac, I64F64::from_num(0.00000000023283066664)); - let min32_frac_32: I32F32 = I32F32::from_num(min32_frac); - assert_eq!(min32_frac_32, I32F32::from_num(0.0000000002)); - let half: I64F64 = mid_64 / max_sum; - assert_eq!(half, I64F64::from_num(0.5)); - let half_32: I32F32 = I32F32::from_num(half); - assert_eq!(half_32, I32F32::from_num(0.5)); - let one: I64F64 = max_64 / max_sum; - assert_eq!(one, I64F64::from_num(1)); - let one_32: I32F32 = I32F32::from_num(one); - assert_eq!(one_32, I32F32::from_num(1)); - } - - #[test] - fn test_math_to_num() { - let val: I32F32 = I32F32::from_num(u16::MAX); - let res: u16 = val.to_num::(); - assert_eq!(res, u16::MAX); - let vector: Vec = vec![val; 1000]; - let target: Vec = vec![u16::MAX; 1000]; - let output: Vec = vector.iter().map(|e: &I32F32| e.to_num::()).collect(); - assert_eq!(output, target); - let output: Vec = vector - .iter() - .map(|e: &I32F32| (*e).to_num::()) - .collect(); - assert_eq!(output, target); - let val: I32F32 = I32F32::max_value(); - let res: u64 = val.to_num::(); - let vector: Vec = vec![val; 1000]; - let target: Vec = vec![res; 1000]; - let output: Vec = vector.iter().map(|e: &I32F32| e.to_num::()).collect(); - assert_eq!(output, target); - let output: Vec = vector - .iter() - .map(|e: &I32F32| (*e).to_num::()) - .collect(); - assert_eq!(output, target); - let val: I32F32 = I32F32::from_num(0); - let res: u64 = val.to_num::(); - let vector: Vec = vec![val; 1000]; - let 
target: Vec = vec![res; 1000]; - let output: Vec = vector.iter().map(|e: &I32F32| e.to_num::()).collect(); - assert_eq!(output, target); - let output: Vec = vector - .iter() - .map(|e: &I32F32| (*e).to_num::()) - .collect(); - assert_eq!(output, target); - let val: I96F32 = I96F32::from_num(u64::MAX); - let res: u64 = val.to_num::(); - assert_eq!(res, u64::MAX); - let vector: Vec = vec![val; 1000]; - let target: Vec = vec![u64::MAX; 1000]; - let output: Vec = vector.iter().map(|e: &I96F32| e.to_num::()).collect(); - assert_eq!(output, target); - let output: Vec = vector - .iter() - .map(|e: &I96F32| (*e).to_num::()) - .collect(); - assert_eq!(output, target); - } - - #[test] - fn test_math_vec_to_fixed() { - let vector: Vec = vec![0., 1., 2., 3.]; - let target: Vec = vec![ - I32F32::from_num(0.), - I32F32::from_num(1.), - I32F32::from_num(2.), - I32F32::from_num(3.), - ]; - let result = vec_to_fixed(&vector); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - } - - // Reshape vector to matrix with specified number of rows, cast to I32F32. - fn vec_to_mat_fixed(vector: &[f32], rows: usize, transpose: bool) -> Vec> { - assert!( - vector.len() % rows == 0, - "Vector of len {:?} cannot reshape to {rows} rows.", - vector.len() - ); - let cols: usize = vector.len() / rows; - let mut mat: Vec> = vec![]; - if transpose { - for col in 0..cols { - let mut vals: Vec = vec![]; - for row in 0..rows { - vals.push(I32F32::from_num(vector[row * cols + col])); - } - mat.push(vals); - } - } else { - for row in 0..rows { - mat.push( - vector[row * cols..(row + 1) * cols] - .iter() - .map(|v| I32F32::from_num(*v)) - .collect(), - ); - } + // Process the old matrix values. + for (j, value) in old[i].iter() { + // Retrieve the alpha value for the current column. + let alpha_val: I32F32 = alpha[*j as usize]; + // Calculate the complement of the alpha value. 
+ let one_minus_alpha: I32F32 = I32F32::from_num(1.0) - alpha_val; + // Compute the EMA component for the old value and add it to the row. + row[*j as usize] += one_minus_alpha * value; + log::trace!( + "old[{}][{}] * (1 - alpha[{}]) = {} * {} = {}", + i, + j, + j, + value, + one_minus_alpha, + one_minus_alpha * value + ); } - mat - } - #[test] - fn test_math_vec_to_mat_fixed() { - let vector: Vec = vec![0., 1., 2., 0., 10., 100.]; - let target: Vec> = vec![ - vec![ - I32F32::from_num(0.), - I32F32::from_num(1.), - I32F32::from_num(2.), - ], - vec![ - I32F32::from_num(0.), - I32F32::from_num(10.), - I32F32::from_num(100.), - ], - ]; - let mat = vec_to_mat_fixed(&vector, 2, false); - assert_mat_compare(&mat, &target, I32F32::from_num(0)); - } - - // Reshape vector to sparse matrix with specified number of input rows, cast f32 to I32F32. - fn vec_to_sparse_mat_fixed( - vector: &[f32], - rows: usize, - transpose: bool, - ) -> Vec> { - assert!( - vector.len() % rows == 0, - "Vector of len {:?} cannot reshape to {rows} rows.", - vector.len() - ); - let cols: usize = vector.len() / rows; - let mut mat: Vec> = vec![]; - if transpose { - for col in 0..cols { - let mut row_vec: Vec<(u16, I32F32)> = vec![]; - for row in 0..rows { - if vector[row * cols + col] > 0. { - row_vec.push((row as u16, I32F32::from_num(vector[row * cols + col]))); - } - } - mat.push(row_vec); - } - } else { - for row in 0..rows { - let mut row_vec: Vec<(u16, I32F32)> = vec![]; - for col in 0..cols { - if vector[row * cols + col] > 0. { - row_vec.push((col as u16, I32F32::from_num(vector[row * cols + col]))); - } - } - mat.push(row_vec); + // Collect the non-zero values into the result matrix. 
+ for (j, value) in row.iter().enumerate() { + if *value > zero { + result[i].push((j as u16, *value)); + log::trace!("result[{}][{}] = {}", i, j, value); } } - mat - } - - #[test] - fn test_math_vec_to_sparse_mat_fixed() { - let vector: Vec = vec![0., 1., 2., 0., 10., 100.]; - let target: Vec> = vec![ - vec![(1_u16, I32F32::from_num(1.)), (2_u16, I32F32::from_num(2.))], - vec![ - (1_u16, I32F32::from_num(10.)), - (2_u16, I32F32::from_num(100.)), - ], - ]; - let mat = vec_to_sparse_mat_fixed(&vector, 2, false); - assert_sparse_mat_compare(&mat, &target, I32F32::from_num(0)); - let vector: Vec = vec![0., 0.]; - let target: Vec> = vec![vec![], vec![]]; - let mat = vec_to_sparse_mat_fixed(&vector, 2, false); - assert_sparse_mat_compare(&mat, &target, I32F32::from_num(0)); - let vector: Vec = vec![0., 1., 2., 0., 10., 100.]; - let target: Vec> = vec![ - vec![], - vec![ - (0_u16, I32F32::from_num(1.)), - (1_u16, I32F32::from_num(10.)), - ], - vec![ - (0_u16, I32F32::from_num(2.)), - (1_u16, I32F32::from_num(100.)), - ], - ]; - let mat = vec_to_sparse_mat_fixed(&vector, 2, true); - assert_sparse_mat_compare(&mat, &target, I32F32::from_num(0)); - let vector: Vec = vec![0., 0.]; - let target: Vec> = vec![vec![]]; - let mat = vec_to_sparse_mat_fixed(&vector, 2, true); - assert_sparse_mat_compare(&mat, &target, I32F32::from_num(0)); - } - - #[test] - fn test_math_exp_safe() { - let zero: I32F32 = I32F32::from_num(0); - let one: I32F32 = I32F32::from_num(1); - let target: I32F32 = exp(zero).unwrap(); - assert_eq!(exp_safe(zero), target); - let target: I32F32 = exp(one).unwrap(); - assert_eq!(exp_safe(one), target); - let min_input: I32F32 = I32F32::from_num(-20); // <= 1/exp(-20) = 485 165 195,4097903 - let max_input: I32F32 = I32F32::from_num(20); // <= exp(20) = 485 165 195,4097903 - let target: I32F32 = exp(min_input).unwrap(); - assert_eq!(exp_safe(min_input), target); - assert_eq!(exp_safe(min_input - one), target); - assert_eq!(exp_safe(I32F32::min_value()), target); - 
let target: I32F32 = exp(max_input).unwrap(); - assert_eq!(exp_safe(max_input), target); - assert_eq!(exp_safe(max_input + one), target); - assert_eq!(exp_safe(I32F32::max_value()), target); - } - - #[test] - fn test_math_sigmoid_safe() { - let trust: Vec = vec![ - I32F32::min_value(), - I32F32::from_num(0), - I32F32::from_num(0.4), - I32F32::from_num(0.5), - I32F32::from_num(0.6), - I32F32::from_num(1), - I32F32::max_value(), - ]; - let consensus: Vec = trust - .iter() - .map(|t: &I32F32| sigmoid_safe(*t, I32F32::max_value(), I32F32::max_value())) - .collect(); - let target: Vec = vec_to_fixed(&[ - 0.0000000019, - 0.0000000019, - 0.0000000019, - 0.0000000019, - 0.0000000019, - 0.0000000019, - 0.5, - ]); - assert_eq!(&consensus, &target); - let consensus: Vec = trust - .iter() - .map(|t: &I32F32| sigmoid_safe(*t, I32F32::min_value(), I32F32::min_value())) - .collect(); - let target: Vec = vec_to_fixed(&[ - 0.5, - 0.0000000019, - 0.0000000019, - 0.0000000019, - 0.0000000019, - 0.0000000019, - 0.0000000019, - ]); - assert_eq!(&consensus, &target); - let consensus: Vec = trust - .iter() - .map(|t: &I32F32| sigmoid_safe(*t, I32F32::from_num(30), I32F32::from_num(0.5))) - .collect(); - let target: Vec = vec![ - 0.0000000019, - 0.0000003057, - 0.0474258729, - 0.5, - 0.952574127, - 0.9999996943, - 0.9999999981, - ]; - let target: Vec = target.iter().map(|c: &f64| I32F32::from_num(*c)).collect(); - assert_eq!(&consensus, &target); - let trust: Vec = - vec_to_fixed(&[0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.]); - let consensus: Vec = trust - .iter() - .map(|t: &I32F32| sigmoid_safe(*t, I32F32::from_num(40), I32F32::from_num(0.5))) - .collect(); - let target: Vec = vec![ - 0.0000000019, - 0.0000001125, - 0.0000061442, - 0.0003353502, - 0.017986214, - 0.5, - 0.9820138067, - 0.9996646498, - 0.9999938558, - 0.9999998875, - 0.9999999981, - ]; - let target: Vec = target.iter().map(|c: &f64| I32F32::from_num(*c)).collect(); - assert_eq!(&consensus, &target); - } - - 
#[test] - fn test_math_is_topk() { - let vector: Vec = vec_to_fixed(&[]); - let result = is_topk(&vector, 5); - let target: Vec = vec![]; - assert_eq!(&result, &target); - let vector: Vec = vec_to_fixed(&[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]); - let result = is_topk(&vector, 0); - let target: Vec = vec![ - false, false, false, false, false, false, false, false, false, false, - ]; - assert_eq!(&result, &target); - let result = is_topk(&vector, 5); - let target: Vec = vec![ - false, false, false, false, false, true, true, true, true, true, - ]; - assert_eq!(&result, &target); - let result = is_topk(&vector, 10); - let target: Vec = vec![true, true, true, true, true, true, true, true, true, true]; - assert_eq!(&result, &target); - let result = is_topk(&vector, 100); - assert_eq!(&result, &target); - let vector: Vec = vec_to_fixed(&[9., 8., 7., 6., 5., 4., 3., 2., 1., 0.]); - let result = is_topk(&vector, 5); - let target: Vec = vec![ - true, true, true, true, true, false, false, false, false, false, - ]; - assert_eq!(&result, &target); - let vector: Vec = vec_to_fixed(&[9., 0., 8., 1., 7., 2., 6., 3., 5., 4.]); - let result = is_topk(&vector, 5); - let target: Vec = vec![ - true, false, true, false, true, false, true, false, true, false, - ]; - assert_eq!(&result, &target); - let vector: Vec = vec_to_fixed(&[0.9, 0., 0.8, 0.1, 0.7, 0.2, 0.6, 0.3, 0.5, 0.4]); - let result = is_topk(&vector, 5); - let target: Vec = vec![ - true, false, true, false, true, false, true, false, true, false, - ]; - assert_eq!(&result, &target); - let vector: Vec = vec_to_fixed(&[0., 1., 2., 3., 4., 5., 5., 5., 5., 6.]); - let result = is_topk(&vector, 5); - let target: Vec = vec![ - false, false, false, false, false, true, true, true, true, true, - ]; - assert_eq!(&result, &target); - } - - #[test] - fn test_math_sum() { - assert!(sum(&[]) == I32F32::from_num(0)); - assert!( - sum(&[ - I32F32::from_num(1.0), - I32F32::from_num(10.0), - I32F32::from_num(30.0) - ]) == I32F32::from_num(41) - 
); - assert!( - sum(&[ - I32F32::from_num(-1.0), - I32F32::from_num(10.0), - I32F32::from_num(30.0) - ]) == I32F32::from_num(39) - ); - } - - #[test] - fn test_math_normalize() { - let epsilon: I32F32 = I32F32::from_num(0.0001); - let x: Vec = vec![]; - let y: Vec = normalize(&x); - assert_vec_compare(&x, &y, epsilon); - let x: Vec = vec![ - I32F32::from_num(1.0), - I32F32::from_num(10.0), - I32F32::from_num(30.0), - ]; - let y: Vec = normalize(&x); - assert_vec_compare( - &y, - &[ - I32F32::from_num(0.0243902437), - I32F32::from_num(0.243902439), - I32F32::from_num(0.7317073171), - ], - epsilon, - ); - assert_float_compare(sum(&y), I32F32::from_num(1.0), epsilon); - let x: Vec = vec![ - I32F32::from_num(-1.0), - I32F32::from_num(10.0), - I32F32::from_num(30.0), - ]; - let y: Vec = normalize(&x); - assert_vec_compare( - &y, - &[ - I32F32::from_num(-0.0256410255), - I32F32::from_num(0.2564102563), - I32F32::from_num(0.769230769), - ], - epsilon, - ); - assert_float_compare(sum(&y), I32F32::from_num(1.0), epsilon); - } - - #[test] - fn test_math_inplace_normalize() { - let epsilon: I32F32 = I32F32::from_num(0.0001); - let mut x1: Vec = vec![ - I32F32::from_num(1.0), - I32F32::from_num(10.0), - I32F32::from_num(30.0), - ]; - inplace_normalize(&mut x1); - assert_vec_compare( - &x1, - &[ - I32F32::from_num(0.0243902437), - I32F32::from_num(0.243902439), - I32F32::from_num(0.7317073171), - ], - epsilon, - ); - let mut x2: Vec = vec![ - I32F32::from_num(-1.0), - I32F32::from_num(10.0), - I32F32::from_num(30.0), - ]; - inplace_normalize(&mut x2); - assert_vec_compare( - &x2, - &[ - I32F32::from_num(-0.0256410255), - I32F32::from_num(0.2564102563), - I32F32::from_num(0.769230769), - ], - epsilon, - ); - } - - #[test] - fn test_math_inplace_normalize_64() { - let epsilon: I64F64 = I64F64::from_num(0.0001); - let mut x1: Vec = vec![ - I64F64::from_num(1.0), - I64F64::from_num(10.0), - I64F64::from_num(30.0), - ]; - inplace_normalize_64(&mut x1); - assert_vec_compare_64( - 
&x1, - &[ - I64F64::from_num(0.0243902437), - I64F64::from_num(0.243902439), - I64F64::from_num(0.7317073171), - ], - epsilon, - ); - let mut x2: Vec = vec![ - I64F64::from_num(-1.0), - I64F64::from_num(10.0), - I64F64::from_num(30.0), - ]; - inplace_normalize_64(&mut x2); - assert_vec_compare_64( - &x2, - &[ - I64F64::from_num(-0.0256410255), - I64F64::from_num(0.2564102563), - I64F64::from_num(0.769230769), - ], - epsilon, - ); - } - - #[test] - fn test_math_vecdiv() { - let x: Vec = vec_to_fixed(&[]); - let y: Vec = vec_to_fixed(&[]); - let result: Vec = vec_to_fixed(&[]); - assert_eq!(result, vecdiv(&x, &y)); - - let x: Vec = vec_to_fixed(&[0., 1., 0., 1.]); - let y: Vec = vec_to_fixed(&[0., 1., 1., 0.]); - let result: Vec = vec_to_fixed(&[0., 1., 0., 0.]); - assert_eq!(result, vecdiv(&x, &y)); - - let x: Vec = vec_to_fixed(&[1., 1., 10.]); - let y: Vec = vec_to_fixed(&[2., 3., 2.]); - let result: Vec = vec![fixed(1.) / fixed(2.), fixed(1.) / fixed(3.), fixed(5.)]; - assert_eq!(result, vecdiv(&x, &y)); - } - - #[test] - fn test_math_inplace_row_normalize() { - let epsilon: I32F32 = I32F32::from_num(0.0001); - let vector: Vec = vec![ - 0., 1., 2., 3., 4., 0., 10., 100., 1000., 10000., 0., 0., 0., 0., 0., 1., 1., 1., 1., - 1., - ]; - let mut mat = vec_to_mat_fixed(&vector, 4, false); - inplace_row_normalize(&mut mat); - let target: Vec = vec![ - 0., 0.1, 0.2, 0.3, 0.4, 0., 0.0009, 0.009, 0.09, 0.9, 0., 0., 0., 0., 0., 0.2, 0.2, - 0.2, 0.2, 0.2, - ]; - assert_mat_compare(&mat, &vec_to_mat_fixed(&target, 4, false), epsilon); - } - - #[test] - fn test_math_inplace_row_normalize_sparse() { - let epsilon: I32F32 = I32F32::from_num(0.0001); - let vector: Vec = vec![ - 0., 1., 0., 2., 0., 3., 4., 0., 1., 0., 2., 0., 3., 0., 1., 0., 0., 2., 0., 3., 4., 0., - 10., 0., 100., 1000., 0., 10000., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., - 1., - ]; - let mut mat = vec_to_sparse_mat_fixed(&vector, 6, false); - inplace_row_normalize_sparse(&mut mat); - let target: Vec 
= vec![ - 0., 0.1, 0., 0.2, 0., 0.3, 0.4, 0., 0.166666, 0., 0.333333, 0., 0.5, 0., 0.1, 0., 0., - 0.2, 0., 0.3, 0.4, 0., 0.0009, 0., 0.009, 0.09, 0., 0.9, 0., 0., 0., 0., 0., 0., 0., - 0.142857, 0.142857, 0.142857, 0.142857, 0.142857, 0.142857, 0.142857, - ]; - assert_sparse_mat_compare(&mat, &vec_to_sparse_mat_fixed(&target, 6, false), epsilon); - let vector: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mut mat = vec_to_sparse_mat_fixed(&vector, 3, false); - inplace_row_normalize_sparse(&mut mat); - assert_sparse_mat_compare( - &mat, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - } - - #[test] - fn test_math_inplace_col_normalize() { - let epsilon: I32F32 = I32F32::from_num(0.0001); - let vector: Vec = vec![ - 0., 1., 2., 3., 4., 0., 10., 100., 1000., 10000., 0., 0., 0., 0., 0., 1., 1., 1., 1., - 1., - ]; - let mut mat = vec_to_mat_fixed(&vector, 4, true); - inplace_col_normalize(&mut mat); - let target: Vec = vec![ - 0., 0.1, 0.2, 0.3, 0.4, 0., 0.0009, 0.009, 0.09, 0.9, 0., 0., 0., 0., 0., 0.2, 0.2, - 0.2, 0.2, 0.2, - ]; - assert_mat_compare(&mat, &vec_to_mat_fixed(&target, 4, true), epsilon); - } - - #[test] - fn test_math_inplace_col_normalize_sparse() { - let epsilon: I32F32 = I32F32::from_num(0.0001); - let vector: Vec = vec![ - 0., 1., 0., 2., 0., 3., 4., 0., 1., 0., 2., 0., 3., 0., 1., 0., 0., 2., 0., 3., 4., 0., - 10., 0., 100., 1000., 0., 10000., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., - 1., - ]; - let mut mat = vec_to_sparse_mat_fixed(&vector, 6, true); - inplace_col_normalize_sparse(&mut mat, 6); - let target: Vec = vec![ - 0., 0.1, 0., 0.2, 0., 0.3, 0.4, 0., 0.166666, 0., 0.333333, 0., 0.5, 0., 0.1, 0., 0., - 0.2, 0., 0.3, 0.4, 0., 0.0009, 0., 0.009, 0.09, 0., 0.9, 0., 0., 0., 0., 0., 0., 0., - 0.142857, 0.142857, 0.142857, 0.142857, 0.142857, 0.142857, 0.142857, - ]; - assert_sparse_mat_compare(&mat, 
&vec_to_sparse_mat_fixed(&target, 6, true), epsilon); - let vector: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mut mat = vec_to_sparse_mat_fixed(&vector, 3, false); - inplace_col_normalize_sparse(&mut mat, 6); - assert_sparse_mat_compare( - &mat, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - let mut mat: Vec> = vec![]; - let target: Vec> = vec![]; - inplace_col_normalize_sparse(&mut mat, 0); - assert_sparse_mat_compare(&mat, &target, epsilon); - } - - #[test] - fn test_math_inplace_col_max_upscale() { - let mut mat: Vec> = vec![vec![]]; - let target: Vec> = vec![vec![]]; - inplace_col_max_upscale(&mut mat); - assert_eq!(&mat, &target); - let mut mat: Vec> = vec![vec![I32F32::from_num(0)]]; - let target: Vec> = vec![vec![I32F32::from_num(0)]]; - inplace_col_max_upscale(&mut mat); - assert_eq!(&mat, &target); - let epsilon: I32F32 = I32F32::from_num(0.0001); - let vector: Vec = vec![ - 0., 1., 2., 3., 4., 0., 10., 100., 1000., 10000., 0., 0., 0., 0., 0., 1., 1., 1., 1., - 1., - ]; - let mut mat: Vec> = vec_to_mat_fixed(&vector, 4, true); - inplace_col_max_upscale(&mut mat); - let target: Vec = vec![ - 0., 0.25, 0.5, 0.75, 1., 0., 0.001, 0.01, 0.1, 1., 0., 0., 0., 0., 0., 1., 1., 1., 1., - 1., - ]; - assert_mat_compare(&mat, &vec_to_mat_fixed(&target, 4, true), epsilon); - } - - #[test] - fn test_math_inplace_col_max_upscale_sparse() { - let mut mat: Vec> = vec![vec![]]; - let target: Vec> = vec![vec![]]; - inplace_col_max_upscale_sparse(&mut mat, 0); - assert_eq!(&mat, &target); - let mut mat: Vec> = vec![vec![(0, I32F32::from_num(0))]]; - let target: Vec> = vec![vec![(0, I32F32::from_num(0))]]; - inplace_col_max_upscale_sparse(&mut mat, 1); - assert_eq!(&mat, &target); - let epsilon: I32F32 = I32F32::from_num(0.0001); - let vector: Vec = vec![ - 0., 1., 0., 2., 0., 3., 4., 0., 1., 0., 2., 0., 3., 0., 1., 0., 0., 2., 0., 3., 4., 0., - 10., 
0., 100., 1000., 0., 10000., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., - 1., - ]; - let mut mat = vec_to_sparse_mat_fixed(&vector, 6, true); - inplace_col_max_upscale_sparse(&mut mat, 6); - let target: Vec = vec![ - 0., 0.25, 0., 0.5, 0., 0.75, 1., 0., 0.333333, 0., 0.666666, 0., 1., 0., 0.25, 0., 0., - 0.5, 0., 0.75, 1., 0., 0.001, 0., 0.01, 0.1, 0., 1., 0., 0., 0., 0., 0., 0., 0., 1., - 1., 1., 1., 1., 1., 1., - ]; - assert_sparse_mat_compare(&mat, &vec_to_sparse_mat_fixed(&target, 6, true), epsilon); - let vector: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mut mat = vec_to_sparse_mat_fixed(&vector, 3, false); - inplace_col_max_upscale_sparse(&mut mat, 6); - assert_sparse_mat_compare( - &mat, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - let mut mat: Vec> = vec![]; - let target: Vec> = vec![]; - inplace_col_max_upscale_sparse(&mut mat, 0); - assert_sparse_mat_compare(&mat, &target, epsilon); - } - - #[test] - fn test_math_inplace_mask_vector() { - let mask: Vec = vec![false, false, false]; - let mut vector: Vec = vec_to_fixed(&[0., 1., 2.]); - let target: Vec = vec_to_fixed(&[0., 1., 2.]); - inplace_mask_vector(&mask, &mut vector); - assert_vec_compare(&vector, &target, I32F32::from_num(0)); - let mask: Vec = vec![false, true, false]; - let mut vector: Vec = vec_to_fixed(&[0., 1., 2.]); - let target: Vec = vec_to_fixed(&[0., 0., 2.]); - inplace_mask_vector(&mask, &mut vector); - assert_vec_compare(&vector, &target, I32F32::from_num(0)); - let mask: Vec = vec![true, true, true]; - let mut vector: Vec = vec_to_fixed(&[0., 1., 2.]); - let target: Vec = vec_to_fixed(&[0., 0., 0.]); - inplace_mask_vector(&mask, &mut vector); - assert_vec_compare(&vector, &target, I32F32::from_num(0)); - } - - #[test] - fn test_math_inplace_mask_matrix() { - let mask: Vec> = vec![ - vec![false, false, false], - vec![false, false, false], - 
vec![false, false, false], - ]; - let vector: Vec = vec![0., 1., 2., 3., 4., 5., 6., 7., 8.]; - let mut mat = vec_to_mat_fixed(&vector, 3, false); - inplace_mask_matrix(&mask, &mut mat); - assert_mat_compare( - &mat, - &vec_to_mat_fixed(&vector, 3, false), - I32F32::from_num(0), - ); - let mask: Vec> = vec![ - vec![true, false, false], - vec![false, true, false], - vec![false, false, true], - ]; - let target: Vec = vec![0., 1., 2., 3., 0., 5., 6., 7., 0.]; - let mut mat = vec_to_mat_fixed(&vector, 3, false); - inplace_mask_matrix(&mask, &mut mat); - assert_mat_compare( - &mat, - &vec_to_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - let mask: Vec> = vec![ - vec![true, true, true], - vec![true, true, true], - vec![true, true, true], - ]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mut mat = vec_to_mat_fixed(&vector, 3, false); - inplace_mask_matrix(&mask, &mut mat); - assert_mat_compare( - &mat, - &vec_to_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - } - - #[test] - fn test_math_inplace_mask_rows() { - let input: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; - let mask: Vec = vec![false, false, false]; - let target: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; - let mut mat = vec_to_mat_fixed(&input, 3, false); - inplace_mask_rows(&mask, &mut mat); - assert_mat_compare( - &mat, - &vec_to_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - let mask: Vec = vec![true, true, true]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mut mat = vec_to_mat_fixed(&input, 3, false); - inplace_mask_rows(&mask, &mut mat); - assert_mat_compare( - &mat, - &vec_to_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - let mask: Vec = vec![true, false, true]; - let target: Vec = vec![0., 0., 0., 4., 5., 6., 0., 0., 0.]; - let mut mat = vec_to_mat_fixed(&input, 3, false); - inplace_mask_rows(&mask, &mut mat); - assert_mat_compare( - &mat, - &vec_to_mat_fixed(&target, 3, false), - 
I32F32::from_num(0), - ); - let input: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mut mat = vec_to_mat_fixed(&input, 3, false); - let mask: Vec = vec![false, false, false]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; - inplace_mask_rows(&mask, &mut mat); - assert_mat_compare( - &mat, - &vec_to_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - } - - #[test] - fn test_math_inplace_mask_diag() { - let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; - let target: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0.]; - let mut mat = vec_to_mat_fixed(&vector, 3, false); - inplace_mask_diag(&mut mat); - assert_mat_compare( - &mat, - &vec_to_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - } - - #[test] - fn test_math_mask_rows_sparse() { - let input: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; - let mat = vec_to_sparse_mat_fixed(&input, 3, false); - let mask: Vec = vec![false, false, false]; - let target: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; - let result = mask_rows_sparse(&mask, &mat); - assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - let mask: Vec = vec![true, true, true]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let result = mask_rows_sparse(&mask, &mat); - assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - let mask: Vec = vec![true, false, true]; - let target: Vec = vec![0., 0., 0., 4., 5., 6., 0., 0., 0.]; - let result = mask_rows_sparse(&mask, &mat); - assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - let input: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat = vec_to_sparse_mat_fixed(&input, 3, false); - let mask: Vec = vec![false, false, false]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let result = mask_rows_sparse(&mask, &mat); - 
assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - } - - #[test] - fn test_math_mask_diag_sparse() { - let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; - let target: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0.]; - let mat = vec_to_sparse_mat_fixed(&vector, 3, false); - let result = mask_diag_sparse(&mat); - assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - let vector: Vec = vec![1., 0., 0., 0., 5., 0., 0., 0., 9.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat = vec_to_sparse_mat_fixed(&vector, 3, false); - let result = mask_diag_sparse(&mat); - assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - let vector: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat = vec_to_sparse_mat_fixed(&vector, 3, false); - let result = mask_diag_sparse(&mat); - assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - } - - #[test] - fn test_math_vec_mask_sparse_matrix() { - let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; - let target: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0.]; - let mat = vec_to_sparse_mat_fixed(&vector, 3, false); - let first_vector: Vec = vec![1, 2, 3]; - let second_vector: Vec = vec![1, 2, 3]; - let result = vec_mask_sparse_matrix(&mat, &first_vector, &second_vector, &|a, b| a == b); - assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - let target: Vec = vec![1., 0., 0., 4., 5., 0., 7., 8., 9.]; - let mat = vec_to_sparse_mat_fixed(&vector, 3, false); - let first_vector: Vec = vec![1, 2, 3]; - let second_vector: Vec = vec![1, 2, 3]; - let result = vec_mask_sparse_matrix(&mat, &first_vector, &second_vector, &|a, b| a < b); - 
assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - let vector: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat = vec_to_sparse_mat_fixed(&vector, 3, false); - let first_vector: Vec = vec![1, 2, 3]; - let second_vector: Vec = vec![1, 2, 3]; - let result = vec_mask_sparse_matrix(&mat, &first_vector, &second_vector, &|a, b| a == b); - assert_sparse_mat_compare( - &result, - &vec_to_sparse_mat_fixed(&target, 3, false), - I32F32::from_num(0), - ); - } - - #[test] - fn test_math_row_hadamard() { - let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_mat_fixed(&matrix, 4, false); - let result = row_hadamard(&matrix, &vector); - let target: Vec = vec![1., 2., 3., 8., 10., 12., 21., 24., 27., 40., 44., 48.]; - let target = vec_to_mat_fixed(&target, 4, false); - assert_mat_compare(&result, &target, I32F32::from_num(0)); - } - - #[test] - fn test_math_row_hadamard_sparse() { - let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = row_hadamard_sparse(&matrix, &vector); - let target: Vec = vec![1., 2., 3., 8., 10., 12., 21., 24., 27., 40., 44., 48.]; - let target = vec_to_sparse_mat_fixed(&target, 4, false); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = row_hadamard_sparse(&matrix, &vector); - let target: Vec = vec![0., 2., 3., 8., 0., 12., 21., 24., 0., 40., 44., 48.]; - let target = vec_to_sparse_mat_fixed(&target, 4, false); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 0., 0., 0., 
0., 0., 0., 0., 0., 0., 0., 0.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = row_hadamard_sparse(&matrix, &vector); - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target = vec_to_sparse_mat_fixed(&target, 4, false); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); - } - - #[test] - fn test_math_row_sum() { - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_mat_fixed(&matrix, 4, false); - let result = row_sum(&matrix); - let target: Vec = vec_to_fixed(&[6., 15., 24., 33.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - } - - #[test] - fn test_math_row_sum_sparse() { - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = row_sum_sparse(&matrix); - let target: Vec = vec_to_fixed(&[6., 15., 24., 33.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = row_sum_sparse(&matrix); - let target: Vec = vec_to_fixed(&[5., 10., 15., 33.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![1., 2., 3., 0., 0., 0., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = row_sum_sparse(&matrix); - let target: Vec = vec_to_fixed(&[6., 0., 24., 33.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = row_sum_sparse(&matrix); - let target: Vec = vec_to_fixed(&[0., 0., 0., 0.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - } - - #[test] - fn test_math_col_sum() { - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 
12.]; - let matrix = vec_to_mat_fixed(&matrix, 4, false); - let result = col_sum(&matrix); - let target: Vec = vec_to_fixed(&[22., 26., 30.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - } - - #[test] - fn test_math_col_sum_sparse() { - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[22., 26., 30.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[21., 21., 21.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![1., 0., 3., 4., 0., 6., 7., 0., 9., 10., 0., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[22., 0., 30.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[0., 0., 0.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); } - #[test] - fn test_math_matmul() { - let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_mat_fixed(&matrix, 4, false); - let result = matmul(&matrix, &vector); - let target: Vec = vec_to_fixed(&[70., 80., 90.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - } - - #[test] - fn test_math_matmul_transpose() { - let vector: Vec = vec_to_fixed(&[1., 2., 3.]); - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = 
vec_to_mat_fixed(&matrix, 4, false); - let result = matmul_transpose(&matrix, &vector); - let target: Vec = vec_to_fixed(&[14., 32., 50., 68.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - } - - #[test] - fn test_math_sparse_matmul() { - let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = matmul_sparse(&matrix, &vector, 3); - let target: Vec = vec_to_fixed(&[70., 80., 90.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = matmul_sparse(&matrix, &vector, 3); - let target: Vec = vec_to_fixed(&[69., 70., 63.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = matmul_sparse(&matrix, &vector, 3); - let target: Vec = vec_to_fixed(&[0., 0., 0.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - } - - #[test] - fn test_math_sparse_matmul_transpose() { - let vector: Vec = vec_to_fixed(&[1., 2., 3.]); - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = matmul_transpose_sparse(&matrix, &vector); - let target: Vec = vec_to_fixed(&[14., 32., 50., 68.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = matmul_transpose_sparse(&matrix, &vector); - let target: Vec = vec_to_fixed(&[13., 22., 23., 68.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 
0., 0.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = matmul_transpose_sparse(&matrix, &vector); - let target: Vec = vec_to_fixed(&[0., 0., 0., 0.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - } - - #[test] - fn test_math_inplace_col_clip() { - let vector: Vec = vec_to_fixed(&[0., 5., 12.]); - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let mut matrix = vec_to_mat_fixed(&matrix, 4, false); - let target: Vec = vec![0., 2., 3., 0., 5., 6., 0., 5., 9., 0., 5., 12.]; - let target = vec_to_mat_fixed(&target, 4, false); - inplace_col_clip(&mut matrix, &vector); - assert_mat_compare(&matrix, &target, I32F32::from_num(0)); - } - - #[test] - fn test_math_col_clip_sparse() { - let vector: Vec = vec_to_fixed(&[0., 5., 12.]); - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let target: Vec = vec![0., 2., 3., 0., 5., 6., 0., 5., 9., 0., 5., 12.]; - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = col_clip_sparse(&matrix, &vector); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 0., 0., 0., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let target: Vec = vec![0., 2., 3., 0., 5., 6., 0., 0., 0., 0., 5., 12.]; - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = col_clip_sparse(&matrix, &vector); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = col_clip_sparse(&matrix, &vector); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); - } - - #[test] - fn 
test_math_clip_sparse() { - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let target: Vec = vec![0., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = clip_sparse( - &matrix, - I32F32::from_num(8), - I32F32::from_num(100), - I32F32::from_num(1), - ); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); - } - - #[test] - fn test_math_clip() { - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_mat_fixed(&matrix, 4, false); - let target: Vec = vec![1., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; - let target = vec_to_mat_fixed(&target, 4, false); - let result = clip( - &matrix, - I32F32::from_num(8), - I32F32::from_num(100), - I32F32::from_num(1), - ); - assert_mat_compare(&result, &target, I32F32::from_num(0)); - } + // Return the computed EMA sparse matrix. + result +} - #[test] - fn test_math_inplace_clip() { - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let mut matrix = vec_to_mat_fixed(&matrix, 4, false); - let target: Vec = vec![1., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; - let target = vec_to_mat_fixed(&target, 4, false); - inplace_clip( - &mut matrix, - I32F32::from_num(8), - I32F32::from_num(100), - I32F32::from_num(1), - ); - assert_mat_compare(&matrix, &target, I32F32::from_num(0)); +/// Return matrix exponential moving average: `alpha_j * a_ij + one_minus_alpha_j * b_ij`. +/// `alpha_` is the EMA coefficient passed as a vector per column. +#[allow(dead_code)] +pub fn mat_ema_alpha_vec( + new: &Vec>, + old: &Vec>, + alpha: &Vec, +) -> Vec> { + // Check if the new matrix is empty or its first row is empty. 
+ if new.is_empty() || new[0].is_empty() { + return vec![vec![]; 1]; } - #[test] - fn test_math_weighted_median() { - let mut rng = thread_rng(); - let zero: I32F32 = fixed(0.); - let one: I32F32 = fixed(1.); - for _ in 0..100 { - let stake: Vec = vec_to_fixed(&[]); - let score: Vec = vec_to_fixed(&[]); - let majority: I32F32 = fixed(0.51); - assert_eq!( - zero, - weighted_median( - &stake, - &score, - (0..stake.len()).collect::>().as_slice(), - one - majority, - zero, - stake.iter().sum() - ) - ); - - let stake: Vec = normalize(&vec_to_fixed(&[0.51])); - let score: Vec = vec_to_fixed(&[1.]); - let majority: I32F32 = fixed(0.51); - assert_eq!( - one, - weighted_median( - &stake, - &score, - (0..stake.len()).collect::>().as_slice(), - one - majority, - zero, - stake.iter().sum() - ) - ); - - let stake: Vec = vec_to_fixed(&[0.49, 0.51]); - let score: Vec = vec_to_fixed(&[0.5, 1.]); - let majority: I32F32 = fixed(0.51); - assert_eq!( - one, - weighted_median( - &stake, - &score, - (0..stake.len()).collect::>().as_slice(), - one - majority, - zero, - stake.iter().sum() - ) - ); - - let stake: Vec = vec_to_fixed(&[0.51, 0.49]); - let score: Vec = vec_to_fixed(&[0.5, 1.]); - let majority: I32F32 = fixed(0.51); - assert_eq!( - fixed(0.5), - weighted_median( - &stake, - &score, - (0..stake.len()).collect::>().as_slice(), - one - majority, - zero, - stake.iter().sum() - ) - ); - - let stake: Vec = vec_to_fixed(&[0.49, 0., 0.51]); - let score: Vec = vec_to_fixed(&[0.5, 0.7, 1.]); - let majority: I32F32 = fixed(0.51); - assert_eq!( - one, - weighted_median( - &stake, - &score, - (0..stake.len()).collect::>().as_slice(), - one - majority, - zero, - stake.iter().sum() - ) - ); - - let stake: Vec = vec_to_fixed(&[0.49, 0.01, 0.5]); - let score: Vec = vec_to_fixed(&[0.5, 0.7, 1.]); - let majority: I32F32 = fixed(0.51); - assert_eq!( - fixed(0.7), - weighted_median( - &stake, - &score, - (0..stake.len()).collect::>().as_slice(), - one - majority, - zero, - stake.iter().sum() - ) - 
); + // Ensure the dimensions of the new and old matrices match. + assert!(new.len() == old.len()); + assert!(new[0].len() == alpha.len()); - let stake: Vec = vec_to_fixed(&[0.49, 0.51, 0.0]); - let score: Vec = vec_to_fixed(&[0.5, 0.7, 1.]); - let majority: I32F32 = fixed(0.51); - assert_eq!( - fixed(0.7), - weighted_median( - &stake, - &score, - (0..stake.len()).collect::>().as_slice(), - one - majority, - zero, - stake.iter().sum() - ) - ); + // Initialize the result matrix with zeros, having the same dimensions as the new matrix. + let mut result: Vec> = vec![vec![I32F32::from_num(0.0); new[0].len()]; new.len()]; - let stake: Vec = vec_to_fixed(&[0.0, 0.49, 0.51]); - let score: Vec = vec_to_fixed(&[0.5, 0.7, 1.]); - let majority: I32F32 = fixed(0.51); - assert_eq!( - one, - weighted_median( - &stake, - &score, - (0..stake.len()).collect::>().as_slice(), - one - majority, - zero, - stake.iter().sum() - ) - ); - - let stake: Vec = vec_to_fixed(&[0.0, 0.49, 0.0, 0.51]); - let score: Vec = vec_to_fixed(&[0.5, 0.5, 1., 1.]); - let majority: I32F32 = fixed(0.51); - assert_eq!( - one, - weighted_median( - &stake, - &score, - (0..stake.len()).collect::>().as_slice(), - one - majority, - zero, - stake.iter().sum() - ) - ); + // Iterate over each row of the matrices. + for i in 0..new.len() { + // Ensure the current row of the new and old matrices have the same length. + assert!(new[i].len() == old[i].len()); - let stake: Vec = vec_to_fixed(&[0.0, 0.49, 0.0, 0.51, 0.0]); - let score: Vec = vec_to_fixed(&[0.5, 0.5, 1., 1., 0.5]); - let majority: I32F32 = fixed(0.51); - assert_eq!( - one, - weighted_median( - &stake, - &score, - (0..stake.len()).collect::>().as_slice(), - one - majority, - zero, - stake.iter().sum() - ) - ); + // Iterate over each column of the current row. + for j in 0..new[i].len() { + // Retrieve the alpha value for the current column. 
+ let alpha_val = alpha[j]; - let stake: Vec = vec_to_fixed(&[0.2, 0.2, 0.2, 0.2, 0.2]); - let score: Vec = vec_to_fixed(&[0.8, 0.2, 1., 0.6, 0.4]); - let majority: I32F32 = fixed(0.51); - assert_eq!( - fixed(0.6), - weighted_median( - &stake, - &score, - (0..stake.len()).collect::>().as_slice(), - one - majority, - zero, - stake.iter().sum() - ) - ); + // Calculate the complement of the alpha value. + let one_minus_alpha = I32F32::from_num(1.0) - alpha_val; - let stake: Vec = - vec_to_fixed(&[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]); - let score: Vec = - vec_to_fixed(&[0.8, 0.8, 0.2, 0.2, 1.0, 1.0, 0.6, 0.6, 0.4, 0.4]); - let majority: I32F32 = fixed(0.51); - assert_eq!( - fixed(0.6), - weighted_median( - &stake, - &score, - (0..stake.len()).collect::>().as_slice(), - one - majority, - zero, - stake.iter().sum() - ) - ); - - let n: usize = 100; - for majority in vec_to_fixed(&[ - 0., 0.0000001, 0.25, 0.49, 0.49, 0.49, 0.5, 0.51, 0.51, 0.51, 0.9999999, 1., - ]) { - for allow_equal in [false, true] { - let mut stake: Vec = vec![]; - let mut score: Vec = vec![]; - let mut last_score: I32F32 = zero; - for i in 0..n { - if allow_equal { - match rng.gen_range(0..2) { - 1 => stake.push(one), - _ => stake.push(zero), - } - if rng.gen_range(0..2) == 1 { - last_score += one - } - score.push(last_score); - } else { - stake.push(one); - score.push(I32F32::from_num(i)); - } - } - inplace_normalize(&mut stake); - let total_stake: I32F32 = stake.iter().sum(); - let mut minority: I32F32 = total_stake - majority; - if minority < zero { - minority = zero; - } - let mut medians: Vec = vec![]; - let mut median_stake: I32F32 = zero; - let mut median_set = false; - let mut stake_sum: I32F32 = zero; - for i in 0..n { - stake_sum += stake[i]; - if !median_set && stake_sum >= minority { - median_stake = stake_sum; - median_set = true; - } - if median_set { - if median_stake < stake_sum { - if median_stake == minority && !medians.contains(&score[i]) { - medians.push(score[i]); - 
} - break; - } - if !medians.contains(&score[i]) { - medians.push(score[i]); - } - } - } - if medians.is_empty() { - medians.push(zero); - } - let stake_idx: Vec = (0..stake.len()).collect(); - let result: I32F32 = - weighted_median(&stake, &score, &stake_idx, minority, zero, total_stake); - assert!(medians.contains(&result)); - for _ in 0..10 { - let mut permuted_uids: Vec = (0..n).collect(); - permuted_uids.shuffle(&mut thread_rng()); - stake = permuted_uids.iter().map(|&i| stake[i]).collect(); - score = permuted_uids.iter().map(|&i| score[i]).collect(); - let result: I32F32 = weighted_median( - &stake, - &score, - &stake_idx, - minority, - zero, - total_stake, - ); - assert!(medians.contains(&result)); - } - } - } + // Compute the EMA for the current element. + result[i][j] = alpha_val * new[i][j] + one_minus_alpha * old[i][j]; } } - #[test] - fn test_math_weighted_median_col() { - let stake: Vec = vec_to_fixed(&[]); - let weights: Vec> = vec![vec![]]; - let median: Vec = vec_to_fixed(&[]); - assert_eq!(median, weighted_median_col(&stake, &weights, fixed(0.5))); - - let stake: Vec = vec_to_fixed(&[0., 0.]); - let weights: Vec = vec![0., 0., 0., 0.]; - let weights: Vec> = vec_to_mat_fixed(&weights, 2, false); - let median: Vec = vec_to_fixed(&[0., 0.]); - assert_eq!(median, weighted_median_col(&stake, &weights, fixed(0.5))); - - let stake: Vec = vec_to_fixed(&[0., 0.75, 0.25, 0.]); - let weights: Vec = vec![0., 0.1, 0., 0., 0.2, 0.4, 0., 0.3, 0.1, 0., 0.4, 0.5]; - let weights: Vec> = vec_to_mat_fixed(&weights, 4, false); - let median: Vec = vec_to_fixed(&[0., 0.3, 0.4]); - assert_eq!(median, weighted_median_col(&stake, &weights, fixed(0.24))); - let median: Vec = vec_to_fixed(&[0., 0.2, 0.4]); - assert_eq!(median, weighted_median_col(&stake, &weights, fixed(0.26))); - let median: Vec = vec_to_fixed(&[0., 0.2, 0.1]); - assert_eq!(median, weighted_median_col(&stake, &weights, fixed(0.76))); - - let stake: Vec = vec_to_fixed(&[0., 0.3, 0.2, 0.5]); - let weights: Vec 
= vec![0., 0.1, 0., 0., 0.2, 0.4, 0., 0.3, 0.1, 0., 0., 0.5]; - let weights: Vec> = vec_to_mat_fixed(&weights, 4, false); - let median: Vec = vec_to_fixed(&[0., 0., 0.4]); - assert_eq!(median, weighted_median_col(&stake, &weights, fixed(0.51))); - } - - #[test] - fn test_math_weighted_median_col_sparse() { - let stake: Vec = vec_to_fixed(&[]); - let weights: Vec> = vec![vec![]]; - let median: Vec = vec_to_fixed(&[]); - assert_eq!( - median, - weighted_median_col_sparse(&stake, &weights, 0, fixed(0.5)) - ); - - let stake: Vec = vec_to_fixed(&[0., 0.]); - let weights: Vec = vec![0., 0., 0., 0.]; - let weights: Vec> = vec_to_sparse_mat_fixed(&weights, 2, false); - let median: Vec = vec_to_fixed(&[0., 0.]); - assert_eq!( - median, - weighted_median_col_sparse(&stake, &weights, 2, fixed(0.5)) - ); - - let stake: Vec = vec_to_fixed(&[0., 0.75, 0.25, 0.]); - let weights: Vec = vec![0., 0.1, 0., 0., 0.2, 0.4, 0., 0.3, 0.1, 0., 0.4, 0.5]; - let weights: Vec> = vec_to_sparse_mat_fixed(&weights, 4, false); - let median: Vec = vec_to_fixed(&[0., 0.3, 0.4]); - assert_eq!( - median, - weighted_median_col_sparse(&stake, &weights, 3, fixed(0.24)) - ); - let median: Vec = vec_to_fixed(&[0., 0.2, 0.4]); - assert_eq!( - median, - weighted_median_col_sparse(&stake, &weights, 3, fixed(0.26)) - ); - let median: Vec = vec_to_fixed(&[0., 0.2, 0.1]); - assert_eq!( - median, - weighted_median_col_sparse(&stake, &weights, 3, fixed(0.76)) - ); - - let stake: Vec = vec_to_fixed(&[0., 0.3, 0.2, 0.5]); - let weights: Vec = vec![0., 0.1, 0., 0., 0.2, 0.4, 0., 0.3, 0.1, 0., 0., 0.5]; - let weights: Vec> = vec_to_sparse_mat_fixed(&weights, 4, false); - let median: Vec = vec_to_fixed(&[0., 0., 0.4]); - assert_eq!( - median, - weighted_median_col_sparse(&stake, &weights, 3, fixed(0.51)) - ); - } - - #[test] - fn test_math_hadamard() { - let mat2: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let mat1: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., - ]; 
- let target: Vec = vec![ - 10., 40., 90., 160., 250., 360., 490., 640., 810., 1000., 1210., 1440., - ]; - let mat2 = vec_to_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_mat_fixed(&mat1, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = hadamard(&mat1, &mat2); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat1: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_mat_fixed(&mat1, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = hadamard(&mat1, &mat2); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![1., 0., 0., 0., 2., 0., 0., 0., 3., 0., 0., 0.]; - let mat1: Vec = vec![0., 0., 4., 0., 5., 0., 6., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 10., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_mat_fixed(&mat1, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = hadamard(&mat1, &mat2); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - } - - #[test] - fn test_math_hadamard_sparse() { - let mat2: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let mat1: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., - ]; - let target: Vec = vec![ - 10., 40., 90., 160., 250., 360., 490., 640., 810., 1000., 1210., 1440., - ]; - let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = hadamard_sparse(&mat1, &mat2, 3); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - 
let mat1: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = hadamard_sparse(&mat1, &mat2, 3); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![1., 0., 0., 0., 2., 0., 0., 0., 3., 0., 0., 0.]; - let mat1: Vec = vec![0., 0., 4., 0., 5., 0., 6., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 10., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = hadamard_sparse(&mat1, &mat2, 3); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - } - - #[test] - fn test_math_mat_ema() { - let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let new: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., - ]; - let target: Vec = vec![ - 1.9, 3.8, 5.7, 7.6, 9.5, 11.4, 13.3, 15.2, 17.1, 19., 20.9, 22.8, - ]; - let old = vec_to_mat_fixed(&old, 4, false); - let new = vec_to_mat_fixed(&new, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = mat_ema(&new, &old, I32F32::from_num(0.1)); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let new: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., - ]; - let target: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let old = vec_to_mat_fixed(&old, 4, false); - let new = vec_to_mat_fixed(&new, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = mat_ema(&new, &old, I32F32::from_num(0)); - 
assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let new: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., - ]; - let target: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., - ]; - let old = vec_to_mat_fixed(&old, 4, false); - let new = vec_to_mat_fixed(&new, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = mat_ema(&new, &old, I32F32::from_num(1)); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - } - - #[test] - fn test_math_sparse_mat_ema() { - let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let new: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., - ]; - let target: Vec = vec![ - 1.9, 3.8, 5.7, 7.6, 9.5, 11.4, 13.3, 15.2, 17.1, 19., 20.9, 22.8, - ]; - let old = vec_to_sparse_mat_fixed(&old, 4, false); - let new = vec_to_sparse_mat_fixed(&new, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let old: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; - let new: Vec = vec![10., 20., 0., 40., 0., 60., 0., 80., 90., 100., 110., 120.]; - let target: Vec = vec![1., 3.8, 2.7, 7.6, 0., 11.4, 6.3, 15.2, 9., 19., 20.9, 22.8]; - let old = vec_to_sparse_mat_fixed(&old, 4, false); - let new = vec_to_sparse_mat_fixed(&new, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let old: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let new: Vec = vec![10., 20., 0., 40., 0., 60., 0., 80., 90., 100., 110., 120.]; - let target: Vec = vec![1., 2., 0., 4., 0., 6., 0., 8., 9., 10., 11., 
12.]; - let old = vec_to_sparse_mat_fixed(&old, 4, false); - let new = vec_to_sparse_mat_fixed(&new, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let old: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let new: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let old = vec_to_sparse_mat_fixed(&old, 4, false); - let new = vec_to_sparse_mat_fixed(&new, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let old: Vec = vec![1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let new: Vec = vec![0., 0., 0., 0., 2., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0.9, 0., 0., 0., 0.2, 0., 0., 0., 0., 0., 0., 0.]; - let old = vec_to_sparse_mat_fixed(&old, 4, false); - let new = vec_to_sparse_mat_fixed(&new, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - } - - #[test] - fn test_math_matmul2() { - let epsilon: I32F32 = I32F32::from_num(0.0001); - let w: Vec> = vec![vec![I32F32::from_num(1.0); 3]; 3]; - assert_vec_compare( - &matmul(&w, &[I32F32::from_num(1.0); 3]), - &[ - I32F32::from_num(3), - I32F32::from_num(3), - I32F32::from_num(3), - ], - epsilon, - ); - assert_vec_compare( - &matmul(&w, &[I32F32::from_num(2.0); 3]), - &[ - I32F32::from_num(6), - I32F32::from_num(6), - I32F32::from_num(6), - ], - epsilon, - ); - assert_vec_compare( - &matmul(&w, &[I32F32::from_num(3.0); 3]), - &[ - I32F32::from_num(9), - I32F32::from_num(9), - I32F32::from_num(9), - ], - epsilon, - ); - 
assert_vec_compare( - &matmul(&w, &[I32F32::from_num(-1.0); 3]), - &[ - I32F32::from_num(-3), - I32F32::from_num(-3), - I32F32::from_num(-3), - ], - epsilon, - ); - let w: Vec> = vec![vec![I32F32::from_num(-1.0); 3]; 3]; - assert_vec_compare( - &matmul(&w, &[I32F32::from_num(1.0); 3]), - &[ - I32F32::from_num(-3), - I32F32::from_num(-3), - I32F32::from_num(-3), - ], - epsilon, - ); - assert_vec_compare( - &matmul(&w, &[I32F32::from_num(2.0); 3]), - &[ - I32F32::from_num(-6), - I32F32::from_num(-6), - I32F32::from_num(-6), - ], - epsilon, - ); - assert_vec_compare( - &matmul(&w, &[I32F32::from_num(3.0); 3]), - &[ - I32F32::from_num(-9), - I32F32::from_num(-9), - I32F32::from_num(-9), - ], - epsilon, - ); - assert_vec_compare( - &matmul(&w, &[I32F32::from_num(-1.0); 3]), - &[ - I32F32::from_num(3), - I32F32::from_num(3), - I32F32::from_num(3), - ], - epsilon, - ); - let w: Vec> = vec![ - vec![I32F32::from_num(1.0); 3], - vec![I32F32::from_num(2.0); 3], - vec![I32F32::from_num(3.0); 3], - ]; - assert_vec_compare( - &matmul(&w, &[I32F32::from_num(0.0); 3]), - &[ - I32F32::from_num(0.0), - I32F32::from_num(0.0), - I32F32::from_num(0.0), - ], - epsilon, - ); - assert_vec_compare( - &matmul(&w, &[I32F32::from_num(2.0); 3]), - &[ - I32F32::from_num(12), - I32F32::from_num(12), - I32F32::from_num(12), - ], - epsilon, - ); - let w: Vec> = vec![ - vec![ - I32F32::from_num(1), - I32F32::from_num(2), - I32F32::from_num(3) - ]; - 3 - ]; - assert_vec_compare( - &matmul(&w, &[I32F32::from_num(0.0); 3]), - &[ - I32F32::from_num(0.0), - I32F32::from_num(0.0), - I32F32::from_num(0.0), - ], - epsilon, - ); - assert_vec_compare( - &matmul(&w, &[I32F32::from_num(2.0); 3]), - &[ - I32F32::from_num(6), - I32F32::from_num(12), - I32F32::from_num(18), - ], - epsilon, - ); - } - - #[test] - fn test_math_fixed_to_u16() { - let expected = u16::MIN; - assert_eq!(fixed_to_u16(I32F32::from_num(expected)), expected); - - let expected = u16::MAX / 2; - 
assert_eq!(fixed_to_u16(I32F32::from_num(expected)), expected); - - let expected = u16::MAX; - assert_eq!(fixed_to_u16(I32F32::from_num(expected)), expected); - } - - #[test] - #[should_panic(expected = "overflow")] - fn test_math_fixed_to_u16_panics() { - let bad_input = I32F32::from_num(u32::MAX); - fixed_to_u16(bad_input); - - let bad_input = I32F32::from_num(-1); - fixed_to_u16(bad_input); - } - - // TODO: Investigate why `I32F32` and not `I64F64` - #[test] - fn test_math_fixed_to_u64() { - let expected = u64::MIN; - assert_eq!(fixed_to_u64(I32F32::from_num(expected)), expected); - - // let expected = u64::MAX / 2; - // assert_eq!(fixed_to_u64(I32F32::from_num(expected)), expected); - - // let expected = u64::MAX; - // assert_eq!(fixed_to_u64(I32F32::from_num(expected)), expected); - } - - #[test] - #[should_panic(expected = "-1 overflows")] - fn test_math_fixed_to_u64_panics() { - let bad_input = I32F32::from_num(-1); - fixed_to_u64(bad_input); - } - - #[test] - fn test_math_fixed64_to_u64() { - let expected = u64::MIN; - assert_eq!(fixed64_to_u64(I64F64::from_num(expected)), expected); - - let input = i64::MAX / 2; - let expected = u64::try_from(input).unwrap(); - assert_eq!(fixed64_to_u64(I64F64::from_num(input)), expected); - - let input = i64::MAX; - let expected = u64::try_from(input).unwrap(); - assert_eq!(fixed64_to_u64(I64F64::from_num(input)), expected); - } - - #[test] - #[should_panic(expected = "-1 overflows")] - fn test_math_fixed64_to_u64_panics() { - let bad_input = I64F64::from_num(-1); - fixed64_to_u64(bad_input); - } + // Return the computed EMA matrix. + result +} - /* @TODO: find the _true_ max, and half, input values */ - #[test] - fn test_math_fixed64_to_fixed32() { - let input = u64::MIN; - let expected = u32::try_from(input).unwrap(); - assert_eq!(fixed64_to_fixed32(I64F64::from_num(expected)), expected); +/// Return the quantile of a vector of I32F32 values. 
+pub fn quantile(data: &Vec, quantile: f64) -> I32F32 { + // Clone the input data to avoid modifying the original vector. + let mut sorted_data = data.clone(); - let expected = u32::MAX / 2; - let input = u64::from(expected); - assert_eq!(fixed64_to_fixed32(I64F64::from_num(input)), expected); - } - - #[test] - #[should_panic(expected = "overflow")] - fn test_math_fixed64_to_fixed32_panics() { - let bad_input = I64F64::from_num(u32::MAX); - fixed64_to_fixed32(bad_input); - } + // Sort the cloned data in ascending order, handling potential NaN values. + sorted_data.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal)); - #[test] - fn test_math_u16_to_fixed() { - let input = u16::MIN; - let expected = I32F32::from_num(input); - assert_eq!(u16_to_fixed(input), expected); + // Get the length of the sorted data. + let len = sorted_data.len(); - let input = u16::MAX / 2; - let expected = I32F32::from_num(input); - assert_eq!(u16_to_fixed(input), expected); - - let input = u16::MAX; - let expected = I32F32::from_num(input); - assert_eq!(u16_to_fixed(input), expected); + // If the data is empty, return 0 as the quantile value. + if len == 0 { + return I32F32::from_num(0); } - #[test] - fn test_math_u16_proportion_to_fixed() { - let input = u16::MIN; - let expected = I32F32::from_num(input); - assert_eq!(u16_proportion_to_fixed(input), expected); - } + // Calculate the position in the sorted array corresponding to the quantile. + let pos = quantile * (len - 1) as f64; - #[test] - fn test_fixed_proportion_to_u16() { - let expected = u16::MIN; - let input = I32F32::from_num(expected); - assert_eq!(fixed_proportion_to_u16(input), expected); - } + // Determine the lower index by flooring the position. 
+ let low = pos.floor() as usize; - #[test] - #[should_panic(expected = "overflow")] - fn test_fixed_proportion_to_u16_panics() { - let expected = u16::MAX; - let input = I32F32::from_num(expected); - fixed_proportion_to_u16(input); - } + // Determine the higher index by ceiling the position. + let high = pos.ceil() as usize; - #[test] - fn test_vec_fixed64_to_fixed32() { - let input = vec![I64F64::from_num(i32::MIN)]; - let expected = vec![I32F32::from_num(i32::MIN)]; - assert_eq!(vec_fixed64_to_fixed32(input), expected); + // If the low and high indices are the same, return the value at that index. + if low == high { + sorted_data[low] + } else { + // Otherwise, perform linear interpolation between the low and high values. + let low_value = sorted_data[low]; + let high_value = sorted_data[high]; - let input = vec![I64F64::from_num(i32::MAX)]; - let expected = vec![I32F32::from_num(i32::MAX)]; - assert_eq!(vec_fixed64_to_fixed32(input), expected); - } + // Calculate the weight for interpolation. + let weight = I32F32::from_num(pos - low as f64); - #[test] - #[should_panic(expected = "overflow")] - fn test_vec_fixed64_to_fixed32_panics() { - let bad_input = vec![I64F64::from_num(i64::MAX)]; - vec_fixed64_to_fixed32(bad_input); + // Return the interpolated value. + low_value + (high_value - low_value) * weight } +} - #[test] - #[allow(arithmetic_overflow)] - fn test_checked_sum() { - let overflowing_input = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, u64::MAX]; - // Expect None when overflow occurs - assert_eq!(checked_sum(&overflowing_input), None); - - let normal_input = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; - // Expect Some when no overflow occurs - assert_eq!(checked_sum(&normal_input), Some(55)); - - let empty_input: Vec = vec![]; - // Expect Some(u16::default()) when input is empty - assert_eq!(checked_sum(&empty_input), Some(u16::default())); +// TODO: Go over business logic to ensure the fallback is correct. +/// Safe ln function, returns 0 if value is 0. 
+pub fn safe_ln(value: I32F32) -> I32F32 { + ln(value).unwrap_or(I32F32::from_num(0.0)) +} - let single_input = vec![1]; - // Expect Some(...) when input is a single value - assert_eq!(checked_sum(&single_input), Some(1)); - } +// TODO: Go over business logic to ensure the fallback is correct. +/// Safe exp function, returns 0 if exp cannot be computed for the value. +pub fn safe_exp(value: I32F32) -> I32F32 { + exp(value).unwrap_or(I32F32::from_num(0.0)) } diff --git a/pallets/subtensor/src/root.rs b/pallets/subtensor/src/root.rs index 42c783c3b..a09957924 100644 --- a/pallets/subtensor/src/root.rs +++ b/pallets/subtensor/src/root.rs @@ -1009,12 +1009,12 @@ impl Pallet { NetworkRegisteredAt::::remove(netuid); // --- 8. Remove incentive mechanism memory. - let _ = Uids::::clear_prefix(netuid, u32::max_value(), None); - let _ = Keys::::clear_prefix(netuid, u32::max_value(), None); - let _ = Bonds::::clear_prefix(netuid, u32::max_value(), None); + let _ = Uids::::clear_prefix(netuid, u32::MAX, None); + let _ = Keys::::clear_prefix(netuid, u32::MAX, None); + let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); // --- 8. Removes the weights for this subnet (do not remove). - let _ = Weights::::clear_prefix(netuid, u32::max_value(), None); + let _ = Weights::::clear_prefix(netuid, u32::MAX, None); // --- 9. Iterate over stored weights and fill the matrix. 
for (uid_i, weights_i) in diff --git a/pallets/subtensor/src/subnet_info.rs b/pallets/subtensor/src/subnet_info.rs index cf6b66aea..5d81cbb7b 100644 --- a/pallets/subtensor/src/subnet_info.rs +++ b/pallets/subtensor/src/subnet_info.rs @@ -52,6 +52,9 @@ pub struct SubnetHyperparams { difficulty: Compact, commit_reveal_weights_interval: Compact, commit_reveal_weights_enabled: bool, + alpha_high: Compact, + alpha_low: Compact, + liquid_alpha_enabled: bool, } impl Pallet { @@ -155,6 +158,9 @@ impl Pallet { let difficulty = Self::get_difficulty_as_u64(netuid); let commit_reveal_weights_interval = Self::get_commit_reveal_weights_interval(netuid); let commit_reveal_weights_enabled = Self::get_commit_reveal_weights_enabled(netuid); + let alpha_high = AlphaHigh::::get(netuid); + let alpha_low = AlphaLow::::get(netuid); + let liquid_alpha_enabled = LiquidAlphaOn::::get(netuid); Some(SubnetHyperparams { rho: rho.into(), @@ -181,6 +187,9 @@ impl Pallet { difficulty: difficulty.into(), commit_reveal_weights_interval: commit_reveal_weights_interval.into(), commit_reveal_weights_enabled, + alpha_high: alpha_high.into(), + alpha_low: alpha_low.into(), + liquid_alpha_enabled: liquid_alpha_enabled.into(), }) } } diff --git a/pallets/subtensor/src/utils.rs b/pallets/subtensor/src/utils.rs index 54b7818c9..1b5782b28 100644 --- a/pallets/subtensor/src/utils.rs +++ b/pallets/subtensor/src/utils.rs @@ -1,6 +1,10 @@ use super::*; -use crate::system::{ensure_root, ensure_signed_or_root}; +use crate::{ + system::{ensure_root, ensure_signed_or_root}, + Error, +}; use sp_core::U256; +use substrate_fixed::types::I32F32; impl Pallet { pub fn ensure_subnet_owner_or_root( @@ -658,4 +662,44 @@ impl Pallet { pub fn set_nominator_min_required_stake(min_stake: u64) { NominatorMinRequiredStake::::put(min_stake); } + + pub fn get_alpha_high(netuid: u16) -> I32F32 { + I32F32::from_num(AlphaHigh::::get(netuid) as f64 / 1000.0) + } + + pub fn set_alpha_high(netuid: u16, alpha_high: u16) -> Result<(), 
DispatchError> { + ensure!( + Self::get_liquid_alpha_enabled(netuid), + Error::::LiquidAlphaDisabled + ); + AlphaHigh::::insert(netuid, alpha_high); + + Ok(()) + } + + pub fn get_alpha_low(netuid: u16) -> I32F32 { + I32F32::from_num(AlphaLow::::get(netuid) as f64 / 1000.0) + } + + pub fn set_alpha_low(netuid: u16, alpha_low: u16) -> Result<(), DispatchError> { + ensure!( + Self::get_liquid_alpha_enabled(netuid), + Error::::LiquidAlphaDisabled + ); + AlphaLow::::insert(netuid, alpha_low); + + Ok(()) + } + + pub fn set_liquid_alpha_enabled(netuid: u16, enabled: bool) { + LiquidAlphaOn::::set(netuid, enabled); + } + + pub fn get_liquid_alpha_enabled(netuid: u16) -> bool { + LiquidAlphaOn::::get(netuid) + } + + // pub fn set_consensus_for_uid(netuid: u16, uid: u16, consensus: u16) { + // Consensus::::insert(netuid, uid, consensus); + // } } diff --git a/pallets/subtensor/src/weights.rs b/pallets/subtensor/src/weights.rs index 72c811f80..548bc30af 100644 --- a/pallets/subtensor/src/weights.rs +++ b/pallets/subtensor/src/weights.rs @@ -405,7 +405,7 @@ impl Pallet { return weights; } weights.iter_mut().for_each(|x| { - *x = (*x as u64 * u16::max_value() as u64 / sum) as u16; + *x = (*x as u64 * u16::MAX as u64 / sum) as u16; }); weights } diff --git a/pallets/subtensor/tests/block_step.rs b/pallets/subtensor/tests/block_step.rs index e1b4fe1de..5df173906 100644 --- a/pallets/subtensor/tests/block_step.rs +++ b/pallets/subtensor/tests/block_step.rs @@ -868,5 +868,30 @@ fn test_emission_based_on_registration_status() { .len(), n as usize ); + + let block: u64 = 0; + // drain the emission tuples for the subnet with registration on + SubtensorModule::drain_emission(block); + // Turn on registration for the subnet with registration off + SubtensorModule::set_network_registration_allowed(netuid_off, true); + SubtensorModule::set_network_registration_allowed(netuid_on, false); + + // Generate emission at the next block + let next_block: u64 = block + 1; + 
SubtensorModule::generate_emission(next_block); + + // Verify that emission tuples are now loaded for the subnet with registration turned on + assert!(SubtensorModule::get_loaded_emission_tuples(netuid_off).is_some()); + log::info!( + "Emissions for netuid with registration on: {:?}", + SubtensorModule::get_loaded_emission_tuples(netuid_on) + ); + assert!(SubtensorModule::get_loaded_emission_tuples(netuid_on).is_none()); + assert_eq!( + SubtensorModule::get_loaded_emission_tuples(netuid_off) + .unwrap() + .len(), + n as usize + ); }); } diff --git a/pallets/subtensor/tests/epoch.rs b/pallets/subtensor/tests/epoch.rs index 0bfd11ba4..9db8fc21d 100644 --- a/pallets/subtensor/tests/epoch.rs +++ b/pallets/subtensor/tests/epoch.rs @@ -1,6 +1,7 @@ use crate::mock::*; -use frame_support::assert_ok; +use frame_support::{assert_err, assert_ok}; use frame_system::Config; +use pallet_subtensor::*; use rand::{distributions::Uniform, rngs::StdRng, seq::SliceRandom, thread_rng, Rng, SeedableRng}; use sp_core::U256; use std::time::Instant; @@ -38,7 +39,7 @@ fn normalize_weights(mut weights: Vec) -> Vec { return weights; } weights.iter_mut().for_each(|x| { - *x = (*x as u64 * u16::max_value() as u64 / sum) as u16; + *x = (*x as u64 * u16::MAX as u64 / sum) as u16; }); weights } @@ -1262,6 +1263,322 @@ fn test_bonds() { }); } +#[test] +fn test_bonds_with_liquid_alpha() { + new_test_ext(1).execute_with(|| { + let sparse: bool = true; + let n: u16 = 8; + let netuid: u16 = 1; + let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead + let max_stake: u64 = 4; + let stakes: Vec = vec![1, 2, 3, 4, 0, 0, 0, 0]; + let block_number = System::block_number(); + add_network(netuid, tempo, 0); + SubtensorModule::set_max_allowed_uids(netuid, n); + SubtensorModule::set_max_registrations_per_block(netuid, n); + SubtensorModule::set_target_registrations_per_interval(netuid, n); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + 
SubtensorModule::set_min_allowed_weights(netuid, 1); + SubtensorModule::set_max_weight_limit(netuid, u16::MAX); + + // Register validators and servers + for key in 0..n as u64 { + SubtensorModule::add_balance_to_coldkey_account(&U256::from(key), max_stake); + let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + key * 1_000_000, + &U256::from(key), + ); + assert_ok!(SubtensorModule::register( + <::RuntimeOrigin>::signed(U256::from(key)), + netuid, + block_number, + nonce, + work, + U256::from(key), + U256::from(key) + )); + SubtensorModule::increase_stake_on_coldkey_hotkey_account( + &U256::from(key), + &U256::from(key), + stakes[key as usize], + ); + } + + // Initialize with first epoch + SubtensorModule::epoch(netuid, 1_000_000_000); + next_block(); + + // Set weights + for uid in 0..(n / 2) as u16 { + SubtensorModule::set_validator_permit_for_uid(netuid, uid, true); + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid)), + netuid, + ((n / 2)..n).collect(), + vec![u16::MAX / 4, u16::MAX / 2, (u16::MAX / 4) * 3, u16::MAX], + 0 + )); + } + + // Enable Liquid Alpha + SubtensorModule::set_liquid_alpha_enabled(netuid, true); + assert_ok!(SubtensorModule::set_alpha_high(netuid, 900)); + assert_ok!(SubtensorModule::set_alpha_low(netuid, 100)); + // Run epoch with Liquid Alpha + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } + + // Check bonds and emissions + let bonds = SubtensorModule::get_bonds(netuid); + + /* n: 8 + current_block: 2; activity_cutoff: 5000; + Last update: [1, 1, 1, 1, 0, 0, 0, 0] + activity_cutoff: 5000 + Last update: [2, 2, 2, 2, 1, 1, 1, 1] + Inactive: [false, false, false, false, false, false, false, false] + Block at registration: [1, 1, 1, 1, 1, 1, 1, 1] + hotkeys: [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7)] + Stake: [1, 2, 3, 4, 0, 0, 0, 0] + Normalised Stake: 
[0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] + validator_permits: [true, true, true, true, true, true, true, true] + max_allowed_validators: 8 + new_validator_permits: [true, true, true, true, true, true, true, true] + Active Stake: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] + Weights: [[(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] + Weights (permit): [[(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] + Weights (permit+diag): [[(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] + Weights (permit+diag+outdate): [[(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] + Weights (mask+norm): [[(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [], [], [], []] + Ranks (before): [0, 0, 0, 0, 0.099997558, 0.2000012202, 0.2999926745, 0.4000085443] + Consensus: [0, 0, 0, 0, 0.0999975584, 0.2000012207, 0.2999926754, 0.400008545] + Weights: [[(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 
0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [], [], [], []] + Validator Trust: [0.9999999995, 0.9999999995, 0.9999999995, 0.9999999995, 0, 0, 0, 0] + Ranks (after): [0, 0, 0, 0, 0.099997558, 0.2000012202, 0.2999926745, 0.4000085443] + T: [0, 0, 0, 0, 1, 1, 1, 1] + Incentive (=Rank): [0, 0, 0, 0, 0.0999975582, 0.2000012207, 0.2999926752, 0.4000085455] + B: [[], [], [], [], [], [], [], []] + B (outdatedmask): [[], [], [], [], [], [], [], []] + B (mask+norm): [[], [], [], [], [], [], [], []] + ΔB: [[(4, 0.0099997558), (5, 0.020000122), (6, 0.0299992673), (7, 0.0400008543)], [(4, 0.0199995115), (5, 0.040000244), (6, 0.0599985349), (7, 0.0800017088)], [(4, 0.0299992673), (5, 0.060000366), (6, 0.0899978024), (7, 0.1200025633)], [(4, 0.0399990233), (5, 0.080000488), (6, 0.11999707), (7, 0.1600034179)], [], [], [], []] + ΔB (norm): [[(4, 0.0999999996), (5, 0.0999999999), (6, 0.0999999994), (7, 0.0999999996)], [(4, 0.1999999995), (5, 0.2), (6, 0.1999999997), (7, 0.1999999997)], [(4, 0.299999999), (5, 0.2999999998), (6, 0.3), (7, 0.3)], [(4, 0.4000000013), (5, 0.4), (6, 0.4000000004), (7, 0.4000000001)], [], [], [], []] + Exponential Moving Average Bonds Liquid Alpha: [[(4, 0.0499983232), (5, 0.0899999999), (6, 0.0899999994), (7, 0.0899999996)], [(4, 0.0999966469), (5, 0.18), (6, 0.1799999997), (7, 0.1799999997)], [(4, 0.1499949703), (5, 0.2699999998), (6, 0.2699999998), (7, 0.2699999998)], [(4, 0.199993295), (5, 0.3599999999), (6, 0.36), (7, 0.3599999999)], [], [], [], []] + Exponential Moving Average Bonds: [[(4, 0.0999999992), (5, 0.0999999999), (6, 0.0999999994), (7, 0.0999999996)], [(4, 0.1999999995), (5, 0.2), (6, 0.1999999997), (7, 0.1999999997)], [(4, 0.2999999993), (5, 0.2999999998), (6, 0.3), (7, 0.3)], [(4, 0.4000000015), (5, 0.4), (6, 0.4000000004), (7, 0.4000000001)], [], [], [], []] + Dividends: [0.0999999994, 0.1999999997, 0.3, 0.4000000006, 0, 0, 0, 0] + Normalized Server Emission: 
[0, 0, 0, 0, 0.049998779, 0.1000006103, 0.1499963375, 0.2000042726] + Server Emission: [0, 0, 0, 0, 49998779, 100000610, 149996337, 200004272] + Normalized Validator Emission: [0.0499999996, 0.0999999999, 0.15, 0.2000000002, 0, 0, 0, 0] + Validator Emission: [49999999, 99999999, 149999999, 200000000, 0, 0, 0, 0] + Normalized Combined Emission: [0.0499999996, 0.0999999999, 0.15, 0.2000000002, 0.049998779, 0.1000006103, 0.1499963375, 0.2000042726] + Combined Emission: [49999999, 99999999, 149999999, 200000000, 49998779, 100000610, 149996337, 200004272] + Pruning Scores: [0.0499999996, 0.0999999999, 0.15, 0.2000000002, 0.049998779, 0.1000006103, 0.1499963375, 0.2000042726] + */ + + // Expected bonds calculations + // For uid 0: + // Initial weights: [0.25, 0.5, 0.75, 1.0] + // Active stake: [1, 2, 3, 4] + // ΔB = W◦S = [0.25*1, 0.5*2, 0.75*3, 1.0*4] = [0.25, 1.0, 2.25, 4.0] + // Normalize ΔB: [0.25/7.5, 1.0/7.5, 2.25/7.5, 4.0/7.5] = [0.0333, 0.1333, 0.3, 0.5333] + // Final bonds for netuid: [16383, 32767, 49151, 65535] + + assert_eq!(bonds[0][4], 16383); // Note: Calculated as explained above + assert_eq!(bonds[1][4], 32767); // Note: Calculated as explained above + assert_eq!(bonds[2][4], 49151); // Note: Calculated as explained above + assert_eq!(bonds[3][4], 65535); // Note: Calculated as explained above + + // === Set self-weight only on val1 + let uid = 0; + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid)), + netuid, + vec![uid], + vec![u16::MAX], + 0 + )); + next_block(); + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } + + let bonds = SubtensorModule::get_bonds(netuid); + assert_eq!(bonds[0][4], 14582); + assert_eq!(bonds[1][4], 32767); + assert_eq!(bonds[2][4], 49151); + assert_eq!(bonds[3][4], 65535); + + // === Set self-weight only on val2 + let uid = 1; + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid)), + 
netuid, + vec![uid], + vec![u16::MAX], + 0 + )); + next_block(); + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } + let bonds = SubtensorModule::get_bonds(netuid); + + /* n: 8 + current_block: 4; activity_cutoff: 5000; + Last update: [2, 3, 2, 2, 1, 1, 1, 1] + Inactive: [false, false, false, false, false, false, false, false] + Block at registration: [1, 1, 1, 1, 1, 1, 1, 1] + hotkeys: [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7)] + Stake: [1, 2, 3, 4, 0, 0, 0, 0] + Normalised Stake: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] + validator_permits: [true, true, true, true, true, true, true, true] + max_allowed_validators: 64 + new_validator_permits: [true, true, true, true, true, true, true, true] + Active Stake: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] + Weights: [[(0, 65535)], [(1, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] + Weights (permit): [[(0, 65535)], [(1, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] + Weights (permit+diag): [[], [], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] + Weights (permit+diag+outdate): [[], [], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [(4, 16383), (5, 32767), (6, 49149), (7, 65535)], [], [], [], []] + Weights (mask+norm): [[], [], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [], [], [], []] + Ranks (before): [0, 0, 0, 0, 0.0699982906, 0.1400008542, 0.2099948723, 0.2800059812] + Consensus: [0, 0, 0, 0, 0.0999975584, 0.2000012207, 0.2999926754, 0.400008545] + Weights: [[], [], [(4, 0.0999975584), (5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [(4, 0.0999975584), 
(5, 0.2000012207), (6, 0.2999926754), (7, 0.400008545)], [], [], [], []] + Validator Trust: [0, 0, 0.9999999995, 0.9999999995, 0, 0, 0, 0] + Ranks (after): [0, 0, 0, 0, 0.0699982906, 0.1400008542, 0.2099948723, 0.2800059812] + T: [0, 0, 0, 0, 1, 1, 1, 1] + Incentive (=Rank): [0, 0, 0, 0, 0.0999975582, 0.2000012207, 0.2999926754, 0.4000085455] + B: [[(4, 7760), (5, 1489), (6, 1489), (7, 1489)], [(4, 32767), (5, 32767), (6, 32767), (7, 32767)], [(4, 49151), (5, 49151), (6, 49151), (7, 49151)], [(4, 65535), (5, 65535), (6, 65535), (7, 65535)], [], [], [], []] + B (outdatedmask): [[(4, 7760), (5, 1489), (6, 1489), (7, 1489)], [(4, 32767), (5, 32767), (6, 32767), (7, 32767)], [(4, 49151), (5, 49151), (6, 49151), (7, 49151)], [(4, 65535), (5, 65535), (6, 65535), (7, 65535)], [], [], [], []] + B (mask+norm): [[(4, 0.0499958121), (5, 0.00999718), (6, 0.00999718), (7, 0.00999718)], [(4, 0.211109894), (5, 0.2199983886), (6, 0.2199983886), (7, 0.2199983886)], [(4, 0.3166680625), (5, 0.3300009398), (6, 0.3300009398), (7, 0.3300009398)], [(4, 0.4222262308), (5, 0.4400034912), (6, 0.4400034912), (7, 0.4400034912)], [], [], [], []] + ΔB: [[], [], [(4, 0.0299992673), (5, 0.060000366), (6, 0.0899978024), (7, 0.1200025633)], [(4, 0.0399990233), (5, 0.080000488), (6, 0.11999707), (7, 0.1600034179)], [], [], [], []] + ΔB (norm): [[], [], [(4, 0.428571427), (5, 0.4285714284), (6, 0.4285714284), (7, 0.4285714284)], [(4, 0.5714285728), (5, 0.5714285714), (6, 0.5714285714), (7, 0.5714285714)], [], [], [], []] + Exponential Moving Average Bonds Liquid Alpha: [[(4, 0.024998744), (5, 0.000999718), (6, 0.000999718), (7, 0.000999718)], [(4, 0.105558486), (5, 0.0219998388), (6, 0.0219998388), (7, 0.0219998388)], [(4, 0.3726178685), (5, 0.4187143792), (6, 0.4187143792), (7, 0.4187143792)], [(4, 0.4968249004), (5, 0.5582860631), (6, 0.5582860631), (7, 0.5582860631)], [], [], [], []] + Exponential Moving Average Bonds: [[(4, 0.024998744), (5, 0.000999718), (6, 0.000999718), (7, 0.000999718)], [(4, 
0.105558486), (5, 0.0219998388), (6, 0.0219998388), (7, 0.0219998388)], [(4, 0.3726178687), (5, 0.4187143794), (6, 0.4187143794), (7, 0.4187143794)], [(4, 0.4968249009), (5, 0.5582860636), (6, 0.5582860636), (7, 0.5582860636)], [], [], [], []] + Dividends: [0.0033995616, 0.030355499, 0.4141048414, 0.5521400978, 0, 0, 0, 0] + Normalized Server Emission: [0, 0, 0, 0, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] + Server Emission: [0, 0, 0, 0, 49998779, 100000610, 149996337, 200004272] + Normalized Validator Emission: [0.0016997808, 0.0151777493, 0.2070524206, 0.2760700488, 0, 0, 0, 0] + Validator Emission: [1699780, 15177749, 207052420, 276070048, 0, 0, 0, 0] + Normalized Combined Emission: [0.0016997808, 0.0151777493, 0.2070524206, 0.2760700488, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] + Combined Emission: [1699780, 15177749, 207052420, 276070048, 49998779, 100000610, 149996337, 200004272] + Pruning Scores: [0.0016997808, 0.0151777493, 0.2070524206, 0.2760700488, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] + */ + + assert_eq!(bonds[0][4], 12603); + assert_eq!(bonds[1][4], 28321); + assert_eq!(bonds[2][4], 49151); + assert_eq!(bonds[3][4], 65535); + }); +} + +#[test] +fn test_bonds_with_extreme_alpha_values() { + new_test_ext(1).execute_with(|| { + let sparse: bool = true; + let n: u16 = 8; + let netuid: u16 = 1; + let tempo: u16 = u16::MAX - 1; + let max_stake: u64 = 4; + let stakes: Vec = vec![1, 2, 3, 4, 0, 0, 0, 0]; + let block_number = System::block_number(); + add_network(netuid, tempo, 0); + SubtensorModule::set_max_allowed_uids(netuid, n); + SubtensorModule::set_max_registrations_per_block(netuid, n); + SubtensorModule::set_target_registrations_per_interval(netuid, n); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_min_allowed_weights(netuid, 1); + SubtensorModule::set_max_weight_limit(netuid, u16::MAX); + + for key in 0..n as u64 { + 
SubtensorModule::add_balance_to_coldkey_account(&U256::from(key), max_stake); + let (nonce, work) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + key * 1_000_000, + &U256::from(key), + ); + assert_ok!(SubtensorModule::register( + RuntimeOrigin::signed(U256::from(key)), + netuid, + block_number, + nonce, + work, + U256::from(key), + U256::from(key) + )); + SubtensorModule::increase_stake_on_coldkey_hotkey_account( + &U256::from(key), + &U256::from(key), + stakes[key as usize], + ); + } + + SubtensorModule::epoch(netuid, 1_000_000_000); + next_block(); + + for uid in 0..(n / 2) as u16 { + SubtensorModule::set_validator_permit_for_uid(netuid, uid, true); + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid)), + netuid, + ((n / 2)..n).collect(), + vec![u16::MAX / 4, u16::MAX / 2, (u16::MAX / 4) * 3, u16::MAX], + 0 + )); + } + SubtensorModule::set_liquid_alpha_enabled(netuid, true); + assert_ok!(SubtensorModule::set_alpha_high(netuid, u16::MAX)); + assert_ok!(SubtensorModule::set_alpha_low(netuid, u16::MIN)); + // Run epoch with Liquid Alpha + if sparse { + SubtensorModule::epoch(netuid, 1_000_000_000); + } else { + SubtensorModule::epoch_dense(netuid, 1_000_000_000); + } + let bonds = SubtensorModule::get_bonds(netuid); + + log::info!("bonds: {:?}", bonds); + + // Check for reasonable outputs despite extreme alpha values + assert!(bonds + .iter() + .flatten() + .all(|&bond| bond >= 0 && bond <= 65535)); + }); +} + +#[test] +fn test_set_alpha_disabled() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = u16::MAX - 1; + add_network(netuid, tempo, 0); + + // Explicitly set to false + SubtensorModule::set_liquid_alpha_enabled(netuid, false); + assert_err!( + SubtensorModule::set_alpha_high(netuid, u16::MAX), + Error::::LiquidAlphaDisabled + ); + assert_err!( + SubtensorModule::set_alpha_low(netuid, 12_u16), + Error::::LiquidAlphaDisabled + ); + + 
SubtensorModule::set_liquid_alpha_enabled(netuid, true); + assert_ok!(SubtensorModule::set_alpha_high(netuid, u16::MAX)); + assert_ok!(SubtensorModule::set_alpha_low(netuid, 12_u16)); + }); +} + // Test that epoch masks out inactive stake of validators with outdated weights beyond activity cutoff. #[test] fn test_active_stake() { @@ -1972,6 +2289,385 @@ fn test_validator_permits() { } } +#[test] +fn test_compute_alpha_values() { + // Define the consensus values. + let consensus = vec![ + I32F32::from_num(0.1), + I32F32::from_num(0.5), + I32F32::from_num(0.9), + ]; + // Define the logistic function parameters 'a' and 'b'. + let a = I32F32::from_num(1.0); + let b = I32F32::from_num(0.0); + + // Compute the alpha values using the function. + let alpha = SubtensorModule::compute_alpha_values(&consensus, a, b); + + // Ensure the length of the alpha vector matches the consensus vector. + assert_eq!(alpha.len(), consensus.len()); + + // Manually compute the expected alpha values for each consensus value. + // The logistic function is: 1 / (1 + exp(b - a * c)) + // where c is the consensus value. 
+ + // For consensus[0] = 0.1: + // exp_val = exp(0.0 - 1.0 * 0.1) = exp(-0.1) + // alpha[0] = 1 / (1 + exp(-0.1)) ~ 0.9048374180359595 + let exp_val_0 = I32F32::from_num(0.9048374180359595); + let expected_alpha_0 = + I32F32::from_num(1.0).saturating_div(I32F32::from_num(1.0).saturating_add(exp_val_0)); + + // For consensus[1] = 0.5: + // exp_val = exp(0.0 - 1.0 * 0.5) = exp(-0.5) + // alpha[1] = 1 / (1 + exp(-0.5)) ~ 0.6065306597126334 + let exp_val_1 = I32F32::from_num(0.6065306597126334); + let expected_alpha_1 = + I32F32::from_num(1.0).saturating_div(I32F32::from_num(1.0).saturating_add(exp_val_1)); + + // For consensus[2] = 0.9: + // exp_val = exp(0.0 - 1.0 * 0.9) = exp(-0.9) + // alpha[2] = 1 / (1 + exp(-0.9)) ~ 0.4065696597405991 + let exp_val_2 = I32F32::from_num(0.4065696597405991); + let expected_alpha_2 = + I32F32::from_num(1.0).saturating_div(I32F32::from_num(1.0).saturating_add(exp_val_2)); + + // Define an epsilon for approximate equality checks. + let epsilon = I32F32::from_num(1e-6); + + // Assert that the computed alpha values match the expected values within the epsilon. + assert_approx_eq(alpha[0], expected_alpha_0, epsilon); + assert_approx_eq(alpha[1], expected_alpha_1, epsilon); + assert_approx_eq(alpha[2], expected_alpha_2, epsilon); +} + +#[test] +fn test_clamp_alpha_values() { + // Define the alpha values. + let alpha = vec![ + I32F32::from_num(0.1), + I32F32::from_num(0.5), + I32F32::from_num(0.9), + ]; + // Define the high and low clamping values. + let alpha_high = I32F32::from_num(0.8); + let alpha_low = I32F32::from_num(0.2); + + // Compute the clamped alpha values using the function. + let clamped_alpha = SubtensorModule::clamp_alpha_values(alpha.clone(), alpha_high, alpha_low); + + // Ensure the length of the clamped alpha vector matches the original alpha vector. + assert_eq!(clamped_alpha.len(), alpha.len()); + + // Manually compute the expected clamped alpha values for each alpha value. 
+ // The clamping logic is: max(alpha_low, min(alpha_high, a)) + + // For alpha[0] = 0.1: + // clamped_a = max(0.2, min(0.8, 0.1)) = max(0.2, 0.1) = 0.2 + let expected_clamped_alpha_0 = I32F32::from_num(0.2); + + // For alpha[1] = 0.5: + // clamped_a = max(0.2, min(0.8, 0.5)) = max(0.2, 0.5) = 0.5 + let expected_clamped_alpha_1 = I32F32::from_num(0.5); + + // For alpha[2] = 0.9: + // clamped_a = max(0.2, min(0.8, 0.9)) = max(0.2, 0.8) = 0.8 + let expected_clamped_alpha_2 = I32F32::from_num(0.8); + + // Assert that the computed clamped alpha values match the expected values. + assert_eq!(clamped_alpha[0], expected_clamped_alpha_0); + assert_eq!(clamped_alpha[1], expected_clamped_alpha_1); + assert_eq!(clamped_alpha[2], expected_clamped_alpha_2); +} + +#[test] +fn test_calculate_logistic_params() { + // Define test inputs + let alpha_high = I32F32::from_num(0.9); + let alpha_low = I32F32::from_num(0.1); + let consensus_high = I32F32::from_num(0.8); + let consensus_low = I32F32::from_num(0.2); + + // Expected values + // a = (ln((1 / alpha_high - 1)) - ln((1 / alpha_low - 1))) / (consensus_low - consensus_high) + // = (ln((1 / 0.9 - 1)) - ln((1 / 0.1 - 1))) / (0.2 - 0.8) + // = (ln(0.1111) - ln(9)) / -0.6 + // = (-2.1972 - 2.1972) / -0.6 + // = -4.3944 / -0.6 + // = 7.324 + let expected_a = I32F32::from_num(7.324); + + // b = ln((1 / alpha_low - 1)) + a * consensus_low + // = ln((1 / 0.1 - 1)) + 7.324 * 0.2 + // = ln(9) + 1.4648 + // = 2.1972 + 1.4648 + // = 3.662 + let expected_b = I32F32::from_num(3.662); + + // Call the function + let (a, b) = SubtensorModule::calculate_logistic_params( + alpha_high, + alpha_low, + consensus_high, + consensus_low, + ); + + // Assert the results + assert!( + (a - expected_a).abs() < I32F32::from_num(0.001), + "Expected a: {:?}, got: {:?}", + expected_a, + a + ); + assert!( + (b - expected_b).abs() < I32F32::from_num(0.001), + "Expected b: {:?}, got: {:?}", + expected_b, + b + ); +} + +#[test] +fn 
test_calculate_logistic_params_edge_cases() { + // Edge Case 1: Alpha values at their boundaries (0 and 1) + let alpha_high = I32F32::from_num(1.0); + let alpha_low = I32F32::from_num(0.0); + let consensus_high = I32F32::from_num(0.8); + let consensus_low = I32F32::from_num(0.2); + + // Call the function + let (a, b) = SubtensorModule::calculate_logistic_params( + alpha_high, + alpha_low, + consensus_high, + consensus_low, + ); + + // Assert the results + assert_eq!(a, I32F32::from_num(0.0), "Expected a to be 0, got: {:?}", a); + assert_eq!(b, I32F32::from_num(0.0), "Expected b to be 0, got: {:?}", b); + + // Edge Case 2: Consensus values at their boundaries (0 and 1) + let alpha_high = I32F32::from_num(0.9); + let alpha_low = I32F32::from_num(0.1); + let consensus_high = I32F32::from_num(1.0); + let consensus_low = I32F32::from_num(0.0); + + // Call the function + let (a, b) = SubtensorModule::calculate_logistic_params( + alpha_high, + alpha_low, + consensus_high, + consensus_low, + ); + + // Expected values + // a = (ln((1 / 0.9 - 1)) - ln((1 / 0.1 - 1))) / (0.0 - 1.0) + // = (ln(0.1111) - ln(9)) / -1.0 + // = (-2.1972 - 2.1972) / -1.0 + // = -4.3944 / -1.0 + // = 4.3944 + let expected_a = I32F32::from_num(4.3944); + + // b = ln((1 / 0.1 - 1)) + a * 0.0 + // = ln(9) + 0 + // = 2.1972 + let expected_b = I32F32::from_num(2.1972); + + // Assert the results + assert!( + (a - expected_a).abs() < I32F32::from_num(0.001), + "Expected a: {:?}, got: {:?}", + expected_a, + a + ); + assert!( + (b - expected_b).abs() < I32F32::from_num(0.001), + "Expected b: {:?}, got: {:?}", + expected_b, + b + ); + + // Edge Case 3: Alpha values being equal + let alpha_high = I32F32::from_num(0.5); + let alpha_low = I32F32::from_num(0.5); + let consensus_high = I32F32::from_num(0.8); + let consensus_low = I32F32::from_num(0.2); + + // Call the function + let (a, b) = SubtensorModule::calculate_logistic_params( + alpha_high, + alpha_low, + consensus_high, + consensus_low, + ); + + // Assert 
the results + assert_eq!(a, I32F32::from_num(0.0), "Expected a to be 0, got: {:?}", a); + assert_eq!(b, I32F32::from_num(0.0), "Expected b to be 0, got: {:?}", b); + + // Edge Case 4: Consensus values being equal + let alpha_high = I32F32::from_num(0.9); + let alpha_low = I32F32::from_num(0.1); + let consensus_high = I32F32::from_num(0.5); + let consensus_low = I32F32::from_num(0.5); + + // Call the function + let (a, b) = SubtensorModule::calculate_logistic_params( + alpha_high, + alpha_low, + consensus_high, + consensus_low, + ); + + // Assert the results + assert_eq!(a, I32F32::from_num(0.0), "Expected a to be 0, got: {:?}", a); + assert_eq!(b, I32F32::from_num(0.0), "Expected b to be 0, got: {:?}", b); +} + +#[test] +fn test_compute_ema_bonds_with_liquid_alpha_sparse() { + // Define test inputs + let bonds_delta = vec![ + vec![(0, I32F32::from_num(0.1)), (1, I32F32::from_num(0.2))], + vec![(0, I32F32::from_num(0.3)), (1, I32F32::from_num(0.4))], + ]; + let bonds = vec![ + vec![(0, I32F32::from_num(0.5)), (1, I32F32::from_num(0.6))], + vec![(0, I32F32::from_num(0.7)), (1, I32F32::from_num(0.8))], + ]; + let alpha = vec![I32F32::from_num(0.9), I32F32::from_num(0.8)]; + + // Expected values + // EMA calculation for each bond: + // EMA = alpha * bond_delta + (1 - alpha) * bond + // For bond (0, 0): + // EMA = 0.9 * 0.1 + (1 - 0.9) * 0.5 = 0.09 + 0.05 = 0.14 + // For bond (0, 1): + // EMA = 0.8 * 0.2 + (1 - 0.8) * 0.6 = 0.16 + 0.12 = 0.28 + // For bond (1, 0): + // EMA = 0.9 * 0.3 + (1 - 0.9) * 0.7 = 0.27 + 0.07 = 0.34 + // For bond (1, 1): + // EMA = 0.8 * 0.4 + (1 - 0.8) * 0.8 = 0.32 + 0.16 = 0.48 + let expected_ema_bonds = vec![ + vec![(0, I32F32::from_num(0.14)), (1, I32F32::from_num(0.28))], + vec![(0, I32F32::from_num(0.34)), (1, I32F32::from_num(0.48))], + ]; + + // Call the function + let ema_bonds = + SubtensorModule::compute_ema_bonds_with_liquid_alpha_sparse(&bonds_delta, &bonds, alpha); + + // Assert the results with an epsilon for approximate equality + 
let epsilon = I32F32::from_num(1e-6); + assert_approx_eq_vec_of_vec(&ema_bonds, &expected_ema_bonds, epsilon); +} + +#[test] +fn test_compute_ema_bonds_with_liquid_alpha_sparse_empty() { + // Test with empty inputs + let bonds_delta: Vec> = vec![]; + let bonds: Vec> = vec![]; + let alpha: Vec = vec![]; + + // Expected values: Empty Vec + let expected_ema_bonds: Vec> = vec![]; + + // Call the function + let ema_bonds = + SubtensorModule::compute_ema_bonds_with_liquid_alpha_sparse(&bonds_delta, &bonds, alpha); + + // Assert the results + assert_eq!( + ema_bonds, expected_ema_bonds, + "Expected EMA bonds: {:?}, got: {:?}", + expected_ema_bonds, ema_bonds + ); +} + +// #[test] +// fn test_compute_ema_bonds_sparse_with_liquid_alpha() { +// new_test_ext(1).execute_with(|| { +// let sparse: bool = true; +// let n: u16 = 8; +// let netuid: u16 = 1; +// let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead +// let max_stake: u64 = 4; +// let stakes: Vec = vec![1, 2, 3, 4, 0, 0, 0, 0]; +// let block_number = System::block_number(); +// add_network(netuid, tempo, 0); +// SubtensorModule::set_max_allowed_uids(netuid, n); +// SubtensorModule::set_max_registrations_per_block(netuid, n); +// SubtensorModule::set_target_registrations_per_interval(netuid, n); +// SubtensorModule::set_weights_set_rate_limit(netuid, 0); +// SubtensorModule::set_min_allowed_weights(netuid, 1); +// SubtensorModule::set_max_weight_limit(netuid, u16::MAX); + +// // Register validators and servers +// for key in 0..n as u64 { +// SubtensorModule::add_balance_to_coldkey_account(&U256::from(key), max_stake); +// let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( +// netuid, +// block_number, +// key * 1_000_000, +// &U256::from(key), +// ); +// assert_ok!(SubtensorModule::register( +// <::RuntimeOrigin>::signed(U256::from(key)), +// netuid, +// block_number, +// nonce, +// work, +// U256::from(key), +// U256::from(key) 
+// )); +// SubtensorModule::increase_stake_on_coldkey_hotkey_account( +// &U256::from(key), +// &U256::from(key), +// stakes[key as usize], +// ); +// } + +// // Initialize with first epoch +// SubtensorModule::epoch(netuid, 1_000_000_000); +// step_block(1); + +// // Set weights +// for uid in 0..(n / 2) as u16 { +// SubtensorModule::set_validator_permit_for_uid(netuid, uid, true); +// assert_ok!(SubtensorModule::set_weights( +// RuntimeOrigin::signed(U256::from(uid)), +// netuid, +// ((n / 2)..n).collect(), +// vec![u16::MAX / 4, u16::MAX / 2, (u16::MAX / 4) * 3, u16::MAX], +// 0 +// )); +// } + +// // Enable LiquidAlpha +// SubtensorModule::set_liquid_alpha_enabled(netuid, true); +// assert_eq!(SubtensorModule::get_liquid_alpha_enabled(netuid), true); + +// // Continue with additional epochs to mimic the end-to-end test +// // for _ in 0..5 { +// // next_block(); +// // if sparse { +// // SubtensorModule::epoch(netuid, 1_000_000_000); +// // } else { +// // SubtensorModule::epoch_dense(netuid, 1_000_000_000); +// // } +// // } + +// // Fetch the final bonds and validate +// let final_bonds = SubtensorModule::get_bonds(netuid); +// // Example assertions, adjust as needed based on expected LiquidAlpha values +// assert_eq!(final_bonds[0][0], 14582); +// assert_eq!(final_bonds[1][1], 32767); +// assert_eq!(final_bonds[2][2], 49151); +// assert_eq!(final_bonds[3][3], 65535); +// }); +// } + // // Map the retention graph for consensus guarantees with an single epoch on a graph with 512 nodes, of which the first 64 are validators, the graph is split into a major and minor set, each setting specific weight on itself and the complement on the other. // // // // ```import torch @@ -2070,3 +2766,47 @@ fn test_validator_permits() { // } // println!("]"); // } + +/// Helpers + +/// Asserts that two I32F32 values are approximately equal within a given epsilon. +/// +/// # Arguments +/// * `left` - The first value to compare. +/// * `right` - The second value to compare. 
+/// * `epsilon` - The maximum allowed difference between the two values. +fn assert_approx_eq(left: I32F32, right: I32F32, epsilon: I32F32) { + if (left - right).abs() > epsilon { + panic!( + "assertion failed: `(left ≈ right)`\n left: `{:?}`,\n right: `{:?}`,\n epsilon: `{:?}`", + left, right, epsilon + ); + } +} + +/// Helper function to assert approximate equality of two vectors of vectors of tuples. +fn assert_approx_eq_vec_of_vec( + left: &Vec>, + right: &Vec>, + epsilon: I32F32, +) { + assert_eq!(left.len(), right.len(), "Vectors have different lengths"); + for (left_row, right_row) in left.iter().zip(right.iter()) { + assert_eq!( + left_row.len(), + right_row.len(), + "Rows have different lengths" + ); + for ((left_idx, left_val), (right_idx, right_val)) in left_row.iter().zip(right_row.iter()) + { + assert_eq!(left_idx, right_idx, "Indices are different"); + assert!( + (left_val - right_val).abs() < epsilon, + "Values are different: left = {:?}, right = {:?}, epsilon = {:?}", + left_val, + right_val, + epsilon + ); + } + } +} diff --git a/pallets/subtensor/tests/math.rs b/pallets/subtensor/tests/math.rs new file mode 100644 index 000000000..bc1bf6cc1 --- /dev/null +++ b/pallets/subtensor/tests/math.rs @@ -0,0 +1,2345 @@ +#[allow(clippy::indexing_slicing)] +use substrate_fixed::types::{I32F32, I64F64}; + +use pallet_subtensor::math::*; +use rand::{seq::SliceRandom, thread_rng, Rng}; +use substrate_fixed::{ + transcendental::exp, + types::{I110F18, I96F32}, +}; + +fn assert_float_compare(a: I32F32, b: I32F32, epsilon: I32F32) { + assert!(I32F32::abs(a - b) <= epsilon, "a({:?}) != b({:?})", a, b); +} + +fn assert_float_compare_64(a: I64F64, b: I64F64, epsilon: I64F64) { + assert!(I64F64::abs(a - b) <= epsilon, "a({:?}) != b({:?})", a, b); +} + +fn assert_vec_compare(va: &[I32F32], vb: &[I32F32], epsilon: I32F32) { + assert!(va.len() == vb.len()); + for i in 0..va.len() { + assert_float_compare(va[i], vb[i], epsilon); + } +} + +fn assert_vec_compare_64(va: 
&[I64F64], vb: &[I64F64], epsilon: I64F64) { + assert!(va.len() == vb.len()); + for i in 0..va.len() { + assert_float_compare_64(va[i], vb[i], epsilon); + } +} + +fn assert_vec_compare_u16(va: &[u16], vb: &[u16]) { + assert!(va.len() == vb.len()); + for i in 0..va.len() { + assert_eq!(va[i], vb[i]); + } +} + +fn assert_mat_compare(ma: &[Vec], mb: &[Vec], epsilon: I32F32) { + assert!(ma.len() == mb.len()); + for row in 0..ma.len() { + assert!(ma[row].len() == mb[row].len()); + for col in 0..ma[row].len() { + assert_float_compare(ma[row][col], mb[row][col], epsilon) + } + } +} + +fn assert_sparse_mat_compare( + ma: &[Vec<(u16, I32F32)>], + mb: &[Vec<(u16, I32F32)>], + epsilon: I32F32, +) { + assert!(ma.len() == mb.len()); + for row in 0..ma.len() { + assert!(ma[row].len() == mb[row].len()); + for j in 0..ma[row].len() { + assert!(ma[row][j].0 == mb[row][j].0); // u16 + assert_float_compare(ma[row][j].1, mb[row][j].1, epsilon) // I32F32 + } + } +} + +fn vec_to_fixed(vector: &[f32]) -> Vec { + vector.iter().map(|x| I32F32::from_num(*x)).collect() +} + +fn mat_to_fixed(matrix: &[Vec]) -> Vec> { + matrix.iter().map(|row| vec_to_fixed(row)).collect() +} + +fn assert_mat_approx_eq(left: &Vec>, right: &Vec>, epsilon: I32F32) { + assert_eq!(left.len(), right.len()); + for (left_row, right_row) in left.iter().zip(right.iter()) { + assert_eq!(left_row.len(), right_row.len()); + for (left_val, right_val) in left_row.iter().zip(right_row.iter()) { + assert!( + (left_val - right_val).abs() <= epsilon, + "left: {:?}, right: {:?}", + left_val, + right_val + ); + } + } +} + +#[test] +fn test_vec_max_upscale_to_u16() { + let vector: Vec = vec_to_fixed(&[]); + let target: Vec = vec![]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec_to_fixed(&[0.]); + let target: Vec = vec![0]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec_to_fixed(&[0., 
0.]); + let target: Vec = vec![0, 0]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec_to_fixed(&[0., 1.]); + let target: Vec = vec![0, 65535]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec_to_fixed(&[0., 0.000000001]); + let target: Vec = vec![0, 65535]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec_to_fixed(&[0., 0.000016, 1.]); + let target: Vec = vec![0, 1, 65535]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec_to_fixed(&[0.000000001, 0.000000001]); + let target: Vec = vec![65535, 65535]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec_to_fixed(&[ + 0.000001, 0.000006, 0.000007, 0.0001, 0.001, 0.01, 0.1, 0.2, 0.3, 0.4, + ]); + let target: Vec = vec![0, 1, 1, 16, 164, 1638, 16384, 32768, 49151, 65535]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec![I32F32::from_num(16384)]; + let target: Vec = vec![65535]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec![I32F32::from_num(32768)]; + let target: Vec = vec![65535]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec![I32F32::from_num(32769)]; + let target: Vec = vec![65535]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec![I32F32::from_num(65535)]; + let target: Vec = vec![65535]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec![I32F32::max_value()]; + let target: Vec = vec![65535]; + let result: 
Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec_to_fixed(&[0., 1., 65535.]); + let target: Vec = vec![0, 1, 65535]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec_to_fixed(&[0., 0.5, 1., 1.5, 2., 32768.]); + let target: Vec = vec![0, 1, 2, 3, 4, 65535]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec_to_fixed(&[0., 0.5, 1., 1.5, 2., 32768., 32769.]); + let target: Vec = vec![0, 1, 2, 3, 4, 65533, 65535]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec![ + I32F32::from_num(0), + I32F32::from_num(1), + I32F32::from_num(32768), + I32F32::from_num(32769), + I32F32::max_value(), + ]; + let target: Vec = vec![0, 0, 1, 1, 65535]; + let result: Vec = vec_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); +} + +#[test] +fn test_vec_u16_max_upscale_to_u16() { + let vector: Vec = vec![]; + let result: Vec = vec_u16_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &vector); + let vector: Vec = vec![0]; + let result: Vec = vec_u16_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &vector); + let vector: Vec = vec![0, 0]; + let result: Vec = vec_u16_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &vector); + let vector: Vec = vec![1]; + let target: Vec = vec![65535]; + let result: Vec = vec_u16_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec![0, 1]; + let target: Vec = vec![0, 65535]; + let result: Vec = vec_u16_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec![65534]; + let target: Vec = vec![65535]; + let result: Vec = vec_u16_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec![65535]; + let target: 
Vec = vec![65535]; + let result: Vec = vec_u16_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec![65535, 65535]; + let target: Vec = vec![65535, 65535]; + let result: Vec = vec_u16_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec![0, 1, 65534]; + let target: Vec = vec![0, 1, 65535]; + let result: Vec = vec_u16_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &target); + let vector: Vec = vec![0, 1, 2, 3, 4, 65533, 65535]; + let result: Vec = vec_u16_max_upscale_to_u16(&vector); + assert_vec_compare_u16(&result, &vector); +} + +#[test] +fn test_check_vec_max_limited() { + let vector: Vec = vec![]; + let max_limit: u16 = 0; + assert!(check_vec_max_limited(&vector, max_limit)); + let vector: Vec = vec![]; + let max_limit: u16 = u16::MAX; + assert!(check_vec_max_limited(&vector, max_limit)); + let vector: Vec = vec![u16::MAX]; + let max_limit: u16 = u16::MAX; + assert!(check_vec_max_limited(&vector, max_limit)); + let vector: Vec = vec![u16::MAX]; + let max_limit: u16 = u16::MAX - 1; + assert!(!check_vec_max_limited(&vector, max_limit)); + let vector: Vec = vec![u16::MAX]; + let max_limit: u16 = 0; + assert!(!check_vec_max_limited(&vector, max_limit)); + let vector: Vec = vec![0]; + let max_limit: u16 = u16::MAX; + assert!(check_vec_max_limited(&vector, max_limit)); + let vector: Vec = vec![0, u16::MAX]; + let max_limit: u16 = u16::MAX; + assert!(check_vec_max_limited(&vector, max_limit)); + let vector: Vec = vec![0, u16::MAX, u16::MAX]; + let max_limit: u16 = u16::MAX / 2; + assert!(!check_vec_max_limited(&vector, max_limit)); + let vector: Vec = vec![0, u16::MAX, u16::MAX]; + let max_limit: u16 = u16::MAX / 2 + 1; + assert!(check_vec_max_limited(&vector, max_limit)); + let vector: Vec = vec![0, u16::MAX, u16::MAX, u16::MAX]; + let max_limit: u16 = u16::MAX / 3 - 1; + assert!(!check_vec_max_limited(&vector, max_limit)); + let vector: Vec = vec![0, u16::MAX, 
u16::MAX, u16::MAX]; + let max_limit: u16 = u16::MAX / 3; + assert!(check_vec_max_limited(&vector, max_limit)); +} + +#[test] +fn test_math_fixed_overflow() { + let max_32: I32F32 = I32F32::max_value(); + let max_u64: u64 = u64::MAX; + let _prod_96: I96F32 = I96F32::from_num(max_32) * I96F32::from_num(max_u64); + // let one: I96F32 = I96F32::from_num(1); + // let prod_96: I96F32 = (I96F32::from_num(max_32) + one) * I96F32::from_num(max_u64); // overflows + let _prod_110: I110F18 = I110F18::from_num(max_32) * I110F18::from_num(max_u64); + + let bonds_moving_average_val: u64 = 900_000_u64; + let bonds_moving_average: I64F64 = + I64F64::from_num(bonds_moving_average_val) / I64F64::from_num(1_000_000); + let alpha: I32F32 = I32F32::from_num(1) - I32F32::from_num(bonds_moving_average); + assert_eq!(I32F32::from_num(0.1), alpha); + + let bonds_moving_average: I64F64 = I64F64::from_num(max_32) / I64F64::from_num(max_32); + let alpha: I32F32 = I32F32::from_num(1) - I32F32::from_num(bonds_moving_average); + assert_eq!(I32F32::from_num(0), alpha); +} + +#[test] +fn test_math_u64_normalization() { + let min: u64 = 1; + let min32: u64 = 4_889_444; // 21_000_000_000_000_000 / 4_294_967_296 + let mid: u64 = 10_500_000_000_000_000; + let max: u64 = 21_000_000_000_000_000; + let min_64: I64F64 = I64F64::from_num(min); + let min32_64: I64F64 = I64F64::from_num(min32); + let mid_64: I64F64 = I64F64::from_num(mid); + let max_64: I64F64 = I64F64::from_num(max); + let max_sum: I64F64 = I64F64::from_num(max); + let min_frac: I64F64 = min_64 / max_sum; + assert_eq!(min_frac, I64F64::from_num(0.0000000000000000476)); + let min_frac_32: I32F32 = I32F32::from_num(min_frac); + assert_eq!(min_frac_32, I32F32::from_num(0)); + let min32_frac: I64F64 = min32_64 / max_sum; + assert_eq!(min32_frac, I64F64::from_num(0.00000000023283066664)); + let min32_frac_32: I32F32 = I32F32::from_num(min32_frac); + assert_eq!(min32_frac_32, I32F32::from_num(0.0000000002)); + let half: I64F64 = mid_64 / max_sum; 
+ assert_eq!(half, I64F64::from_num(0.5)); + let half_32: I32F32 = I32F32::from_num(half); + assert_eq!(half_32, I32F32::from_num(0.5)); + let one: I64F64 = max_64 / max_sum; + assert_eq!(one, I64F64::from_num(1)); + let one_32: I32F32 = I32F32::from_num(one); + assert_eq!(one_32, I32F32::from_num(1)); +} + +#[test] +fn test_math_to_num() { + let val: I32F32 = I32F32::from_num(u16::MAX); + let res: u16 = val.to_num::(); + assert_eq!(res, u16::MAX); + let vector: Vec = vec![val; 1000]; + let target: Vec = vec![u16::MAX; 1000]; + let output: Vec = vector.iter().map(|e: &I32F32| e.to_num::()).collect(); + assert_eq!(output, target); + let output: Vec = vector + .iter() + .map(|e: &I32F32| (*e).to_num::()) + .collect(); + assert_eq!(output, target); + let val: I32F32 = I32F32::max_value(); + let res: u64 = val.to_num::(); + let vector: Vec = vec![val; 1000]; + let target: Vec = vec![res; 1000]; + let output: Vec = vector.iter().map(|e: &I32F32| e.to_num::()).collect(); + assert_eq!(output, target); + let output: Vec = vector + .iter() + .map(|e: &I32F32| (*e).to_num::()) + .collect(); + assert_eq!(output, target); + let val: I32F32 = I32F32::from_num(0); + let res: u64 = val.to_num::(); + let vector: Vec = vec![val; 1000]; + let target: Vec = vec![res; 1000]; + let output: Vec = vector.iter().map(|e: &I32F32| e.to_num::()).collect(); + assert_eq!(output, target); + let output: Vec = vector + .iter() + .map(|e: &I32F32| (*e).to_num::()) + .collect(); + assert_eq!(output, target); + let val: I96F32 = I96F32::from_num(u64::MAX); + let res: u64 = val.to_num::(); + assert_eq!(res, u64::MAX); + let vector: Vec = vec![val; 1000]; + let target: Vec = vec![u64::MAX; 1000]; + let output: Vec = vector.iter().map(|e: &I96F32| e.to_num::()).collect(); + assert_eq!(output, target); + let output: Vec = vector + .iter() + .map(|e: &I96F32| (*e).to_num::()) + .collect(); + assert_eq!(output, target); +} + +#[test] +fn test_math_vec_to_fixed() { + let vector: Vec = vec![0., 1., 2., 3.]; 
+ let target: Vec = vec![ + I32F32::from_num(0.), + I32F32::from_num(1.), + I32F32::from_num(2.), + I32F32::from_num(3.), + ]; + let result = vec_to_fixed(&vector); + assert_vec_compare(&result, &target, I32F32::from_num(0)); +} + +// Reshape vector to matrix with specified number of rows, cast to I32F32. +fn vec_to_mat_fixed(vector: &[f32], rows: usize, transpose: bool) -> Vec> { + assert!( + vector.len() % rows == 0, + "Vector of len {:?} cannot reshape to {rows} rows.", + vector.len() + ); + let cols: usize = vector.len() / rows; + let mut mat: Vec> = vec![]; + if transpose { + for col in 0..cols { + let mut vals: Vec = vec![]; + for row in 0..rows { + vals.push(I32F32::from_num(vector[row * cols + col])); + } + mat.push(vals); + } + } else { + for row in 0..rows { + mat.push( + vector[row * cols..(row + 1) * cols] + .iter() + .map(|v| I32F32::from_num(*v)) + .collect(), + ); + } + } + mat +} + +#[test] +fn test_math_vec_to_mat_fixed() { + let vector: Vec = vec![0., 1., 2., 0., 10., 100.]; + let target: Vec> = vec![ + vec![ + I32F32::from_num(0.), + I32F32::from_num(1.), + I32F32::from_num(2.), + ], + vec![ + I32F32::from_num(0.), + I32F32::from_num(10.), + I32F32::from_num(100.), + ], + ]; + let mat = vec_to_mat_fixed(&vector, 2, false); + assert_mat_compare(&mat, &target, I32F32::from_num(0)); +} + +// Reshape vector to sparse matrix with specified number of input rows, cast f32 to I32F32. +fn vec_to_sparse_mat_fixed( + vector: &[f32], + rows: usize, + transpose: bool, +) -> Vec> { + assert!( + vector.len() % rows == 0, + "Vector of len {:?} cannot reshape to {rows} rows.", + vector.len() + ); + let cols: usize = vector.len() / rows; + let mut mat: Vec> = vec![]; + if transpose { + for col in 0..cols { + let mut row_vec: Vec<(u16, I32F32)> = vec![]; + for row in 0..rows { + if vector[row * cols + col] > 0. 
{ + row_vec.push((row as u16, I32F32::from_num(vector[row * cols + col]))); + } + } + mat.push(row_vec); + } + } else { + for row in 0..rows { + let mut row_vec: Vec<(u16, I32F32)> = vec![]; + for col in 0..cols { + if vector[row * cols + col] > 0. { + row_vec.push((col as u16, I32F32::from_num(vector[row * cols + col]))); + } + } + mat.push(row_vec); + } + } + mat +} + +#[test] +fn test_math_vec_to_sparse_mat_fixed() { + let vector: Vec = vec![0., 1., 2., 0., 10., 100.]; + let target: Vec> = vec![ + vec![(1_u16, I32F32::from_num(1.)), (2_u16, I32F32::from_num(2.))], + vec![ + (1_u16, I32F32::from_num(10.)), + (2_u16, I32F32::from_num(100.)), + ], + ]; + let mat = vec_to_sparse_mat_fixed(&vector, 2, false); + assert_sparse_mat_compare(&mat, &target, I32F32::from_num(0)); + let vector: Vec = vec![0., 0.]; + let target: Vec> = vec![vec![], vec![]]; + let mat = vec_to_sparse_mat_fixed(&vector, 2, false); + assert_sparse_mat_compare(&mat, &target, I32F32::from_num(0)); + let vector: Vec = vec![0., 1., 2., 0., 10., 100.]; + let target: Vec> = vec![ + vec![], + vec![ + (0_u16, I32F32::from_num(1.)), + (1_u16, I32F32::from_num(10.)), + ], + vec![ + (0_u16, I32F32::from_num(2.)), + (1_u16, I32F32::from_num(100.)), + ], + ]; + let mat = vec_to_sparse_mat_fixed(&vector, 2, true); + assert_sparse_mat_compare(&mat, &target, I32F32::from_num(0)); + let vector: Vec = vec![0., 0.]; + let target: Vec> = vec![vec![]]; + let mat = vec_to_sparse_mat_fixed(&vector, 2, true); + assert_sparse_mat_compare(&mat, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_exp_safe() { + let zero: I32F32 = I32F32::from_num(0); + let one: I32F32 = I32F32::from_num(1); + let target: I32F32 = exp(zero).unwrap(); + assert_eq!(exp_safe(zero), target); + let target: I32F32 = exp(one).unwrap(); + assert_eq!(exp_safe(one), target); + let min_input: I32F32 = I32F32::from_num(-20); // <= 1/exp(-20) = 485 165 195,4097903 + let max_input: I32F32 = I32F32::from_num(20); // <= exp(20) = 485 165 
195,4097903 + let target: I32F32 = exp(min_input).unwrap(); + assert_eq!(exp_safe(min_input), target); + assert_eq!(exp_safe(min_input - one), target); + assert_eq!(exp_safe(I32F32::min_value()), target); + let target: I32F32 = exp(max_input).unwrap(); + assert_eq!(exp_safe(max_input), target); + assert_eq!(exp_safe(max_input + one), target); + assert_eq!(exp_safe(I32F32::max_value()), target); +} + +#[test] +fn test_math_sigmoid_safe() { + let trust: Vec = vec![ + I32F32::min_value(), + I32F32::from_num(0), + I32F32::from_num(0.4), + I32F32::from_num(0.5), + I32F32::from_num(0.6), + I32F32::from_num(1), + I32F32::max_value(), + ]; + let consensus: Vec = trust + .iter() + .map(|t: &I32F32| sigmoid_safe(*t, I32F32::max_value(), I32F32::max_value())) + .collect(); + let target: Vec = vec_to_fixed(&[ + 0.0000000019, + 0.0000000019, + 0.0000000019, + 0.0000000019, + 0.0000000019, + 0.0000000019, + 0.5, + ]); + assert_eq!(&consensus, &target); + let consensus: Vec = trust + .iter() + .map(|t: &I32F32| sigmoid_safe(*t, I32F32::min_value(), I32F32::min_value())) + .collect(); + let target: Vec = vec_to_fixed(&[ + 0.5, + 0.0000000019, + 0.0000000019, + 0.0000000019, + 0.0000000019, + 0.0000000019, + 0.0000000019, + ]); + assert_eq!(&consensus, &target); + let consensus: Vec = trust + .iter() + .map(|t: &I32F32| sigmoid_safe(*t, I32F32::from_num(30), I32F32::from_num(0.5))) + .collect(); + let target: Vec = vec![ + 0.0000000019, + 0.0000003057, + 0.0474258729, + 0.5, + 0.952574127, + 0.9999996943, + 0.9999999981, + ]; + let target: Vec = target.iter().map(|c: &f64| I32F32::from_num(*c)).collect(); + assert_eq!(&consensus, &target); + let trust: Vec = vec_to_fixed(&[0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.]); + let consensus: Vec = trust + .iter() + .map(|t: &I32F32| sigmoid_safe(*t, I32F32::from_num(40), I32F32::from_num(0.5))) + .collect(); + let target: Vec = vec![ + 0.0000000019, + 0.0000001125, + 0.0000061442, + 0.0003353502, + 0.017986214, + 0.5, + 
0.9820138067, + 0.9996646498, + 0.9999938558, + 0.9999998875, + 0.9999999981, + ]; + let target: Vec = target.iter().map(|c: &f64| I32F32::from_num(*c)).collect(); + assert_eq!(&consensus, &target); +} + +#[test] +fn test_math_is_topk() { + let vector: Vec = vec_to_fixed(&[]); + let result = is_topk(&vector, 5); + let target: Vec = vec![]; + assert_eq!(&result, &target); + let vector: Vec = vec_to_fixed(&[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]); + let result = is_topk(&vector, 0); + let target: Vec = vec![ + false, false, false, false, false, false, false, false, false, false, + ]; + assert_eq!(&result, &target); + let result = is_topk(&vector, 5); + let target: Vec = vec![ + false, false, false, false, false, true, true, true, true, true, + ]; + assert_eq!(&result, &target); + let result = is_topk(&vector, 10); + let target: Vec = vec![true, true, true, true, true, true, true, true, true, true]; + assert_eq!(&result, &target); + let result = is_topk(&vector, 100); + assert_eq!(&result, &target); + let vector: Vec = vec_to_fixed(&[9., 8., 7., 6., 5., 4., 3., 2., 1., 0.]); + let result = is_topk(&vector, 5); + let target: Vec = vec![ + true, true, true, true, true, false, false, false, false, false, + ]; + assert_eq!(&result, &target); + let vector: Vec = vec_to_fixed(&[9., 0., 8., 1., 7., 2., 6., 3., 5., 4.]); + let result = is_topk(&vector, 5); + let target: Vec = vec![ + true, false, true, false, true, false, true, false, true, false, + ]; + assert_eq!(&result, &target); + let vector: Vec = vec_to_fixed(&[0.9, 0., 0.8, 0.1, 0.7, 0.2, 0.6, 0.3, 0.5, 0.4]); + let result = is_topk(&vector, 5); + let target: Vec = vec![ + true, false, true, false, true, false, true, false, true, false, + ]; + assert_eq!(&result, &target); + let vector: Vec = vec_to_fixed(&[0., 1., 2., 3., 4., 5., 5., 5., 5., 6.]); + let result = is_topk(&vector, 5); + let target: Vec = vec![ + false, false, false, false, false, true, true, true, true, true, + ]; + assert_eq!(&result, &target); +} + 
+#[test] +fn test_math_sum() { + assert!(sum(&[]) == I32F32::from_num(0)); + assert!( + sum(&[ + I32F32::from_num(1.0), + I32F32::from_num(10.0), + I32F32::from_num(30.0) + ]) == I32F32::from_num(41) + ); + assert!( + sum(&[ + I32F32::from_num(-1.0), + I32F32::from_num(10.0), + I32F32::from_num(30.0) + ]) == I32F32::from_num(39) + ); +} + +#[test] +fn test_math_normalize() { + let epsilon: I32F32 = I32F32::from_num(0.0001); + let x: Vec = vec![]; + let y: Vec = normalize(&x); + assert_vec_compare(&x, &y, epsilon); + let x: Vec = vec![ + I32F32::from_num(1.0), + I32F32::from_num(10.0), + I32F32::from_num(30.0), + ]; + let y: Vec = normalize(&x); + assert_vec_compare( + &y, + &[ + I32F32::from_num(0.0243902437), + I32F32::from_num(0.243902439), + I32F32::from_num(0.7317073171), + ], + epsilon, + ); + assert_float_compare(sum(&y), I32F32::from_num(1.0), epsilon); + let x: Vec = vec![ + I32F32::from_num(-1.0), + I32F32::from_num(10.0), + I32F32::from_num(30.0), + ]; + let y: Vec = normalize(&x); + assert_vec_compare( + &y, + &[ + I32F32::from_num(-0.0256410255), + I32F32::from_num(0.2564102563), + I32F32::from_num(0.769230769), + ], + epsilon, + ); + assert_float_compare(sum(&y), I32F32::from_num(1.0), epsilon); +} + +#[test] +fn test_math_inplace_normalize() { + let epsilon: I32F32 = I32F32::from_num(0.0001); + let mut x1: Vec = vec![ + I32F32::from_num(1.0), + I32F32::from_num(10.0), + I32F32::from_num(30.0), + ]; + inplace_normalize(&mut x1); + assert_vec_compare( + &x1, + &[ + I32F32::from_num(0.0243902437), + I32F32::from_num(0.243902439), + I32F32::from_num(0.7317073171), + ], + epsilon, + ); + let mut x2: Vec = vec![ + I32F32::from_num(-1.0), + I32F32::from_num(10.0), + I32F32::from_num(30.0), + ]; + inplace_normalize(&mut x2); + assert_vec_compare( + &x2, + &[ + I32F32::from_num(-0.0256410255), + I32F32::from_num(0.2564102563), + I32F32::from_num(0.769230769), + ], + epsilon, + ); +} + +#[test] +fn test_math_inplace_normalize_64() { + let epsilon: I64F64 = 
I64F64::from_num(0.0001); + let mut x1: Vec = vec![ + I64F64::from_num(1.0), + I64F64::from_num(10.0), + I64F64::from_num(30.0), + ]; + inplace_normalize_64(&mut x1); + assert_vec_compare_64( + &x1, + &[ + I64F64::from_num(0.0243902437), + I64F64::from_num(0.243902439), + I64F64::from_num(0.7317073171), + ], + epsilon, + ); + let mut x2: Vec = vec![ + I64F64::from_num(-1.0), + I64F64::from_num(10.0), + I64F64::from_num(30.0), + ]; + inplace_normalize_64(&mut x2); + assert_vec_compare_64( + &x2, + &[ + I64F64::from_num(-0.0256410255), + I64F64::from_num(0.2564102563), + I64F64::from_num(0.769230769), + ], + epsilon, + ); +} + +#[test] +fn test_math_vecdiv() { + let x: Vec = vec_to_fixed(&[]); + let y: Vec = vec_to_fixed(&[]); + let result: Vec = vec_to_fixed(&[]); + assert_eq!(result, vecdiv(&x, &y)); + + let x: Vec = vec_to_fixed(&[0., 1., 0., 1.]); + let y: Vec = vec_to_fixed(&[0., 1., 1., 0.]); + let result: Vec = vec_to_fixed(&[0., 1., 0., 0.]); + assert_eq!(result, vecdiv(&x, &y)); + + let x: Vec = vec_to_fixed(&[1., 1., 10.]); + let y: Vec = vec_to_fixed(&[2., 3., 2.]); + let result: Vec = vec![fixed(1.) / fixed(2.), fixed(1.) 
/ fixed(3.), fixed(5.)]; + assert_eq!(result, vecdiv(&x, &y)); +} + +#[test] +fn test_math_inplace_row_normalize() { + let epsilon: I32F32 = I32F32::from_num(0.0001); + let vector: Vec = vec![ + 0., 1., 2., 3., 4., 0., 10., 100., 1000., 10000., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., + ]; + let mut mat = vec_to_mat_fixed(&vector, 4, false); + inplace_row_normalize(&mut mat); + let target: Vec = vec![ + 0., 0.1, 0.2, 0.3, 0.4, 0., 0.0009, 0.009, 0.09, 0.9, 0., 0., 0., 0., 0., 0.2, 0.2, 0.2, + 0.2, 0.2, + ]; + assert_mat_compare(&mat, &vec_to_mat_fixed(&target, 4, false), epsilon); +} + +#[test] +fn test_math_inplace_row_normalize_sparse() { + let epsilon: I32F32 = I32F32::from_num(0.0001); + let vector: Vec = vec![ + 0., 1., 0., 2., 0., 3., 4., 0., 1., 0., 2., 0., 3., 0., 1., 0., 0., 2., 0., 3., 4., 0., + 10., 0., 100., 1000., 0., 10000., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., + ]; + let mut mat = vec_to_sparse_mat_fixed(&vector, 6, false); + inplace_row_normalize_sparse(&mut mat); + let target: Vec = vec![ + 0., 0.1, 0., 0.2, 0., 0.3, 0.4, 0., 0.166666, 0., 0.333333, 0., 0.5, 0., 0.1, 0., 0., 0.2, + 0., 0.3, 0.4, 0., 0.0009, 0., 0.009, 0.09, 0., 0.9, 0., 0., 0., 0., 0., 0., 0., 0.142857, + 0.142857, 0.142857, 0.142857, 0.142857, 0.142857, 0.142857, + ]; + assert_sparse_mat_compare(&mat, &vec_to_sparse_mat_fixed(&target, 6, false), epsilon); + let vector: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mut mat = vec_to_sparse_mat_fixed(&vector, 3, false); + inplace_row_normalize_sparse(&mut mat); + assert_sparse_mat_compare( + &mat, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); +} + +#[test] +fn test_math_inplace_col_normalize() { + let epsilon: I32F32 = I32F32::from_num(0.0001); + let vector: Vec = vec![ + 0., 1., 2., 3., 4., 0., 10., 100., 1000., 10000., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., + ]; + let mut mat = 
vec_to_mat_fixed(&vector, 4, true); + inplace_col_normalize(&mut mat); + let target: Vec = vec![ + 0., 0.1, 0.2, 0.3, 0.4, 0., 0.0009, 0.009, 0.09, 0.9, 0., 0., 0., 0., 0., 0.2, 0.2, 0.2, + 0.2, 0.2, + ]; + assert_mat_compare(&mat, &vec_to_mat_fixed(&target, 4, true), epsilon); +} + +#[test] +fn test_math_inplace_col_normalize_sparse() { + let epsilon: I32F32 = I32F32::from_num(0.0001); + let vector: Vec = vec![ + 0., 1., 0., 2., 0., 3., 4., 0., 1., 0., 2., 0., 3., 0., 1., 0., 0., 2., 0., 3., 4., 0., + 10., 0., 100., 1000., 0., 10000., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., + ]; + let mut mat = vec_to_sparse_mat_fixed(&vector, 6, true); + inplace_col_normalize_sparse(&mut mat, 6); + let target: Vec = vec![ + 0., 0.1, 0., 0.2, 0., 0.3, 0.4, 0., 0.166666, 0., 0.333333, 0., 0.5, 0., 0.1, 0., 0., 0.2, + 0., 0.3, 0.4, 0., 0.0009, 0., 0.009, 0.09, 0., 0.9, 0., 0., 0., 0., 0., 0., 0., 0.142857, + 0.142857, 0.142857, 0.142857, 0.142857, 0.142857, 0.142857, + ]; + assert_sparse_mat_compare(&mat, &vec_to_sparse_mat_fixed(&target, 6, true), epsilon); + let vector: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mut mat = vec_to_sparse_mat_fixed(&vector, 3, false); + inplace_col_normalize_sparse(&mut mat, 6); + assert_sparse_mat_compare( + &mat, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + let mut mat: Vec> = vec![]; + let target: Vec> = vec![]; + inplace_col_normalize_sparse(&mut mat, 0); + assert_sparse_mat_compare(&mat, &target, epsilon); +} + +#[test] +fn test_math_inplace_col_max_upscale() { + let mut mat: Vec> = vec![vec![]]; + let target: Vec> = vec![vec![]]; + inplace_col_max_upscale(&mut mat); + assert_eq!(&mat, &target); + let mut mat: Vec> = vec![vec![I32F32::from_num(0)]]; + let target: Vec> = vec![vec![I32F32::from_num(0)]]; + inplace_col_max_upscale(&mut mat); + assert_eq!(&mat, &target); + let epsilon: I32F32 = 
I32F32::from_num(0.0001); + let vector: Vec = vec![ + 0., 1., 2., 3., 4., 0., 10., 100., 1000., 10000., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., + ]; + let mut mat: Vec> = vec_to_mat_fixed(&vector, 4, true); + inplace_col_max_upscale(&mut mat); + let target: Vec = vec![ + 0., 0.25, 0.5, 0.75, 1., 0., 0.001, 0.01, 0.1, 1., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., + ]; + assert_mat_compare(&mat, &vec_to_mat_fixed(&target, 4, true), epsilon); +} + +#[test] +fn test_math_inplace_col_max_upscale_sparse() { + let mut mat: Vec> = vec![vec![]]; + let target: Vec> = vec![vec![]]; + inplace_col_max_upscale_sparse(&mut mat, 0); + assert_eq!(&mat, &target); + let mut mat: Vec> = vec![vec![(0, I32F32::from_num(0))]]; + let target: Vec> = vec![vec![(0, I32F32::from_num(0))]]; + inplace_col_max_upscale_sparse(&mut mat, 1); + assert_eq!(&mat, &target); + let epsilon: I32F32 = I32F32::from_num(0.0001); + let vector: Vec = vec![ + 0., 1., 0., 2., 0., 3., 4., 0., 1., 0., 2., 0., 3., 0., 1., 0., 0., 2., 0., 3., 4., 0., + 10., 0., 100., 1000., 0., 10000., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., + ]; + let mut mat = vec_to_sparse_mat_fixed(&vector, 6, true); + inplace_col_max_upscale_sparse(&mut mat, 6); + let target: Vec = vec![ + 0., 0.25, 0., 0.5, 0., 0.75, 1., 0., 0.333333, 0., 0.666666, 0., 1., 0., 0.25, 0., 0., 0.5, + 0., 0.75, 1., 0., 0.001, 0., 0.01, 0.1, 0., 1., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., + 1., 1., 1., + ]; + assert_sparse_mat_compare(&mat, &vec_to_sparse_mat_fixed(&target, 6, true), epsilon); + let vector: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mut mat = vec_to_sparse_mat_fixed(&vector, 3, false); + inplace_col_max_upscale_sparse(&mut mat, 6); + assert_sparse_mat_compare( + &mat, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + let mut mat: Vec> = vec![]; + let target: Vec> = vec![]; + 
inplace_col_max_upscale_sparse(&mut mat, 0); + assert_sparse_mat_compare(&mat, &target, epsilon); +} + +#[test] +fn test_math_inplace_mask_vector() { + let mask: Vec = vec![false, false, false]; + let mut vector: Vec = vec_to_fixed(&[0., 1., 2.]); + let target: Vec = vec_to_fixed(&[0., 1., 2.]); + inplace_mask_vector(&mask, &mut vector); + assert_vec_compare(&vector, &target, I32F32::from_num(0)); + let mask: Vec = vec![false, true, false]; + let mut vector: Vec = vec_to_fixed(&[0., 1., 2.]); + let target: Vec = vec_to_fixed(&[0., 0., 2.]); + inplace_mask_vector(&mask, &mut vector); + assert_vec_compare(&vector, &target, I32F32::from_num(0)); + let mask: Vec = vec![true, true, true]; + let mut vector: Vec = vec_to_fixed(&[0., 1., 2.]); + let target: Vec = vec_to_fixed(&[0., 0., 0.]); + inplace_mask_vector(&mask, &mut vector); + assert_vec_compare(&vector, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_inplace_mask_matrix() { + let mask: Vec> = vec![ + vec![false, false, false], + vec![false, false, false], + vec![false, false, false], + ]; + let vector: Vec = vec![0., 1., 2., 3., 4., 5., 6., 7., 8.]; + let mut mat = vec_to_mat_fixed(&vector, 3, false); + inplace_mask_matrix(&mask, &mut mat); + assert_mat_compare( + &mat, + &vec_to_mat_fixed(&vector, 3, false), + I32F32::from_num(0), + ); + let mask: Vec> = vec![ + vec![true, false, false], + vec![false, true, false], + vec![false, false, true], + ]; + let target: Vec = vec![0., 1., 2., 3., 0., 5., 6., 7., 0.]; + let mut mat = vec_to_mat_fixed(&vector, 3, false); + inplace_mask_matrix(&mask, &mut mat); + assert_mat_compare( + &mat, + &vec_to_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + let mask: Vec> = vec![ + vec![true, true, true], + vec![true, true, true], + vec![true, true, true], + ]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mut mat = vec_to_mat_fixed(&vector, 3, false); + inplace_mask_matrix(&mask, &mut mat); + assert_mat_compare( + &mat, + 
&vec_to_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); +} + +#[test] +fn test_math_inplace_mask_rows() { + let input: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let mask: Vec = vec![false, false, false]; + let target: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let mut mat = vec_to_mat_fixed(&input, 3, false); + inplace_mask_rows(&mask, &mut mat); + assert_mat_compare( + &mat, + &vec_to_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + let mask: Vec = vec![true, true, true]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mut mat = vec_to_mat_fixed(&input, 3, false); + inplace_mask_rows(&mask, &mut mat); + assert_mat_compare( + &mat, + &vec_to_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + let mask: Vec = vec![true, false, true]; + let target: Vec = vec![0., 0., 0., 4., 5., 6., 0., 0., 0.]; + let mut mat = vec_to_mat_fixed(&input, 3, false); + inplace_mask_rows(&mask, &mut mat); + assert_mat_compare( + &mat, + &vec_to_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + let input: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mut mat = vec_to_mat_fixed(&input, 3, false); + let mask: Vec = vec![false, false, false]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + inplace_mask_rows(&mask, &mut mat); + assert_mat_compare( + &mat, + &vec_to_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); +} + +#[test] +fn test_math_inplace_mask_diag() { + let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let target: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0.]; + let mut mat = vec_to_mat_fixed(&vector, 3, false); + inplace_mask_diag(&mut mat); + assert_mat_compare( + &mat, + &vec_to_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); +} + +#[test] +fn test_math_mask_rows_sparse() { + let input: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let mat = vec_to_sparse_mat_fixed(&input, 3, false); + let mask: Vec = vec![false, false, false]; + let target: Vec = 
vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let result = mask_rows_sparse(&mask, &mat); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + let mask: Vec = vec![true, true, true]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let result = mask_rows_sparse(&mask, &mat); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + let mask: Vec = vec![true, false, true]; + let target: Vec = vec![0., 0., 0., 4., 5., 6., 0., 0., 0.]; + let result = mask_rows_sparse(&mask, &mat); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + let input: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mat = vec_to_sparse_mat_fixed(&input, 3, false); + let mask: Vec = vec![false, false, false]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let result = mask_rows_sparse(&mask, &mat); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); +} + +#[test] +fn test_math_mask_diag_sparse() { + let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let target: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let result = mask_diag_sparse(&mat); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + let vector: Vec = vec![1., 0., 0., 0., 5., 0., 0., 0., 9.]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let result = mask_diag_sparse(&mat); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + let vector: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mat = 
vec_to_sparse_mat_fixed(&vector, 3, false); + let result = mask_diag_sparse(&mat); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); +} + +#[test] +fn test_math_vec_mask_sparse_matrix() { + let vector: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]; + let target: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let first_vector: Vec = vec![1, 2, 3]; + let second_vector: Vec = vec![1, 2, 3]; + let result = vec_mask_sparse_matrix(&mat, &first_vector, &second_vector, &|a, b| a == b); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + let target: Vec = vec![1., 0., 0., 4., 5., 0., 7., 8., 9.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let first_vector: Vec = vec![1, 2, 3]; + let second_vector: Vec = vec![1, 2, 3]; + let result = vec_mask_sparse_matrix(&mat, &first_vector, &second_vector, &|a, b| a < b); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); + let vector: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mat = vec_to_sparse_mat_fixed(&vector, 3, false); + let first_vector: Vec = vec![1, 2, 3]; + let second_vector: Vec = vec![1, 2, 3]; + let result = vec_mask_sparse_matrix(&mat, &first_vector, &second_vector, &|a, b| a == b); + assert_sparse_mat_compare( + &result, + &vec_to_sparse_mat_fixed(&target, 3, false), + I32F32::from_num(0), + ); +} + +#[test] +fn test_math_row_hadamard() { + let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_mat_fixed(&matrix, 4, false); + let result = row_hadamard(&matrix, &vector); + let target: Vec = vec![1., 2., 3., 8., 10., 12., 21., 24., 27., 40., 44., 48.]; + let target = vec_to_mat_fixed(&target, 4, 
false); + assert_mat_compare(&result, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_row_hadamard_sparse() { + let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = row_hadamard_sparse(&matrix, &vector); + let target: Vec = vec![1., 2., 3., 8., 10., 12., 21., 24., 27., 40., 44., 48.]; + let target = vec_to_sparse_mat_fixed(&target, 4, false); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); + let matrix: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = row_hadamard_sparse(&matrix, &vector); + let target: Vec = vec![0., 2., 3., 8., 0., 12., 21., 24., 0., 40., 44., 48.]; + let target = vec_to_sparse_mat_fixed(&target, 4, false); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); + let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = row_hadamard_sparse(&matrix, &vector); + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let target = vec_to_sparse_mat_fixed(&target, 4, false); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_row_sum() { + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_mat_fixed(&matrix, 4, false); + let result = row_sum(&matrix); + let target: Vec = vec_to_fixed(&[6., 15., 24., 33.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_row_sum_sparse() { + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = row_sum_sparse(&matrix); + let target: Vec = vec_to_fixed(&[6., 15., 24., 33.]); + assert_vec_compare(&result, &target, 
I32F32::from_num(0)); + let matrix: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = row_sum_sparse(&matrix); + let target: Vec = vec_to_fixed(&[5., 10., 15., 33.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); + let matrix: Vec = vec![1., 2., 3., 0., 0., 0., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = row_sum_sparse(&matrix); + let target: Vec = vec_to_fixed(&[6., 0., 24., 33.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); + let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = row_sum_sparse(&matrix); + let target: Vec = vec_to_fixed(&[0., 0., 0., 0.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_col_sum() { + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_mat_fixed(&matrix, 4, false); + let result = col_sum(&matrix); + let target: Vec = vec_to_fixed(&[22., 26., 30.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_col_sum_sparse() { + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = col_sum_sparse(&matrix, 3); + let target: Vec = vec_to_fixed(&[22., 26., 30.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); + let matrix: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = col_sum_sparse(&matrix, 3); + let target: Vec = vec_to_fixed(&[21., 21., 21.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); + let matrix: Vec = vec![1., 0., 3., 4., 0., 6., 7., 0., 9., 10., 0., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = 
col_sum_sparse(&matrix, 3); + let target: Vec = vec_to_fixed(&[22., 0., 30.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); + let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = col_sum_sparse(&matrix, 3); + let target: Vec = vec_to_fixed(&[0., 0., 0.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_matmul() { + let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_mat_fixed(&matrix, 4, false); + let result = matmul(&matrix, &vector); + let target: Vec = vec_to_fixed(&[70., 80., 90.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_matmul_transpose() { + let vector: Vec = vec_to_fixed(&[1., 2., 3.]); + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_mat_fixed(&matrix, 4, false); + let result = matmul_transpose(&matrix, &vector); + let target: Vec = vec_to_fixed(&[14., 32., 50., 68.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_sparse_matmul() { + let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = matmul_sparse(&matrix, &vector, 3); + let target: Vec = vec_to_fixed(&[70., 80., 90.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); + let matrix: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = matmul_sparse(&matrix, &vector, 3); + let target: Vec = vec_to_fixed(&[69., 70., 63.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); + let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let matrix = 
vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = matmul_sparse(&matrix, &vector, 3); + let target: Vec = vec_to_fixed(&[0., 0., 0.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_sparse_matmul_transpose() { + let vector: Vec = vec_to_fixed(&[1., 2., 3.]); + let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = matmul_transpose_sparse(&matrix, &vector); + let target: Vec = vec_to_fixed(&[14., 32., 50., 68.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); + let matrix: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = matmul_transpose_sparse(&matrix, &vector); + let target: Vec = vec_to_fixed(&[13., 22., 23., 68.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); + let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let result = matmul_transpose_sparse(&matrix, &vector); + let target: Vec = vec_to_fixed(&[0., 0., 0., 0.]); + assert_vec_compare(&result, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_inplace_col_clip() { + let vector: Vec = vec_to_fixed(&[0., 5., 12.]); + let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let mut matrix = vec_to_mat_fixed(&matrix, 4, false); + let target: Vec = vec![0., 2., 3., 0., 5., 6., 0., 5., 9., 0., 5., 12.]; + let target = vec_to_mat_fixed(&target, 4, false); + inplace_col_clip(&mut matrix, &vector); + assert_mat_compare(&matrix, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_col_clip_sparse() { + let vector: Vec = vec_to_fixed(&[0., 5., 12.]); + let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let target: Vec = vec![0., 2., 3., 0., 5., 6., 0., 5., 9., 0., 
5., 12.]; + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = col_clip_sparse(&matrix, &vector); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); + let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 0., 0., 0., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let target: Vec = vec![0., 2., 3., 0., 5., 6., 0., 0., 0., 0., 5., 12.]; + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = col_clip_sparse(&matrix, &vector); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); + let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = col_clip_sparse(&matrix, &vector); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_clip_sparse() { + let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); + let target: Vec = vec![0., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = clip_sparse( + &matrix, + I32F32::from_num(8), + I32F32::from_num(100), + I32F32::from_num(1), + ); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_clip() { + let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let matrix = vec_to_mat_fixed(&matrix, 4, false); + let target: Vec = vec![1., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; + let target = vec_to_mat_fixed(&target, 4, false); + let result = clip( + &matrix, + I32F32::from_num(8), + I32F32::from_num(100), + I32F32::from_num(1), + ); + assert_mat_compare(&result, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_inplace_clip() { + let matrix: Vec 
= vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let mut matrix = vec_to_mat_fixed(&matrix, 4, false); + let target: Vec = vec![1., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; + let target = vec_to_mat_fixed(&target, 4, false); + inplace_clip( + &mut matrix, + I32F32::from_num(8), + I32F32::from_num(100), + I32F32::from_num(1), + ); + assert_mat_compare(&matrix, &target, I32F32::from_num(0)); +} + +#[test] +fn test_math_weighted_median() { + let mut rng = thread_rng(); + let zero: I32F32 = fixed(0.); + let one: I32F32 = fixed(1.); + for _ in 0..100 { + let stake: Vec = vec_to_fixed(&[]); + let score: Vec = vec_to_fixed(&[]); + let majority: I32F32 = fixed(0.51); + assert_eq!( + zero, + weighted_median( + &stake, + &score, + (0..stake.len()).collect::>().as_slice(), + one - majority, + zero, + stake.iter().sum() + ) + ); + + let stake: Vec = normalize(&vec_to_fixed(&[0.51])); + let score: Vec = vec_to_fixed(&[1.]); + let majority: I32F32 = fixed(0.51); + assert_eq!( + one, + weighted_median( + &stake, + &score, + (0..stake.len()).collect::>().as_slice(), + one - majority, + zero, + stake.iter().sum() + ) + ); + + let stake: Vec = vec_to_fixed(&[0.49, 0.51]); + let score: Vec = vec_to_fixed(&[0.5, 1.]); + let majority: I32F32 = fixed(0.51); + assert_eq!( + one, + weighted_median( + &stake, + &score, + (0..stake.len()).collect::>().as_slice(), + one - majority, + zero, + stake.iter().sum() + ) + ); + + let stake: Vec = vec_to_fixed(&[0.51, 0.49]); + let score: Vec = vec_to_fixed(&[0.5, 1.]); + let majority: I32F32 = fixed(0.51); + assert_eq!( + fixed(0.5), + weighted_median( + &stake, + &score, + (0..stake.len()).collect::>().as_slice(), + one - majority, + zero, + stake.iter().sum() + ) + ); + + let stake: Vec = vec_to_fixed(&[0.49, 0., 0.51]); + let score: Vec = vec_to_fixed(&[0.5, 0.7, 1.]); + let majority: I32F32 = fixed(0.51); + assert_eq!( + one, + weighted_median( + &stake, + &score, + (0..stake.len()).collect::>().as_slice(), + one - 
majority, + zero, + stake.iter().sum() + ) + ); + + let stake: Vec = vec_to_fixed(&[0.49, 0.01, 0.5]); + let score: Vec = vec_to_fixed(&[0.5, 0.7, 1.]); + let majority: I32F32 = fixed(0.51); + assert_eq!( + fixed(0.7), + weighted_median( + &stake, + &score, + (0..stake.len()).collect::>().as_slice(), + one - majority, + zero, + stake.iter().sum() + ) + ); + + let stake: Vec = vec_to_fixed(&[0.49, 0.51, 0.0]); + let score: Vec = vec_to_fixed(&[0.5, 0.7, 1.]); + let majority: I32F32 = fixed(0.51); + assert_eq!( + fixed(0.7), + weighted_median( + &stake, + &score, + (0..stake.len()).collect::>().as_slice(), + one - majority, + zero, + stake.iter().sum() + ) + ); + + let stake: Vec = vec_to_fixed(&[0.0, 0.49, 0.51]); + let score: Vec = vec_to_fixed(&[0.5, 0.7, 1.]); + let majority: I32F32 = fixed(0.51); + assert_eq!( + one, + weighted_median( + &stake, + &score, + (0..stake.len()).collect::>().as_slice(), + one - majority, + zero, + stake.iter().sum() + ) + ); + + let stake: Vec = vec_to_fixed(&[0.0, 0.49, 0.0, 0.51]); + let score: Vec = vec_to_fixed(&[0.5, 0.5, 1., 1.]); + let majority: I32F32 = fixed(0.51); + assert_eq!( + one, + weighted_median( + &stake, + &score, + (0..stake.len()).collect::>().as_slice(), + one - majority, + zero, + stake.iter().sum() + ) + ); + + let stake: Vec = vec_to_fixed(&[0.0, 0.49, 0.0, 0.51, 0.0]); + let score: Vec = vec_to_fixed(&[0.5, 0.5, 1., 1., 0.5]); + let majority: I32F32 = fixed(0.51); + assert_eq!( + one, + weighted_median( + &stake, + &score, + (0..stake.len()).collect::>().as_slice(), + one - majority, + zero, + stake.iter().sum() + ) + ); + + let stake: Vec = vec_to_fixed(&[0.2, 0.2, 0.2, 0.2, 0.2]); + let score: Vec = vec_to_fixed(&[0.8, 0.2, 1., 0.6, 0.4]); + let majority: I32F32 = fixed(0.51); + assert_eq!( + fixed(0.6), + weighted_median( + &stake, + &score, + (0..stake.len()).collect::>().as_slice(), + one - majority, + zero, + stake.iter().sum() + ) + ); + + let stake: Vec = vec_to_fixed(&[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 
0.1, 0.1, 0.1, 0.1]); + let score: Vec = vec_to_fixed(&[0.8, 0.8, 0.2, 0.2, 1.0, 1.0, 0.6, 0.6, 0.4, 0.4]); + let majority: I32F32 = fixed(0.51); + assert_eq!( + fixed(0.6), + weighted_median( + &stake, + &score, + (0..stake.len()).collect::>().as_slice(), + one - majority, + zero, + stake.iter().sum() + ) + ); + + let n: usize = 100; + for majority in vec_to_fixed(&[ + 0., 0.0000001, 0.25, 0.49, 0.49, 0.49, 0.5, 0.51, 0.51, 0.51, 0.9999999, 1., + ]) { + for allow_equal in [false, true] { + let mut stake: Vec = vec![]; + let mut score: Vec = vec![]; + let mut last_score: I32F32 = zero; + for i in 0..n { + if allow_equal { + match rng.gen_range(0..2) { + 1 => stake.push(one), + _ => stake.push(zero), + } + if rng.gen_range(0..2) == 1 { + last_score += one + } + score.push(last_score); + } else { + stake.push(one); + score.push(I32F32::from_num(i)); + } + } + inplace_normalize(&mut stake); + let total_stake: I32F32 = stake.iter().sum(); + let mut minority: I32F32 = total_stake - majority; + if minority < zero { + minority = zero; + } + let mut medians: Vec = vec![]; + let mut median_stake: I32F32 = zero; + let mut median_set = false; + let mut stake_sum: I32F32 = zero; + for i in 0..n { + stake_sum += stake[i]; + if !median_set && stake_sum >= minority { + median_stake = stake_sum; + median_set = true; + } + if median_set { + if median_stake < stake_sum { + if median_stake == minority && !medians.contains(&score[i]) { + medians.push(score[i]); + } + break; + } + if !medians.contains(&score[i]) { + medians.push(score[i]); + } + } + } + if medians.is_empty() { + medians.push(zero); + } + let stake_idx: Vec = (0..stake.len()).collect(); + let result: I32F32 = + weighted_median(&stake, &score, &stake_idx, minority, zero, total_stake); + assert!(medians.contains(&result)); + for _ in 0..10 { + let mut permuted_uids: Vec = (0..n).collect(); + permuted_uids.shuffle(&mut thread_rng()); + stake = permuted_uids.iter().map(|&i| stake[i]).collect(); + score = 
permuted_uids.iter().map(|&i| score[i]).collect(); + let result: I32F32 = + weighted_median(&stake, &score, &stake_idx, minority, zero, total_stake); + assert!(medians.contains(&result)); + } + } + } + } +} + +#[test] +fn test_math_weighted_median_col() { + let stake: Vec = vec_to_fixed(&[]); + let weights: Vec> = vec![vec![]]; + let median: Vec = vec_to_fixed(&[]); + assert_eq!(median, weighted_median_col(&stake, &weights, fixed(0.5))); + + let stake: Vec = vec_to_fixed(&[0., 0.]); + let weights: Vec = vec![0., 0., 0., 0.]; + let weights: Vec> = vec_to_mat_fixed(&weights, 2, false); + let median: Vec = vec_to_fixed(&[0., 0.]); + assert_eq!(median, weighted_median_col(&stake, &weights, fixed(0.5))); + + let stake: Vec = vec_to_fixed(&[0., 0.75, 0.25, 0.]); + let weights: Vec = vec![0., 0.1, 0., 0., 0.2, 0.4, 0., 0.3, 0.1, 0., 0.4, 0.5]; + let weights: Vec> = vec_to_mat_fixed(&weights, 4, false); + let median: Vec = vec_to_fixed(&[0., 0.3, 0.4]); + assert_eq!(median, weighted_median_col(&stake, &weights, fixed(0.24))); + let median: Vec = vec_to_fixed(&[0., 0.2, 0.4]); + assert_eq!(median, weighted_median_col(&stake, &weights, fixed(0.26))); + let median: Vec = vec_to_fixed(&[0., 0.2, 0.1]); + assert_eq!(median, weighted_median_col(&stake, &weights, fixed(0.76))); + + let stake: Vec = vec_to_fixed(&[0., 0.3, 0.2, 0.5]); + let weights: Vec = vec![0., 0.1, 0., 0., 0.2, 0.4, 0., 0.3, 0.1, 0., 0., 0.5]; + let weights: Vec> = vec_to_mat_fixed(&weights, 4, false); + let median: Vec = vec_to_fixed(&[0., 0., 0.4]); + assert_eq!(median, weighted_median_col(&stake, &weights, fixed(0.51))); +} + +#[test] +fn test_math_weighted_median_col_sparse() { + let stake: Vec = vec_to_fixed(&[]); + let weights: Vec> = vec![vec![]]; + let median: Vec = vec_to_fixed(&[]); + assert_eq!( + median, + weighted_median_col_sparse(&stake, &weights, 0, fixed(0.5)) + ); + + let stake: Vec = vec_to_fixed(&[0., 0.]); + let weights: Vec = vec![0., 0., 0., 0.]; + let weights: Vec> = 
vec_to_sparse_mat_fixed(&weights, 2, false); + let median: Vec = vec_to_fixed(&[0., 0.]); + assert_eq!( + median, + weighted_median_col_sparse(&stake, &weights, 2, fixed(0.5)) + ); + + let stake: Vec = vec_to_fixed(&[0., 0.75, 0.25, 0.]); + let weights: Vec = vec![0., 0.1, 0., 0., 0.2, 0.4, 0., 0.3, 0.1, 0., 0.4, 0.5]; + let weights: Vec> = vec_to_sparse_mat_fixed(&weights, 4, false); + let median: Vec = vec_to_fixed(&[0., 0.3, 0.4]); + assert_eq!( + median, + weighted_median_col_sparse(&stake, &weights, 3, fixed(0.24)) + ); + let median: Vec = vec_to_fixed(&[0., 0.2, 0.4]); + assert_eq!( + median, + weighted_median_col_sparse(&stake, &weights, 3, fixed(0.26)) + ); + let median: Vec = vec_to_fixed(&[0., 0.2, 0.1]); + assert_eq!( + median, + weighted_median_col_sparse(&stake, &weights, 3, fixed(0.76)) + ); + + let stake: Vec = vec_to_fixed(&[0., 0.3, 0.2, 0.5]); + let weights: Vec = vec![0., 0.1, 0., 0., 0.2, 0.4, 0., 0.3, 0.1, 0., 0., 0.5]; + let weights: Vec> = vec_to_sparse_mat_fixed(&weights, 4, false); + let median: Vec = vec_to_fixed(&[0., 0., 0.4]); + assert_eq!( + median, + weighted_median_col_sparse(&stake, &weights, 3, fixed(0.51)) + ); +} + +#[test] +fn test_math_hadamard() { + let mat2: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let mat1: Vec = vec![ + 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., + ]; + let target: Vec = vec![ + 10., 40., 90., 160., 250., 360., 490., 640., 810., 1000., 1210., 1440., + ]; + let mat2 = vec_to_mat_fixed(&mat2, 4, false); + let mat1 = vec_to_mat_fixed(&mat1, 4, false); + let target = vec_to_mat_fixed(&target, 4, false); + let result = hadamard(&mat1, &mat2); + assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let mat2: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mat1: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mat2 = vec_to_mat_fixed(&mat2, 4, false); 
+ let mat1 = vec_to_mat_fixed(&mat1, 4, false); + let target = vec_to_mat_fixed(&target, 4, false); + let result = hadamard(&mat1, &mat2); + assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let mat2: Vec = vec![1., 0., 0., 0., 2., 0., 0., 0., 3., 0., 0., 0.]; + let mat1: Vec = vec![0., 0., 4., 0., 5., 0., 6., 0., 0., 0., 0., 0.]; + let target: Vec = vec![0., 0., 0., 0., 10., 0., 0., 0., 0., 0., 0., 0.]; + let mat2 = vec_to_mat_fixed(&mat2, 4, false); + let mat1 = vec_to_mat_fixed(&mat1, 4, false); + let target = vec_to_mat_fixed(&target, 4, false); + let result = hadamard(&mat1, &mat2); + assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); +} + +#[test] +fn test_math_hadamard_sparse() { + let mat2: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let mat1: Vec = vec![ + 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., + ]; + let target: Vec = vec![ + 10., 40., 90., 160., 250., 360., 490., 640., 810., 1000., 1210., 1440., + ]; + let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false); + let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false); + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = hadamard_sparse(&mat1, &mat2, 3); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let mat2: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mat1: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false); + let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false); + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = hadamard_sparse(&mat1, &mat2, 3); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let mat2: Vec = vec![1., 0., 0., 0., 2., 0., 0., 0., 3., 0., 0., 0.]; + let mat1: Vec = vec![0., 0., 4., 0., 5., 0., 6., 0., 0., 0., 0., 0.]; + let target: Vec = vec![0., 0., 0., 0., 10., 
0., 0., 0., 0., 0., 0., 0.]; + let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false); + let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false); + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = hadamard_sparse(&mat1, &mat2, 3); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); +} + +#[test] +fn test_math_mat_ema() { + let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let new: Vec = vec![ + 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., + ]; + let target: Vec = vec![ + 1.9, 3.8, 5.7, 7.6, 9.5, 11.4, 13.3, 15.2, 17.1, 19., 20.9, 22.8, + ]; + let old = vec_to_mat_fixed(&old, 4, false); + let new = vec_to_mat_fixed(&new, 4, false); + let target = vec_to_mat_fixed(&target, 4, false); + let result = mat_ema(&new, &old, I32F32::from_num(0.1)); + assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let new: Vec = vec![ + 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., + ]; + let target: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let old = vec_to_mat_fixed(&old, 4, false); + let new = vec_to_mat_fixed(&new, 4, false); + let target = vec_to_mat_fixed(&target, 4, false); + let result = mat_ema(&new, &old, I32F32::from_num(0)); + assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let new: Vec = vec![ + 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., + ]; + let target: Vec = vec![ + 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., + ]; + let old = vec_to_mat_fixed(&old, 4, false); + let new = vec_to_mat_fixed(&new, 4, false); + let target = vec_to_mat_fixed(&target, 4, false); + let result = mat_ema(&new, &old, I32F32::from_num(1)); + assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); +} + +#[test] +fn test_math_sparse_mat_ema() { + let 
old: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; + let new: Vec = vec![ + 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., + ]; + let target: Vec = vec![ + 1.9, 3.8, 5.7, 7.6, 9.5, 11.4, 13.3, 15.2, 17.1, 19., 20.9, 22.8, + ]; + let old = vec_to_sparse_mat_fixed(&old, 4, false); + let new = vec_to_sparse_mat_fixed(&new, 4, false); + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let old: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; + let new: Vec = vec![10., 20., 0., 40., 0., 60., 0., 80., 90., 100., 110., 120.]; + let target: Vec = vec![1., 3.8, 2.7, 7.6, 0., 11.4, 6.3, 15.2, 9., 19., 20.9, 22.8]; + let old = vec_to_sparse_mat_fixed(&old, 4, false); + let new = vec_to_sparse_mat_fixed(&new, 4, false); + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let old: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let new: Vec = vec![10., 20., 0., 40., 0., 60., 0., 80., 90., 100., 110., 120.]; + let target: Vec = vec![1., 2., 0., 4., 0., 6., 0., 8., 9., 10., 11., 12.]; + let old = vec_to_sparse_mat_fixed(&old, 4, false); + let new = vec_to_sparse_mat_fixed(&new, 4, false); + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let old: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let new: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let old = vec_to_sparse_mat_fixed(&old, 4, false); + let new = vec_to_sparse_mat_fixed(&new, 4, false); + let target = 
vec_to_sparse_mat_fixed(&target, 4, false); + let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); + let old: Vec = vec![1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; + let new: Vec = vec![0., 0., 0., 0., 2., 0., 0., 0., 0., 0., 0., 0.]; + let target: Vec = vec![0.9, 0., 0., 0., 0.2, 0., 0., 0., 0., 0., 0., 0.]; + let old = vec_to_sparse_mat_fixed(&old, 4, false); + let new = vec_to_sparse_mat_fixed(&new, 4, false); + let target = vec_to_sparse_mat_fixed(&target, 4, false); + let result = mat_ema_sparse(&new, &old, I32F32::from_num(0.1)); + assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); +} + +#[test] +fn test_math_matmul2() { + let epsilon: I32F32 = I32F32::from_num(0.0001); + let w: Vec> = vec![vec![I32F32::from_num(1.0); 3]; 3]; + assert_vec_compare( + &matmul(&w, &[I32F32::from_num(1.0); 3]), + &[ + I32F32::from_num(3), + I32F32::from_num(3), + I32F32::from_num(3), + ], + epsilon, + ); + assert_vec_compare( + &matmul(&w, &[I32F32::from_num(2.0); 3]), + &[ + I32F32::from_num(6), + I32F32::from_num(6), + I32F32::from_num(6), + ], + epsilon, + ); + assert_vec_compare( + &matmul(&w, &[I32F32::from_num(3.0); 3]), + &[ + I32F32::from_num(9), + I32F32::from_num(9), + I32F32::from_num(9), + ], + epsilon, + ); + assert_vec_compare( + &matmul(&w, &[I32F32::from_num(-1.0); 3]), + &[ + I32F32::from_num(-3), + I32F32::from_num(-3), + I32F32::from_num(-3), + ], + epsilon, + ); + let w: Vec> = vec![vec![I32F32::from_num(-1.0); 3]; 3]; + assert_vec_compare( + &matmul(&w, &[I32F32::from_num(1.0); 3]), + &[ + I32F32::from_num(-3), + I32F32::from_num(-3), + I32F32::from_num(-3), + ], + epsilon, + ); + assert_vec_compare( + &matmul(&w, &[I32F32::from_num(2.0); 3]), + &[ + I32F32::from_num(-6), + I32F32::from_num(-6), + I32F32::from_num(-6), + ], + epsilon, + ); + assert_vec_compare( + &matmul(&w, &[I32F32::from_num(3.0); 3]), + &[ + I32F32::from_num(-9), + 
I32F32::from_num(-9), + I32F32::from_num(-9), + ], + epsilon, + ); + assert_vec_compare( + &matmul(&w, &[I32F32::from_num(-1.0); 3]), + &[ + I32F32::from_num(3), + I32F32::from_num(3), + I32F32::from_num(3), + ], + epsilon, + ); + let w: Vec> = vec![ + vec![I32F32::from_num(1.0); 3], + vec![I32F32::from_num(2.0); 3], + vec![I32F32::from_num(3.0); 3], + ]; + assert_vec_compare( + &matmul(&w, &[I32F32::from_num(0.0); 3]), + &[ + I32F32::from_num(0.0), + I32F32::from_num(0.0), + I32F32::from_num(0.0), + ], + epsilon, + ); + assert_vec_compare( + &matmul(&w, &[I32F32::from_num(2.0); 3]), + &[ + I32F32::from_num(12), + I32F32::from_num(12), + I32F32::from_num(12), + ], + epsilon, + ); + let w: Vec> = vec![ + vec![ + I32F32::from_num(1), + I32F32::from_num(2), + I32F32::from_num(3) + ]; + 3 + ]; + assert_vec_compare( + &matmul(&w, &[I32F32::from_num(0.0); 3]), + &[ + I32F32::from_num(0.0), + I32F32::from_num(0.0), + I32F32::from_num(0.0), + ], + epsilon, + ); + assert_vec_compare( + &matmul(&w, &[I32F32::from_num(2.0); 3]), + &[ + I32F32::from_num(6), + I32F32::from_num(12), + I32F32::from_num(18), + ], + epsilon, + ); +} + +#[test] +fn test_math_fixed_to_u16() { + let expected = u16::MIN; + assert_eq!(fixed_to_u16(I32F32::from_num(expected)), expected); + + let expected = u16::MAX / 2; + assert_eq!(fixed_to_u16(I32F32::from_num(expected)), expected); + + let expected = u16::MAX; + assert_eq!(fixed_to_u16(I32F32::from_num(expected)), expected); +} + +#[test] +#[should_panic(expected = "overflow")] +fn test_math_fixed_to_u16_panics() { + let bad_input = I32F32::from_num(u32::MAX); + fixed_to_u16(bad_input); + + let bad_input = I32F32::from_num(-1); + fixed_to_u16(bad_input); +} + +// TODO: Investigate why `I32F32` and not `I64F64` +#[test] +fn test_math_fixed_to_u64() { + let expected = u64::MIN; + assert_eq!(fixed_to_u64(I32F32::from_num(expected)), expected); + + // let expected = u64::MAX / 2; + // assert_eq!(fixed_to_u64(I32F32::from_num(expected)), expected); + + // 
let expected = u64::MAX; + // assert_eq!(fixed_to_u64(I32F32::from_num(expected)), expected); +} + +#[test] +#[should_panic(expected = "-1 overflows")] +fn test_math_fixed_to_u64_panics() { + let bad_input = I32F32::from_num(-1); + fixed_to_u64(bad_input); +} + +#[test] +fn test_math_fixed64_to_u64() { + let expected = u64::MIN; + assert_eq!(fixed64_to_u64(I64F64::from_num(expected)), expected); + + let input = i64::MAX / 2; + let expected = u64::try_from(input).unwrap(); + assert_eq!(fixed64_to_u64(I64F64::from_num(input)), expected); + + let input = i64::MAX; + let expected = u64::try_from(input).unwrap(); + assert_eq!(fixed64_to_u64(I64F64::from_num(input)), expected); +} + +#[test] +#[should_panic(expected = "-1 overflows")] +fn test_math_fixed64_to_u64_panics() { + let bad_input = I64F64::from_num(-1); + fixed64_to_u64(bad_input); +} + +/* @TODO: find the _true_ max, and half, input values */ +#[test] +fn test_math_fixed64_to_fixed32() { + let input = u64::MIN; + let expected = u32::try_from(input).unwrap(); + assert_eq!(fixed64_to_fixed32(I64F64::from_num(expected)), expected); + + let expected = u32::MAX / 2; + let input = u64::from(expected); + assert_eq!(fixed64_to_fixed32(I64F64::from_num(input)), expected); +} + +#[test] +#[should_panic(expected = "overflow")] +fn test_math_fixed64_to_fixed32_panics() { + let bad_input = I64F64::from_num(u32::MAX); + fixed64_to_fixed32(bad_input); +} + +#[test] +fn test_math_u16_to_fixed() { + let input = u16::MIN; + let expected = I32F32::from_num(input); + assert_eq!(u16_to_fixed(input), expected); + + let input = u16::MAX / 2; + let expected = I32F32::from_num(input); + assert_eq!(u16_to_fixed(input), expected); + + let input = u16::MAX; + let expected = I32F32::from_num(input); + assert_eq!(u16_to_fixed(input), expected); +} + +#[test] +fn test_math_u16_proportion_to_fixed() { + let input = u16::MIN; + let expected = I32F32::from_num(input); + assert_eq!(u16_proportion_to_fixed(input), expected); +} + +#[test] +fn 
test_fixed_proportion_to_u16() { + let expected = u16::MIN; + let input = I32F32::from_num(expected); + assert_eq!(fixed_proportion_to_u16(input), expected); +} + +#[test] +#[should_panic(expected = "overflow")] +fn test_fixed_proportion_to_u16_panics() { + let expected = u16::MAX; + let input = I32F32::from_num(expected); + log::trace!("Testing with input: {:?}", input); // Debug output + let result = fixed_proportion_to_u16(input); + log::trace!("Testing with result: {:?}", result); // Debug output +} +#[test] +fn test_vec_fixed64_to_fixed32() { + let input = vec![I64F64::from_num(i32::MIN)]; + let expected = vec![I32F32::from_num(i32::MIN)]; + assert_eq!(vec_fixed64_to_fixed32(input), expected); + + let input = vec![I64F64::from_num(i32::MAX)]; + let expected = vec![I32F32::from_num(i32::MAX)]; + assert_eq!(vec_fixed64_to_fixed32(input), expected); +} + +#[test] +#[should_panic(expected = "overflow")] +fn test_vec_fixed64_to_fixed32_panics() { + let bad_input = vec![I64F64::from_num(i64::MAX)]; + vec_fixed64_to_fixed32(bad_input); +} + +#[test] +#[allow(arithmetic_overflow)] +fn test_checked_sum() { + let overflowing_input = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, u64::MAX]; + // Expect None when overflow occurs + assert_eq!(checked_sum(&overflowing_input), None); + + let normal_input = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + // Expect Some when no overflow occurs + assert_eq!(checked_sum(&normal_input), Some(55)); + + let empty_input: Vec = vec![]; + // Expect Some(u16::default()) when input is empty + assert_eq!(checked_sum(&empty_input), Some(u16::default())); + + let single_input = vec![1]; + // Expect Some(...) 
when input is a single value + assert_eq!(checked_sum(&single_input), Some(1)); +} + +#[test] +fn test_mat_ema_alpha_vec_sparse_empty() { + let new: Vec> = Vec::new(); + let old: Vec> = Vec::new(); + let alpha: Vec = Vec::new(); + let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + assert_eq!(result, Vec::>::new()); +} + +#[test] +fn test_mat_ema_alpha_vec_sparse_single_element() { + let new: Vec> = vec![vec![(0, I32F32::from_num(1.0))]]; + let old: Vec> = vec![vec![(0, I32F32::from_num(2.0))]]; + let alpha: Vec = vec![I32F32::from_num(0.5)]; + let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + assert_eq!(result, vec![vec![(0, I32F32::from_num(1.5))]]); +} + +#[test] +fn test_mat_ema_alpha_vec_sparse_multiple_elements() { + let new: Vec> = vec![ + vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(2.0))], + vec![(0, I32F32::from_num(3.0)), (1, I32F32::from_num(4.0))], + ]; + let old: Vec> = vec![ + vec![(0, I32F32::from_num(5.0)), (1, I32F32::from_num(6.0))], + vec![(0, I32F32::from_num(7.0)), (1, I32F32::from_num(8.0))], + ]; + let alpha: Vec = vec![I32F32::from_num(0.1), I32F32::from_num(0.2)]; + let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + let expected = vec![ + vec![(0, I32F32::from_num(4.6)), (1, I32F32::from_num(5.2))], + vec![(0, I32F32::from_num(6.6)), (1, I32F32::from_num(7.2))], + ]; + assert_sparse_mat_compare(&result, &expected, I32F32::from_num(0.000001)); +} + +#[test] +fn test_mat_ema_alpha_vec_sparse_zero_alpha() { + let new: Vec> = vec![vec![(0, I32F32::from_num(1.0))]]; + let old: Vec> = vec![vec![(0, I32F32::from_num(2.0))]]; + let alpha: Vec = vec![I32F32::from_num(0.0)]; + let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + assert_eq!(result, vec![vec![(0, I32F32::from_num(2.0))]]); +} + +#[test] +fn test_mat_ema_alpha_vec_sparse_one_alpha() { + let new: Vec> = vec![vec![(0, I32F32::from_num(1.0))]]; + let old: Vec> = vec![vec![(0, I32F32::from_num(2.0))]]; + let alpha: Vec = 
vec![I32F32::from_num(1.0)]; + let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + assert_eq!(result, vec![vec![(0, I32F32::from_num(1.0))]]); +} + +#[test] +fn test_mat_ema_alpha_vec_sparse_mixed_alpha() { + let new: Vec> = vec![ + vec![(0, I32F32::from_num(1.0)), (1, I32F32::from_num(2.0))], + vec![(0, I32F32::from_num(3.0)), (1, I32F32::from_num(4.0))], + ]; + let old: Vec> = vec![ + vec![(0, I32F32::from_num(5.0)), (1, I32F32::from_num(6.0))], + vec![(0, I32F32::from_num(7.0)), (1, I32F32::from_num(8.0))], + ]; + let alpha: Vec = vec![I32F32::from_num(0.3), I32F32::from_num(0.7)]; + let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + assert_sparse_mat_compare( + &result, + &vec![ + vec![(0, I32F32::from_num(3.8)), (1, I32F32::from_num(3.2))], + vec![(0, I32F32::from_num(5.8)), (1, I32F32::from_num(5.2))], + ], + I32F32::from_num(0.000001), + ); +} + +#[test] +fn test_mat_ema_alpha_vec_sparse_sparse_matrix() { + let new: Vec> = vec![ + vec![(0, I32F32::from_num(1.0))], + vec![(1, I32F32::from_num(4.0))], + ]; + let old: Vec> = vec![ + vec![(0, I32F32::from_num(5.0))], + vec![(1, I32F32::from_num(8.0))], + ]; + let alpha: Vec = vec![I32F32::from_num(0.5), I32F32::from_num(0.5)]; + let result = mat_ema_alpha_vec_sparse(&new, &old, &alpha); + assert_eq!( + result, + vec![ + vec![(0, I32F32::from_num(3.0))], + vec![(1, I32F32::from_num(6.0))] + ] + ); +} + +#[test] +fn test_mat_ema_alpha_vec_basic() { + let new = mat_to_fixed(&[vec![1.0, 2.0, 3.0], vec![4.0, 5.0, 6.0]]); + let old = mat_to_fixed(&[vec![0.5, 1.5, 2.5], vec![3.5, 4.5, 5.5]]); + let alpha = vec![ + I32F32::from_num(0.5), + I32F32::from_num(0.5), + I32F32::from_num(0.5), + ]; + let expected = mat_to_fixed(&[vec![0.75, 1.75, 2.75], vec![3.75, 4.75, 5.75]]); + let result = mat_ema_alpha_vec(&new, &old, &alpha); + assert_eq!(result, expected); +} + +#[test] +fn test_mat_ema_alpha_vec_varying_alpha() { + let new = mat_to_fixed(&[vec![1.0, 2.0, 3.0], vec![4.0, 5.0, 6.0]]); + let old = 
mat_to_fixed(&[vec![0.5, 1.5, 2.5], vec![3.5, 4.5, 5.5]]); + let alpha = vec![ + I32F32::from_num(0.2), + I32F32::from_num(0.5), + I32F32::from_num(0.8), + ]; + let expected = mat_to_fixed(&[vec![0.6, 1.75, 2.9], vec![3.6, 4.75, 5.9]]); + let result = mat_ema_alpha_vec(&new, &old, &alpha); + assert_mat_approx_eq(&result, &expected, I32F32::from_num(1e-6)); +} + +#[test] +fn test_mat_ema_alpha_vec_empty_matrices() { + let new: Vec> = vec![]; + let old: Vec> = vec![]; + let alpha: Vec = vec![]; + let expected: Vec> = vec![vec![]; 1]; + let result = mat_ema_alpha_vec(&new, &old, &alpha); + assert_eq!(result, expected); +} + +#[test] +fn test_mat_ema_alpha_vec_single_element() { + let new = mat_to_fixed(&[vec![1.0]]); + let old = mat_to_fixed(&[vec![0.5]]); + let alpha = vec![I32F32::from_num(0.5)]; + let expected = mat_to_fixed(&[vec![0.75]]); + let result = mat_ema_alpha_vec(&new, &old, &alpha); + assert_eq!(result, expected); +} + +// TODO: (@sd): Should these be non panicking? +#[test] +#[should_panic(expected = "assertion failed")] +fn test_mat_ema_alpha_vec_mismatched_dimensions() { + let new = mat_to_fixed(&[vec![1.0, 2.0], vec![3.0, 4.0]]); + let old = mat_to_fixed(&[vec![1.0, 2.0, 3.0], vec![4.0, 5.0, 6.0]]); + let alpha = vec![ + I32F32::from_num(0.5), + I32F32::from_num(0.5), + I32F32::from_num(0.5), + ]; + let _result = mat_ema_alpha_vec(&new, &old, &alpha); +} + +#[test] +fn test_quantile() { + // Test with a non-empty vector and valid quantile values + let data = vec![ + I32F32::from_num(1.0), + I32F32::from_num(2.0), + I32F32::from_num(3.0), + I32F32::from_num(4.0), + I32F32::from_num(5.0), + ]; + + // Test 0th quantile (minimum) + let result = quantile(&data, 0.0); + assert_eq!(result, I32F32::from_num(1.0)); + + // Test 25th quantile + let result = quantile(&data, 0.25); + assert_eq!(result, I32F32::from_num(2.0)); + + // Test 50th quantile (median) + let result = quantile(&data, 0.5); + assert_eq!(result, I32F32::from_num(3.0)); + + // Test 66th 
quantile + let result = quantile(&data, 0.66); + assert_eq!(result, I32F32::from_num(3.64)); + + // Test 75th quantile + let result = quantile(&data, 0.75); + assert_eq!(result, I32F32::from_num(4.0)); + + // Test 100th quantile (maximum) + let result = quantile(&data, 1.0); + assert_eq!(result, I32F32::from_num(5.0)); +} diff --git a/pallets/subtensor/tests/mock.rs b/pallets/subtensor/tests/mock.rs index 9995acf84..0356da3af 100644 --- a/pallets/subtensor/tests/mock.rs +++ b/pallets/subtensor/tests/mock.rs @@ -158,6 +158,9 @@ parameter_types! { pub const InitialSubnetLimit: u16 = 10; // Max 10 subnets. pub const InitialNetworkRateLimit: u64 = 0; pub const InitialTargetStakesPerInterval: u16 = 2; + pub const InitialAlphaHigh: u16 = 900; // Represents 0.9 as per the production default + pub const InitialAlphaLow: u16 = 700; // Represents 0.7 as per the production default + pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn } // Configure collective pallet for council @@ -358,6 +361,9 @@ impl pallet_subtensor::Config for Test { type InitialSubnetLimit = InitialSubnetLimit; type InitialNetworkRateLimit = InitialNetworkRateLimit; type InitialTargetStakesPerInterval = InitialTargetStakesPerInterval; + type AlphaHigh = InitialAlphaHigh; + type AlphaLow = InitialAlphaLow; + type LiquidAlphaOn = InitialLiquidAlphaOn; } impl pallet_utility::Config for Test { diff --git a/pallets/subtensor/tests/weights.rs b/pallets/subtensor/tests/weights.rs index bb7f11908..5cd3bf7c2 100644 --- a/pallets/subtensor/tests/weights.rs +++ b/pallets/subtensor/tests/weights.rs @@ -1,13 +1,14 @@ mod mock; use frame_support::{ assert_err, assert_ok, - dispatch::{DispatchClass, DispatchResult, GetDispatchInfo, Pays}, + dispatch::{DispatchClass, DispatchInfo, DispatchResult, GetDispatchInfo, Pays}, + pallet_prelude::{InvalidTransaction, TransactionValidityError}, }; use mock::*; -use pallet_subtensor::Error; +use pallet_subtensor::{Error, Owner}; use sp_core::{H256, 
U256}; use sp_runtime::{ - traits::{BlakeTwo256, Hash}, + traits::{BlakeTwo256, DispatchInfoOf, Hash, SignedExtension}, DispatchError, }; use substrate_fixed::types::I32F32; @@ -37,6 +38,103 @@ fn test_set_weights_dispatch_info_ok() { assert_eq!(dispatch_info.pays_fee, Pays::No); }); } +#[test] +#[cfg(not(tarpaulin))] +fn test_set_rootweights_dispatch_info_ok() { + new_test_ext(0).execute_with(|| { + let dests = vec![1, 1]; + let weights = vec![1, 1]; + let netuid: u16 = 1; + let version_key: u64 = 0; + let hotkey: U256 = U256::from(1); // Add the hotkey field + let call = RuntimeCall::SubtensorModule(SubtensorCall::set_root_weights { + netuid, + dests, + weights, + version_key, + hotkey, // Include the hotkey field + }); + let dispatch_info = call.get_dispatch_info(); + + assert_eq!(dispatch_info.class, DispatchClass::Normal); + assert_eq!(dispatch_info.pays_fee, Pays::No); + }); +} + +#[test] +fn test_set_rootweights_validate() { + // Testing the signed extension validate function + // correctly filters this transaction. + + new_test_ext(0).execute_with(|| { + let dests = vec![1, 1]; + let weights = vec![1, 1]; + let netuid: u16 = 1; + let version_key: u64 = 0; + let coldkey = U256::from(0); + let hotkey: U256 = U256::from(1); // Add the hotkey field + assert_ne!(hotkey, coldkey); // Ensure hotkey is NOT the same as coldkey !!! 
+ + let who = coldkey; // The coldkey signs this transaction + + let call = RuntimeCall::SubtensorModule(SubtensorCall::set_root_weights { + netuid, + dests, + weights, + version_key, + hotkey, // Include the hotkey field + }); + + // Create netuid + add_network(netuid, 0, 0); + // Register the hotkey + SubtensorModule::append_neuron(netuid, &hotkey, 0); + Owner::::insert(hotkey, coldkey); + + let min_stake = 500_000_000_000; + // Set the minimum stake + SubtensorModule::set_weights_min_stake(min_stake); + + // Verify stake is less than minimum + assert!(SubtensorModule::get_total_stake_for_hotkey(&hotkey) < min_stake); + let info: DispatchInfo = + DispatchInfoOf::<::RuntimeCall>::default(); + + let extension = pallet_subtensor::SubtensorSignedExtension::::new(); + // Submit to the signed extension validate function + let result_no_stake = extension.validate(&who, &call.clone(), &info, 10); + // Should fail + assert_err!( + // Should get an invalid transaction error + result_no_stake, + TransactionValidityError::Invalid(InvalidTransaction::Call,) + ); + + // Increase the stake to be equal to the minimum + SubtensorModule::increase_stake_on_hotkey_account(&hotkey, min_stake); + + // Verify stake is equal to minimum + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + min_stake + ); + + // Submit to the signed extension validate function + let result_min_stake = extension.validate(&who, &call.clone(), &info, 10); + // Now the call should pass + assert_ok!(result_min_stake); + + // Try with more stake than minimum + SubtensorModule::increase_stake_on_hotkey_account(&hotkey, 1); + + // Verify stake is more than minimum + assert!(SubtensorModule::get_total_stake_for_hotkey(&hotkey) > min_stake); + + let result_more_stake = extension.validate(&who, &call.clone(), &info, 10); + // The call should still pass + assert_ok!(result_more_stake); + }); +} #[test] fn test_commit_weights_dispatch_info_ok() { @@ -62,6 +160,82 @@ fn 
test_commit_weights_dispatch_info_ok() { }); } +#[test] +fn test_commit_weights_validate() { + // Testing the signed extension validate function + // correctly filters this transaction. + + new_test_ext(0).execute_with(|| { + let dests = vec![1, 1]; + let weights = vec![1, 1]; + let netuid: u16 = 1; + let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; + let version_key: u64 = 0; + let coldkey = U256::from(0); + let hotkey: U256 = U256::from(1); // Add the hotkey field + assert_ne!(hotkey, coldkey); // Ensure hotkey is NOT the same as coldkey !!! + + let who = hotkey; // The hotkey signs this transaction + + let commit_hash: H256 = + BlakeTwo256::hash_of(&(hotkey, netuid, dests, weights, salt, version_key)); + + let call = RuntimeCall::SubtensorModule(SubtensorCall::commit_weights { + netuid, + commit_hash, + }); + + // Create netuid + add_network(netuid, 0, 0); + // Register the hotkey + SubtensorModule::append_neuron(netuid, &hotkey, 0); + Owner::::insert(hotkey, coldkey); + + let min_stake = 500_000_000_000; + // Set the minimum stake + SubtensorModule::set_weights_min_stake(min_stake); + + // Verify stake is less than minimum + assert!(SubtensorModule::get_total_stake_for_hotkey(&hotkey) < min_stake); + let info: DispatchInfo = + DispatchInfoOf::<::RuntimeCall>::default(); + + let extension = pallet_subtensor::SubtensorSignedExtension::::new(); + // Submit to the signed extension validate function + let result_no_stake = extension.validate(&who, &call.clone(), &info, 10); + // Should fail + assert_err!( + // Should get an invalid transaction error + result_no_stake, + TransactionValidityError::Invalid(InvalidTransaction::Call,) + ); + + // Increase the stake to be equal to the minimum + SubtensorModule::increase_stake_on_hotkey_account(&hotkey, min_stake); + + // Verify stake is equal to minimum + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + min_stake + ); + + // Submit to the signed extension validate function + let result_min_stake = 
extension.validate(&who, &call.clone(), &info, 10); + // Now the call should pass + assert_ok!(result_min_stake); + + // Try with more stake than minimum + SubtensorModule::increase_stake_on_hotkey_account(&hotkey, 1); + + // Verify stake is more than minimum + assert!(SubtensorModule::get_total_stake_for_hotkey(&hotkey) > min_stake); + + let result_more_stake = extension.validate(&who, &call.clone(), &info, 10); + // The call should still pass + assert_ok!(result_more_stake); + }); +} + #[test] fn test_reveal_weights_dispatch_info_ok() { new_test_ext(0).execute_with(|| { @@ -85,6 +259,82 @@ fn test_reveal_weights_dispatch_info_ok() { }); } +#[test] +fn test_reveal_weights_validate() { + // Testing the signed extension validate function + // correctly filters this transaction. + + new_test_ext(0).execute_with(|| { + let dests = vec![1, 1]; + let weights = vec![1, 1]; + let netuid: u16 = 1; + let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; + let version_key: u64 = 0; + let coldkey = U256::from(0); + let hotkey: U256 = U256::from(1); // Add the hotkey field + assert_ne!(hotkey, coldkey); // Ensure hotkey is NOT the same as coldkey !!! 
+ + let who = hotkey; // The hotkey signs this transaction + + let call = RuntimeCall::SubtensorModule(SubtensorCall::reveal_weights { + netuid, + uids: dests, + values: weights, + salt, + version_key, + }); + + // Create netuid + add_network(netuid, 0, 0); + // Register the hotkey + SubtensorModule::append_neuron(netuid, &hotkey, 0); + Owner::::insert(hotkey, coldkey); + + let min_stake = 500_000_000_000; + // Set the minimum stake + SubtensorModule::set_weights_min_stake(min_stake); + + // Verify stake is less than minimum + assert!(SubtensorModule::get_total_stake_for_hotkey(&hotkey) < min_stake); + let info: DispatchInfo = + DispatchInfoOf::<::RuntimeCall>::default(); + + let extension = pallet_subtensor::SubtensorSignedExtension::::new(); + // Submit to the signed extension validate function + let result_no_stake = extension.validate(&who, &call.clone(), &info, 10); + // Should fail + assert_err!( + // Should get an invalid transaction error + result_no_stake, + TransactionValidityError::Invalid(InvalidTransaction::Call,) + ); + + // Increase the stake to be equal to the minimum + SubtensorModule::increase_stake_on_hotkey_account(&hotkey, min_stake); + + // Verify stake is equal to minimum + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + min_stake + ); + + // Submit to the signed extension validate function + let result_min_stake = extension.validate(&who, &call.clone(), &info, 10); + // Now the call should pass + assert_ok!(result_min_stake); + + // Try with more stake than minimum + SubtensorModule::increase_stake_on_hotkey_account(&hotkey, 1); + + // Verify stake is more than minimum + assert!(SubtensorModule::get_total_stake_for_hotkey(&hotkey) > min_stake); + + let result_more_stake = extension.validate(&who, &call.clone(), &info, 10); + // The call should still pass + assert_ok!(result_more_stake); + }); +} + #[test] fn test_set_weights_is_root_error() { new_test_ext(0).execute_with(|| { diff --git a/runtime/Cargo.toml 
b/runtime/Cargo.toml index 36635b4e5..c2d26973b 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -15,6 +15,10 @@ workspace = true [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +[[bin]] +name = "spec_version" +path = "src/spec_version.rs" + [dependencies] subtensor-custom-rpc-runtime-api = { version = "0.0.2", path = "../pallets/subtensor/runtime-api", default-features = false } smallvec = { workspace = true } diff --git a/runtime/src/check_nonce.rs b/runtime/src/check_nonce.rs index e6e992ccf..d9948f9b6 100644 --- a/runtime/src/check_nonce.rs +++ b/runtime/src/check_nonce.rs @@ -120,7 +120,7 @@ where priority: 0, requires, provides, - longevity: TransactionLongevity::max_value(), + longevity: TransactionLongevity::MAX, propagate: true, }) } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 5698ccb4d..ac37f5e16 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -135,7 +135,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 186, + spec_version: 152, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -817,6 +817,9 @@ parameter_types! 
{ pub const SubtensorInitialNetworkLockReductionInterval: u64 = 14 * 7200; pub const SubtensorInitialNetworkRateLimit: u64 = 7200; pub const SubtensorInitialTargetStakesPerInterval: u16 = 1; + pub const InitialAlphaHigh: u16 = 900; // Represents 0.9 as per the production default + pub const InitialAlphaLow: u16 = 700; // Represents 0.7 as per the production default + pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn } impl pallet_subtensor::Config for Runtime { @@ -868,6 +871,9 @@ impl pallet_subtensor::Config for Runtime { type InitialSubnetLimit = SubtensorInitialSubnetLimit; type InitialNetworkRateLimit = SubtensorInitialNetworkRateLimit; type InitialTargetStakesPerInterval = SubtensorInitialTargetStakesPerInterval; + type AlphaHigh = InitialAlphaHigh; + type AlphaLow = InitialAlphaLow; + type LiquidAlphaOn = InitialLiquidAlphaOn; } use sp_runtime::BoundedVec; @@ -1142,6 +1148,18 @@ impl fn set_commit_reveal_weights_enabled(netuid: u16, enabled: bool) { SubtensorModule::set_commit_reveal_weights_enabled(netuid, enabled); } + + fn set_alpha_high(netuid: u16, alpha_high: u16) -> Result<(), DispatchError> { + SubtensorModule::set_alpha_high(netuid, alpha_high) + } + + fn set_alpha_low(netuid: u16, alpha_low: u16) -> Result<(), DispatchError> { + SubtensorModule::set_alpha_low(netuid, alpha_low) + } + + fn set_liquid_alpha_enabled(netuid: u16, enabled: bool) { + SubtensorModule::set_liquid_alpha_enabled(netuid, enabled); + } } impl pallet_admin_utils::Config for Runtime { diff --git a/runtime/src/spec_version.rs b/runtime/src/spec_version.rs new file mode 100644 index 000000000..20b75ac04 --- /dev/null +++ b/runtime/src/spec_version.rs @@ -0,0 +1,5 @@ +use node_subtensor_runtime::VERSION; + +fn main() { + println!("{}", VERSION.spec_version); +} diff --git a/scripts/test_specific.sh b/scripts/test_specific.sh index ab06060aa..47cacd27d 100755 --- a/scripts/test_specific.sh +++ b/scripts/test_specific.sh @@ -1,4 +1,4 @@ 
pallet="${3:-pallet-subtensor}" features="${4:-pow-faucet}" -RUST_LOG=info cargo test --release --features=$features -p $pallet --test $1 -- $2 --nocapture --exact \ No newline at end of file +RUST_LOG=pallet_subtensor=trace cargo test --release --features=$features -p $pallet --test $1 -- $2 --nocapture --exact \ No newline at end of file