diff --git a/.env.example b/.env.example deleted file mode 100644 index afd7574..0000000 --- a/.env.example +++ /dev/null @@ -1,88 +0,0 @@ -# ---------------------------------------------------------------- -# crunch CLI configuration variables -# ---------------------------------------------------------------- -# [CRUNCH_STASHES] Validator stash addresses for which 'crunch flakes', 'crunch rewards' -# or 'crunch view' will be applied. -# If needed specify more than one (e.g. stash_1,stash_2,stash_3). -CRUNCH_STASHES=5GTD7ZeD823BjpmZBCSzBQp7cvHR1Gunq7oDkurZr9zUev2n -# -# [CRUNCH_STASHES_URL] Additionally the list of stashes could be defined and available in a remote file. -# `crunch` will try to fetch the stashes from the endpoint predefined here before triggering the respective payouts -# Please have a look at the file '.remote.stashes.example' as an example -#CRUNCH_STASHES_URL=https://raw.githubusercontent.com/turboflakes/crunch/main/.remote.stashes.example -# -# [CRUNCH_SUBSTRATE_WS_URL] Substrate websocket endpoint for which 'crunch' will try to -# connect. (e.g. wss://kusama-rpc.polkadot.io) (NOTE: substrate_ws_url takes precedence -# than argument) -#CRUNCH_SUBSTRATE_WS_URL=ws://localhost:9944 -# -# [CRUNCH_MAXIMUM_PAYOUTS] Maximum number of unclaimed eras for which an extrinsic payout -# will be submitted. (e.g. a value of 4 means that if there are unclaimed eras in the last -# 84 the maximum unclaimed payout calls for each stash address will be 4). [default: 4] -CRUNCH_MAXIMUM_PAYOUTS=4 -# -# [CRUNCH_MAXIMUM_HISTORY_ERAS] Maximum number of history eras for which crunch will look for -# unclaimed rewards. The maximum value supported is the one defined by constant history_depth -# (e.g. a value of 4 means that crunch will only check in the latest 4 eras if there are any -# unclaimed rewards for each stash address). [default: 4] -CRUNCH_MAXIMUM_HISTORY_ERAS=4 -# -# [CRUNCH_MAXIMUM_CALLS] Maximum number of calls in a single batch. 
[default: 4] -CRUNCH_MAXIMUM_CALLS=4 -# -# [CRUNCH_SEED_PATH] File path containing the private seed phrase to Sign the extrinsic -# payout call. [default: .private.seed] -#CRUNCH_SEED_PATH=.private.seed.example -# -# ---------------------------------------------------------------- -# Matrix configuration variables -# ---------------------------------------------------------------- -CRUNCH_MATRIX_USER=@your-regular-matrix-account:matrix.org -CRUNCH_MATRIX_BOT_USER=@your-own-crunch-bot-account:matrix.org -# NOTE: type the bot password within "" so that any special character could be parsed correctly into a string. -CRUNCH_MATRIX_BOT_PASSWORD="anotthateasypassword" -# ---------------------------------------------------------------- -# ONE-T configuration variables -# ---------------------------------------------------------------- -# Note: If ONET_API_ENABLED equals true, by default Crunch will try to fetch the validator grade from the respective -# network it is connected to. -#CRUNCH_ONET_API_ENABLED=true -# -# [CRUNCH_ONET_API_URL] Define a custom ONET backend endpoint -#CRUNCH_ONET_API_URL=https://polkadot-onet-api.turboflakes.io -# -# [CRUNCH_ONET_API_KEY] Define a custom ONET api key. -#CRUNCH_ONET_API_KEY=crunch-101 -# -# [CRUNCH_ONET_NUMBER_LAST_SESSIONS] Define the number of last sessions the grade is evaluated. Default is 6. -#CRUNCH_ONET_NUMBER_LAST_SESSIONS=6 -# ---------------------------------------------------------------- -# Nomination Pools configuration variables -# ---------------------------------------------------------------- -# [CRUNCH_POOL_IDS] Additionally the list of stashes could be defined from a single or more Nomination Pool Ids. -# `crunch` will try to fetch the nominees of the respective pool id predefined here before triggering the respective payouts -CRUNCH_POOL_IDS=2 -# -# [CRUNCH_POOL_COMPOUND_THRESHOLD] Define minimum pending rewards threshold in PLANCKS. 
-# Note: only pending rewards above the threshold are included in the auto-compound batch. -# 1 DOT = 10000000000 PLANCKS -# 1 KSM = 1000000000000 PLANCKS -CRUNCH_POOL_COMPOUND_THRESHOLD=10000000000 -# -# [CRUNCH_POOL_MEMBERS_COMPOUND_ENABLED] Enable auto-compound rewards for every member that belongs to the pools -# previously selected by CRUNCH_POOL_IDS. Note that members have to have their permissions -# set as PermissionlessCompound or PermissionlessAll. -#CRUNCH_POOL_MEMBERS_COMPOUND_ENABLED=true -# -# [CRUNCH_POOL_ONLY_OPERATOR_COMPOUND_ENABLED] Enable auto-compound rewards for the pool operator member that belongs to the pools -# previously selected by CRUNCH_POOL_IDS. Note that operator member account have to have their permissions -# set as PermissionlessCompound or PermissionlessAll. -CRUNCH_POOL_ONLY_OPERATOR_COMPOUND_ENABLED=true -# -# [CRUNCH_POOL_ACTIVE_NOMINEES_PAYOUT_ENABLED] Enable payouts only for ACTIVE nominees assigned to the pools -# previously selected by CRUNCH_POOL_IDS. -#CRUNCH_POOL_ACTIVE_NOMINEES_PAYOUT_ENABLED=true -# -# [CRUNCH_POOL_ALL_NOMINEES_PAYOUT_ENABLED] Enable payouts for ALL nominees assigned to the pools -# previously selected by CRUNCH_POOL_IDS. 
-#CRUNCH_POOL_ALL_NOMINEES_PAYOUT_ENABLED=true \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..f0d570e --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,25 @@ +--- +version: 2 +updates: + # Maintain dependencies for GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "monthly" + open-pull-requests-limit: 5 + rebase-strategy: "disabled" + + # Maintain dependencies for Docker images + - package-ecosystem: "docker" + directory: "/" + schedule: + interval: "monthly" + open-pull-requests-limit: 5 + rebase-strategy: "disabled" + + # Maintain dependencies for Rust + # Note: Dependabot can't recursively search directories at the moment + - package-ecosystem: "cargo" + directory: "/" + schedule: + interval: "monthly" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..7d070f0 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,164 @@ +on: + push: + branches: [main, dev] + pull_request: + +name: CI +permissions: read-all + +jobs: + rustfmt: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + + - name: Configure rustc version + run: | + RUSTC_VERSION=$(grep channel rust-toolchain.toml | tail -n1 | tr -d " " | cut -f2 -d'"') + echo "RUSTC_VERSION=$RUSTC_VERSION" >> "$GITHUB_ENV" + + - name: Install Rust toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: ${{ env.RUSTC_VERSION }} + profile: minimal + override: true + components: rustfmt + + - name: Check formatting + uses: actions-rs/cargo@v1 + with: + command: fmt + args: -- --check + + clippy: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + + - name: Configure rustc version + run: | + RUSTC_VERSION=$(grep channel rust-toolchain.toml | tail -n1 | tr -d " " | cut -f2 -d'"') + echo "RUSTC_VERSION=$RUSTC_VERSION" >> "$GITHUB_ENV" + + - name: Install Rust toolchain + uses: actions-rs/toolchain@v1 + with: + 
toolchain: ${{ env.RUSTC_VERSION }} + profile: minimal + override: true + components: clippy + - uses: Swatinem/rust-cache@v2 + + - name: Run Clippy + uses: actions-rs/cargo@v1 + with: + command: clippy + args: --all-targets --all-features -- -D warnings + + check: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + + - name: Configure rustc version + run: | + RUSTC_VERSION=$(grep channel rust-toolchain.toml | tail -n1 | tr -d " " | cut -f2 -d'"') + echo "RUSTC_VERSION=$RUSTC_VERSION" >> "$GITHUB_ENV" + + - name: Install Rust toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: ${{ env.RUSTC_VERSION }} + target: wasm32-unknown-unknown + profile: minimal + override: true + - uses: Swatinem/rust-cache@v2 + + - name: Check Build + run: | + RUSTFLAGS="-D warnings" cargo check --release + + unit-test: + permissions: + pull-requests: write + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + + - name: Configure rustc version + run: | + RUSTC_VERSION=$(grep channel rust-toolchain.toml | tail -n1 | tr -d " " | cut -f2 -d'"') + echo "RUSTC_VERSION=$RUSTC_VERSION" >> "$GITHUB_ENV" + + - name: Install Rust toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: ${{ env.RUSTC_VERSION }} + components: llvm-tools-preview + profile: minimal + override: true + - uses: Swatinem/rust-cache@v2 + + - name: Install cargo-llvm-cov + uses: taiki-e/install-action@v2.32.7 + with: + tool: cargo-llvm-cov + + - name: Execute tests + id: coverage + env: + CRUNCH_CONFIG_FILENAME: environments/cc3/testnet/.env + run: | + PR_NUMBER=$(echo "$GITHUB_REF" | sed "s|refs/pull/||" | sed "s|/merge||") + DESTINATION_PATH="crunch/PR-$PR_NUMBER" + export DESTINATION_PATH + echo "**For full LLVM coverage report [click here](https://staticsitellvmhtml.z13.web.core.windows.net/$DESTINATION_PATH/html/)!**" > uncovered-lines.log + + cargo llvm-cov --workspace --html --show-missing-lines \ + --hide-instantiations --ignore-filename-regex "(tests.rs|mock.rs)" + + 
UNCOVERED_LINES=$(sed "s|$(pwd)|.|" uncovered-lines.log) + # workaround the fact that GitHub Actions doesn't support multi-line output + # https://trstringer.com/github-actions-multiline-strings/ + UNCOVERED_LINES="${UNCOVERED_LINES//'%'/'%25'}" + UNCOVERED_LINES="${UNCOVERED_LINES//$'\n'/'%0A'}" + UNCOVERED_LINES="${UNCOVERED_LINES//$'\r'/'%0D'}" + echo "uncovered_lines=$UNCOVERED_LINES" >> "$GITHUB_OUTPUT" + + - name: Azure login + if: env.GITHUB_TOKEN + env: + GITHUB_TOKEN: ${{ secrets.CREDITCOIN_GITHUB_API_TOKEN }} + uses: azure/login@v2 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + allow-no-subscriptions: true + + - name: Upload coverage report to Azure Storage + if: env.GITHUB_TOKEN + env: + GITHUB_TOKEN: ${{ secrets.CREDITCOIN_GITHUB_API_TOKEN }} + run: | + AZURE_STORAGE_KEY=${{ secrets.LLVM_AZURE_STORAGE_KEY }} + export AZURE_STORAGE_KEY + PR_NUMBER=$(echo "$GITHUB_REF" | sed "s|refs/pull/||" | sed "s|/merge||") + DESTINATION_PATH="crunch/PR-$PR_NUMBER" + export DESTINATION_PATH + + az storage blob upload-batch --account-name staticsitellvmhtml --auth-mode key -d "\$web" --destination-path "$DESTINATION_PATH" --overwrite -s ./target/llvm-cov + + - name: Post comment to PR + if: env.GITHUB_TOKEN + uses: mshick/add-pr-comment@v2 + env: + GITHUB_TOKEN: ${{ secrets.CREDITCOIN_GITHUB_API_TOKEN }} + with: + message: ${{ steps.coverage.outputs.uncovered_lines }} + allow-repeats: false + + - name: Upload coverage report as artifact + uses: actions/upload-artifact@v4 + with: + name: coverage-report + path: target/llvm-cov/html/ diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml deleted file mode 100644 index e33f056..0000000 --- a/.github/workflows/create_release.yml +++ /dev/null @@ -1,83 +0,0 @@ -on: - push: - # Sequence of patterns matched against refs/tags - tags: - - 'v*' # Push events to matching v*, i.e. 
v1.0, v20.15.10 - -name: Rust CI - Create Release - -jobs: - check: - name: Create Release - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v2 - - - name: Install Rust latest stable - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - override: true - components: rustfmt, clippy - - - name: Run cargo test - uses: actions-rs/cargo@v1 - env: - CRUNCH_CONFIG_FILENAME: .env.example - with: - command: test - - - name: Run cargo build - uses: actions-rs/cargo@v1 - env: - CRUNCH_CONFIG_FILENAME: .env.example - with: - command: build - args: --release - - - name: Generate SHA-256 hash file - run: | - cd ./target/release - sha256sum crunch > crunch.sha256 - - - name: Get Rustc version - id: get_rustc - run: echo ::set-output name=rustc::$(rustc -V) - - - name: Get Tag version - id: get_tag - run: echo ::set-output name=tag::${GITHUB_REF#refs/*/} - - - name: Create release - id: create_release - uses: actions/create-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ steps.get_tag.outputs.tag }} - release_name: Crunch ${{ steps.get_tag.outputs.tag }} - body: "Note: This release was built using `${{ steps.get_rustc.outputs.rustc }}`" - draft: true - prerelease: false - - - name: Upload crunch binary - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./target/release/crunch - asset_name: crunch - asset_content_type: application/octet-stream - - - name: Upload crunch sha256 - uses: actions/upload-release-asset@v1.0.1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./target/release/crunch.sha256 - asset_name: crunch.sha256 - asset_content_type: text/plain - \ No newline at end of file diff --git a/.github/workflows/mega-linter.yml b/.github/workflows/mega-linter.yml new file mode 100644 index 
0000000..35dbb5a --- /dev/null +++ b/.github/workflows/mega-linter.yml @@ -0,0 +1,84 @@ +--- +# MegaLinter GitHub Action configuration file +# More info at https://megalinter.io +name: MegaLinter + +on: + pull_request: + branches: [dev] + +permissions: read-all + +env: # Comment env block if you do not want to apply fixes + # Apply linter fixes configuration + APPLY_FIXES: all # When active, APPLY_FIXES must also be defined as environment variable (in github/workflows/mega-linter.yml or other CI tool) + APPLY_FIXES_EVENT: pull_request # Decide which event triggers application of fixes in a commit or a PR (pull_request, push, all) + APPLY_FIXES_MODE: commit # If APPLY_FIXES is used, defines if the fixes are directly committed (commit) or posted in a PR (pull_request) + +concurrency: + group: ${{ github.ref }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + build: + name: MegaLinter + runs-on: ubuntu-22.04 + steps: + # Git Checkout + - name: Checkout Code + uses: actions/checkout@v4 + with: + token: ${{ secrets.PAT || secrets.GITHUB_TOKEN }} + fetch-depth: 0 # If you use VALIDATE_ALL_CODEBASE = true, you can remove this line to improve performance + + # MegaLinter + - name: MegaLinter + id: ml + # You can override the MegaLinter flavor used for faster performance + # More info at https://megalinter.io/latest/flavors/ + uses: oxsecurity/megalinter/flavors/rust@v7 + env: + # All available variables are described at https://megalinter.io/latest/configuration/ + # and configured in .mega-linter.yml + VALIDATE_ALL_CODEBASE: true + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + # Upload MegaLinter artifacts + - name: Archive production artifacts + if: success() || failure() + uses: actions/upload-artifact@v4 + with: + name: MegaLinter reports + path: | + megalinter-reports + mega-linter.log + + # Create pull request if applicable (for now works only on PR from same repository, not from forks) + - name: Create Pull Request with applied fixes + id: cpr + if: 
steps.ml.outputs.has_updated_sources == 1 && (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) && env.APPLY_FIXES_MODE == 'pull_request' && (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) + uses: peter-evans/create-pull-request@v6 + with: + token: ${{ secrets.PAT || secrets.GITHUB_TOKEN }} + commit-message: "[MegaLinter] Apply linters automatic fixes" + title: "[MegaLinter] Apply linters automatic fixes" + labels: bot + - name: Create PR output + if: steps.ml.outputs.has_updated_sources == 1 && (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) && env.APPLY_FIXES_MODE == 'pull_request' && (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) + run: | + echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}" + echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}" + + # Push new commit if applicable (for now works only on PR from same repository, not from forks) + - name: Prepare commit + if: steps.ml.outputs.has_updated_sources == 1 && (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) && env.APPLY_FIXES_MODE == 'commit' && github.ref != 'refs/heads/dev' && (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) + run: | + sudo chown -Rc $UID .git/ + git diff + + - name: Commit and push applied linter fixes + if: steps.ml.outputs.has_updated_sources == 1 && (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) && env.APPLY_FIXES_MODE == 'commit' && github.ref != 'refs/heads/dev' && (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) + uses: stefanzweifel/git-auto-commit-action@v5 + with: + branch: ${{ github.event.pull_request.head.ref || github.head_ref || github.ref }} + commit_message: "[MegaLinter] Apply linters fixes" diff --git 
a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..a0c930e --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,109 @@ +name: Release + +on: + workflow_dispatch: + push: + branches: + - "release/*" + +permissions: read-all + +jobs: + ## Not sure if having multiple private seeds breaks the image, so split it up by network for now + devnet: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Set up account private key + run: | + echo "${{ secrets.DEVNET_CRUNCH_PK }}" >> environments/cc3/devnet/.private.seed + + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: . + file: docker/devnet.dockerfile + push: true + tags: gluwa/crunch:devnet-${{github.run_id}} + + testnet: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Set up account private key + run: | + echo "${{ secrets.TESTNET_CRUNCH_PK }}" >> environments/cc3/testnet/.private.seed + + - name: Check settings + run: | + ls -a environments/cc3/testnet/ + + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: . 
+ file: docker/testnet.dockerfile + push: true + tags: gluwa/crunch:testnet-${{github.run_id}} + + mainnet: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - shell: bash + env: + CRUNCH_AGENT_PK: ${{ secrets.MAINNET_CRUNCH_PK }} + run: | + touch environments/cc3/mainnet/.private.seed \ + && echo "$CRUNCH_AGENT_PK" > environments/cc3/mainnet/.private.seed + - shell: bash + env: + CRUNCH_AGENT_CONFIG: ${{ secrets.MAINNET_CRUNCH_CONFIG }} + run: | + touch environments/cc3/mainnet/.env \ + && echo "$CRUNCH_AGENT_CONFIG" > environments/cc3/mainnet/.env + + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: . + file: docker/mainnet.dockerfile + push: true + tags: gluwa/crunch:mainnet-${{github.run_id}} diff --git a/.github/workflows/release_bot.yml b/.github/workflows/release_bot.yml deleted file mode 100644 index 783a951..0000000 --- a/.github/workflows/release_bot.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: Matrix - Push Notification -on: - release: - types: - - published -jobs: - ping_matrix: - strategy: - matrix: - channel: - - '!GXvEBJwHIZLDETyjJY:matrix.org' # #westend-crunch-bot:matrix.org -> Westend Crunch Bot (Public) - - '!gCVSnDNPSDmUBxnDte:matrix.org' # #kusama-crunch-bot:matrix.org -> Kusama Crunch Bot (Public) - - '!IdOnZRytzQTJTyZtQf:matrix.org' # #polkadot-crunch-bot:matrix.org -> Polkadot Crunch Bot (Public) - runs-on: ubuntu-latest - steps: - - uses: s3krit/matrix-message-action@v0.0.3 - with: - room_id: ${{ matrix.channel }} - access_token: ${{ secrets.MATRIX_ACCESS_TOKEN }} - message: "

Hey, crunch ${{github.event.release.tag_name}} has been released ✌️

🔖 ${{github.event.release.html_url}}

@room
${{github.event.release.body}}

" - server: "matrix.org" \ No newline at end of file diff --git a/.gitignore b/.gitignore index 3ed7f7f..8c88ffc 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,8 @@ /target /.vscode Cargo.lock -.env +/.env .private.seed -bump-version.sh \ No newline at end of file +bump-version.sh +/environments/**/.private.seed +**/.private.seed diff --git a/.gitleaksignore b/.gitleaksignore new file mode 100644 index 0000000..1adfa9f --- /dev/null +++ b/.gitleaksignore @@ -0,0 +1,3 @@ +d898bb595193a31320a36299f919ffd5be787553:.env.example:hashicorp-tf-password:40 +2d8ff3666f0d22a166c7237a6eef2281ef6b099d:.env.example:hashicorp-tf-password:48 +24c5ea403d127297c1deb541fbd6aa0f99572e16:README.md:hashicorp-tf-password:95 diff --git a/.jscpd.json b/.jscpd.json new file mode 100644 index 0000000..2999e3e --- /dev/null +++ b/.jscpd.json @@ -0,0 +1,3 @@ +{ + "ignore": ["src/crunch.rs", "src/config.rs", ".github/workflows/*.yml"] +} diff --git a/.lycheeignore b/.lycheeignore new file mode 100644 index 0000000..b437617 --- /dev/null +++ b/.lycheeignore @@ -0,0 +1 @@ +staticsitellvmhtml.z13.web.core.windows.net diff --git a/.mega-linter.yml b/.mega-linter.yml new file mode 100644 index 0000000..82c3050 --- /dev/null +++ b/.mega-linter.yml @@ -0,0 +1,5 @@ +--- +DISABLE_LINTERS: + - RUST_CLIPPY + - SPELL_CSPELL + - SPELL_MISSPELL diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..8c36f73 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,11 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +--- +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: end-of-file-fixer + - id: mixed-line-ending + args: [--fix=lf] + - id: trailing-whitespace diff --git a/.private.seed.example b/.private.seed.example deleted file mode 100644 index b184776..0000000 --- a/.private.seed.example +++ /dev/null @@ -1 +0,0 @@ -write your private seed here to sign 
the transactions \ No newline at end of file diff --git a/.remote.stashes.example b/.remote.stashes.example index 4789a2b..e01173c 100644 --- a/.remote.stashes.example +++ b/.remote.stashes.example @@ -1,3 +1,3 @@ 5C556QTtg1bJ43GDSgeowa3Ark6aeSHGTac1b2rKSXtgmSmW 5GTD7ZeD823BjpmZBCSzBQp7cvHR1Gunq7oDkurZr9zUev2n -5FUJHYEzKpVJfNbtXmR9HFqmcSEz6ak7ZUhBECz7GpsFkSYR \ No newline at end of file +5FUJHYEzKpVJfNbtXmR9HFqmcSEz6ak7ZUhBECz7GpsFkSYR diff --git a/.trivyignore b/.trivyignore new file mode 100644 index 0000000..f4cb128 --- /dev/null +++ b/.trivyignore @@ -0,0 +1 @@ +DS017 diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 0e64a31..0000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,400 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -## Changed -- Fix `onet_api_url` to depend on the connected chain and remove default endpoint. -- Update metadata Polkadot runtime/9430 - -## [0.10.1] - 2023-07-14 - -## New -- introducing option `--enable-pool-only-operator-compound` to allow for permissionless compound rewards of pool operators only -- introducing flag `--enable-pool-compound-threshold` to allow a threshold to be set. Only rewads higher than the threshold are triggered for compound. 
- -## change -- NOTE: option and respective flags have been renamed: - `--enable-all-nominees-payouts` -> `--enable-pool-all-nominees-payout` - `--enable-active-nominees-payout` -> `--enable-pool-active-nominees-payout` - `CRUNCH_ALL_NOMINEES_PAYOUTS_ENABLED` -> `CRUNCH_POOL_ALL_NOMINEES_PAYOUT_ENABLED` - `CRUNCH_ACTIVE_NOMINEES_PAYOUT_ENABLED` -> `CRUNCH_POOL_ACTIVE_NOMINEES_PAYOUT_ENABLED` - -## [0.10.0] - 2023-07-11 - -## change -- batch pool members with permissionless compound rewards defined -- fetch ONE-T grades - -## Changed -- Update subxt v0.29.0 -- use `force_batch` -- change `error_interval` to base pow function -- Support only Westend, Kusama, Polkadot (if nedeed other substrate-based chains could easily clone and adapt required changes) -- Update metadata Kusama runtime/9430 - -## [0.9.6] - 2023-06-15 - -- Update metadata Polkadot runtime/9420 -- Update metadata Westend runtime/9430 - -## [0.9.5] - 2023-05-25 - -- Update metadata Kusama runtime/9420 -- Update metadata Westend runtime/9420 - -## [0.9.3] - 2023-02-15 - -- Fixes active nominees stashes from previous era and not current from version 0.9.2 - -## [0.9.2] - 2023-02-15 - -- Fixes active nominees stashes from version 0.9.1 - -## [0.9.1] - 2023-02-15 - -### New -- Add optional flag 'enable-all-nominees-payouts'. Since this version, by default 'crunch' will only trigger payouts for active nominees that the Pool stake allocation was active in the previous era. The presence of this optional flag makes 'crunch' to try and trigger payouts to all nominees regardless if they were active or not. - -## [0.9.0] - 2023-02-15 - -### New -- Add optional flag 'enable-unique-stashes'. From all given stashes `crunch` will sort by stash address and remove duplicates. -- Add optional flag 'pool-ids' or environement variable 'CRUNCH_POOL_IDS'. `crunch` will try to fetch the nominees of the respective pool id predefined here before triggering the respective payouts. 
- -### Changed -- Update metadata Polkadot runtime/9360 -- Update metadata Kusama runtime/9370 -- Update metadata Westend runtime/9380 - -## [0.8.3] - 2023-01-20 - -### Changed -- Remove leading and trailing whitespace from remote stashes file -- Update metadata Polkadot runtime/9340 -- Update metadata Kusama runtime/9360 -- Update metadata Westend runtime/9370 - -## [0.8.1] - 2022-12-19 - -### Changed -- Aleph main & test networks [PR 23](https://github.com/turboflakes/crunch/pull/23) - -## [0.8.0] - 2022-12-15 - -### Changed -- subxt v0.25.0 -- Update metadata Kusama runtime/9350 -- Update metadata Westend runtime/9350 - -## [0.7.1] - 2022-12-15 - -- Update metadata Kusama runtime/9350 -- Update metadata Westend runtime/9350 - -## [0.6.3] - 2022-12-15 - -### Changed - -- Update metadata Kusama runtime/9320 -- Update metadata Westend runtime/9330 - -## [0.6.2] - 2022-12-06 - -### Changed - -- Update metadata Kusama runtime/9320 -- Update metadata Westend runtime/9330 - -## [0.6.1] - 2022-11-11 - -### Changed - -- Update metadata Polkadot runtime/9300 -- Update metadata Westend runtime/9320 -- Aleph main & test networks [PR 20](https://github.com/turboflakes/crunch/pull/20) - -## [0.6.0] - 2022-11-01 - -### New -- Add optional environement variable 'CRUNCH_EXISTENTIAL_DEPOSIT_FACTOR_WARNING' so that the factor value could be configurable per chain. Default value is 2. The recommended values based on the existential deposits is factor 2x for Polkadot and 1000x for Kusama. 
- -### Changed -- support `subxt` [v0.24.0](https://github.com/paritytech/subxt/releases/tag/v0.24.0) -- Update metadata Westend runtime/9310 - -## [0.5.15] - 2022-10-26 - -### Changed - -- Update metadata Kusama runtime/9300 -- Update metadata Westend runtime/9300 - -## [0.5.14] - 2022-10-18 - -### Changed - -- Update metadata Polkadot runtime/9291 - -## [0.5.13] - 2022-09-28 - -### Changed - -- Update `subxt v0.22.0` -- Update metadata Kusama runtime/9291 -- Update metadata Westend runtime/9290 - -## [0.5.11] - 2022-09-07 - -### New - -- Add support for Tidechain's testnet Lagoon [PR15](https://github.com/turboflakes/crunch/pull/15) - -### Changed - -- Update metadata Polkadot runtime/9280 - -## [0.5.10] - 2022-09-07 - -### Changed - -- Update metadata Kusama runtime/9280 - -## [0.5.9] - 2022-09-07 - -### Changed -- Change Kusama low balance warning to 1000 x ed -- Update metadata Polkadot runtime/9270 - -## [0.5.8] - 2022-08-31 - -- Update metadata Kusama runtime/9271 -- Update metadata Kusama runtime/9280 -- Update metadata Aleph Zero Testnet runtime/30 [PR 14](https://github.com/turboflakes/crunch/pull/14) - -## [0.5.7] - 2022-08-09 - -- Add support for Aleph Zero Mainnet -- Add metadata Aleph Zero Mainnet runtime/12 -- Update metadata Polkadot runtime/9260 -- Update metadata Westend runtime/9271 - -## [0.5.6] - 2022-08-06 - -### Added -- Add support for Aleph Zero Testnet -- Add metadata Aleph Zero Testnet runtime/30 - -## [0.5.5] - 2022-07-26 - -- Reduce number of recursive attempts to only once -- Update metadata Polkadot runtime/9250 -- Update metadata Kusama runtime/9260 - -## [0.5.4] - 2022-07-23 - -- Fix enable view and subscription modes - these modes were wrongly disabled in the previous released - -## [0.5.3] - 2022-07-21 - -- Fix recursive call in case of batch interrupted -- Update `subxt v.0.21.0` -- Update metadata Polkadot runtime/9230 -- Update metadata Kusama runtime/9250 -- Update metadata Westend runtime/9260 - -## [0.5.2] - 2022-03-17 - 
-### Changed - -- Fix skipping finalised blocks by updating `subxt` crate dependency to latest commit `8b19549` - version 0.18.1 -- Review summary description, with the addition of the number of stashes that had the previous era claimed earlier. -- Update metadata Polkadot runtime/9170 -- Update metadata Westend runtime/9170 - -## [0.5.1] - 2022-02-28 - -- Fix summary with a clickable details on top for with `is-short` flag. -- Update metadata Kusama runtime/9170 - -## [0.5.0] - 2022-02-22 - -### Added - -- Add summary with a clickable details on top. -- Add optional flag 'stashes-url' so that a list of stashes could be fetched from a remote endpoint - -### Changed - -- After the end of an era the payout is triggered after a random waiting period (up to a maximum of 120 seconds). This aims to prevent a block race from all `crunch` bots at the beginning of each era. -- Fix `Already claimed` rewards issue -- Fix parity default endpoints by defining port number -- Update metadata Polkadot runtime/9151 -- Update metadata Kusama runtime/9160 -- Update metadata Westend runtime/9160 - -## [0.4.1] - 2022-01-14 - -### Changed - -- Update metadata Westend runtime/9150 -- Update metadata Kusama runtime/9150 - -## [0.4.0] - 2022-01-11 - -### Changed - -- Changed single payouts for batch calls -- Update `subxt` dependency to revision `41bd8cc` - -### Added - -- Add `maximum-calls` flag with default value of 8. By default `crunch` collects all the outstanding payouts from previous eras and group all the extrinsic payout calls in group of 8 (or whatever value defined by this flag) so that a single batch call per group can be made. Using batch calls rather than single payouts we could expect a significant drop in transaction fees and a significat increase on `crunch` performance. - -### Changed - -## [0.3.2] - 2021-11-03 - -### Changed - -- Update substrate-subxt dependency. Subscription to `EraPaid` event should now run as expected without panic events every session. 
-- Fix loading `CRUNCH_SUBSTRATE_WS_URL` environment from `.env` file -- Default `error-interval` time reduced to 5 min -- Note: Batch calls are still not supported on this version - -## [0.3.0] - 2021-10-17 - -### Changed - -- Fix substrate-subxt dependency with support for metadata v14 -- Note: Batch calls are not supported on this version -> potentially on next release - -## [0.2.2] - 2021-10-09 - -### Added - -- Add `maximum_history_eras` flag with default value of 4. Note: This flag is only valid if `short` flag is also present. By default `crunch` will only check for unclaimed rewards in the last 4 eras rather than the last 84 as in previous versions. If running `crunch` in verbose mode the check in the last 84 eras still apply by default, since we would like to keep showing information regarding Inclusion and total Crunched eras for all history. - -### Changed - -- Fix loading configuration variables specified in `.env` file. -- Fix bug for new chains that have `current_era` value lower than `history_depth` constant. 
- -## [0.2.1] - 2021-09-30 - -### Added - -- Add bash script `crunch-update.sh` for easier install or crunch update - -### Changed - -- Identify an excellence performance by using Interquartile Range(IQR) -- Update substrate-subxt dependency (`Substrate_subxt error: Scale codec error: Could not decode 'Outcome', variant doesn't exist` error fixed) - -## [0.2.0] - 2021-09-25 - -### Added - -- Support a batch of dispatch calls by default -- Additional 99.9% confidence interval for performance reaction -- Additional randomness on emojis and flakes messages - -### Changed - -- Fix typos -- Improve identity -- Notification message refactored -- Minor messages typo changes -- Update substrate-subxt dependency -- Multilingual hello message - -## [0.1.18] - 2021-09-15 - -### Added - -- Optional flag --error-interval to adjust the time between crunch automatic restart in case of error -- Additional mode 'era' that subscribes to EraPaid on-chain event to trigger the payout - -### Changed - -- use 99% confidence interval for performance reaction -- update substrate-subxt dependency -- fix optional flag --debug - -## [0.1.17] - 2021-09-07 - -### Added - -- Warn if signer account free funds are lower than 2x the Existential Deposit -- Link validator identity to subscan.io -- Always show points and total reward amount plus good performance reaction - -### Changed - -- Remove *nothing to crunch this time message* if short flag is present -- Fix substrate-subxt dependency by commit hash -- Fix changelog - latest version comes first -- Change finalize block link to subscan.io - -## [0.1.15] - 2021-09-03 - -### Added - -- Optional flag --short to display only essencial information - -### Changed - -- Small adjustments on overal notifications - -## [0.1.14] - 2021-08-30 - -### Changed - -- Fix event 'Rewarded' active on chains runnimg Runtime 9090 - -## [0.1.13] - 2021-08-19 - -### Changed - -- Update dependencies - -## [0.1.12] - 2021-08-13 - -### Added - -- Show validator era 
points and average - -## [0.1.11] - 2021-08-13 - -### Changedd - -- Improve message readability -- Only send one matrix message per run - -## [0.1.9] - 2021-08-07 - -### Added - -- Add changelog (this file) -- Check if stash is currently in active set -- Improve messages readability - -### Changed - -- Highlight validator name in logs -- By default connect to local substrate node if no chain is specified - -## [0.1.8] - 2021-08-05 - -### Added - -- First release -- Claim staking rewards for one or a list of Validators -- Only inspect for claimed or unclaimed eras -- Easily connect to westend, kusama or polkadot Parity public nodes -- Set optional matrix bot -- Set `flakes` as default subcommand and optional `rewards` for a more regular logging/messages diff --git a/Cargo.toml b/Cargo.toml index 284b6dc..0370d5d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,7 @@ name = "crunch" version = "0.10.1" authors = ["Paulo "] -description = "Crunch is a command-line interface (CLI) to claim staking rewards (flakes) every X hours for Substrate-based chains" +description = "Crunch is a command-line interface (CLI) to claim staking rewards (flakes) every X hours for Substrate-based chains" edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -15,20 +15,20 @@ log = "0.4" clap = "2.33" lazy_static = "1.4" derive_more = "0.99" -async-recursion = "0.3.2" +async-recursion = "1.1.0" serde = "1.0.132" serde_json = "1.0.68" thiserror = "^1.0.24" chrono = "0.4" regex = "1.4.6" -reqwest = { version = "0.11", features = ["json"] } +reqwest = { version = "0.12", features = ["json"] } url = "2.2.2" -base64 = "0.13.0" +base64 = "0.22.0" rand = "0.8.4" # subxt dependencies subxt = "0.29.0" async-std = { version = "1.11.0", features = ["attributes", "tokio1"] } -env_logger = "0.9.3" +env_logger = "0.11.3" futures = "0.3.13" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = 
["derive", "full", "bit-vec"] } hex = "0.4.3" diff --git a/Dockerfile b/Dockerfile index eaeb662..590d35d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,10 @@ +# hadolint global ignore=DL3008,DL4006 FROM ubuntu:jammy AS builder ARG PROFILE=release RUN apt-get update \ - && apt-get -y install build-essential curl libssl-dev pkg-config \ + && apt-get -y --no-install-recommends install build-essential curl libssl-dev pkg-config \ && rm -rf /var/lib/apt/lists/* RUN curl https://sh.rustup.rs -sSf | sh -s -- -y RUN /root/.cargo/bin/rustup update @@ -16,7 +17,7 @@ RUN /root/.cargo/bin/cargo build --$PROFILE --package crunch FROM ubuntu:jammy RUN apt-get update \ - && apt-get -y install ca-certificates \ + && apt-get -y --no-install-recommends install ca-certificates \ && rm -rf /var/lib/apt/lists/* ARG PROFILE=release @@ -31,3 +32,4 @@ ENV RUST_LOG="info" RUN /usr/local/bin/crunch --version ENTRYPOINT [ "/usr/local/bin/crunch" ] +HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 CMD curl --fail http://127.0.0.1:9999 || exit 1 diff --git a/LICENSE b/LICENSE index f49a4e1..261eeb9 100644 --- a/LICENSE +++ b/LICENSE @@ -198,4 +198,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file + limitations under the License. diff --git a/README.md b/README.md index b7af95e..c083e93 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,37 @@ -# crunch · ![latest release](https://github.com/turboflakes/crunch/actions/workflows/create_release.yml/badge.svg) +# Gluwa Related Info + +Information related to Gluwa specific changes for Crunch. + +## Metadata generation + +```shell +subxt metadata --version 14 -f bytes > metadata/creditcoin_metadata.scale +``` + +Or use the handy `gen_metadata.sh` script. 
+ +## How to update docker image when metadata changes + +Create a new branch. Run a local version of your node with the updated runtime and run the +`gen_metadata.sh` script. Move that metadata to the `metadata` folder and check that the binary +compiles (Because subxt uses macros you need to perform a full compile and not just a `cargo check` +for completeness). After you test the new metadata create a PR and merge your branch into either +`mainnet`, `testnet`, or `devnet`. A push to either of these branches triggers a workflow that +builds the docker image and pushes it to Gluwa's repo. +The image will be named `crunch-[BRANCH]:latest`. + +## runtimes/creditcoin.rs + +This file is almost identical to `runtimes/polkadot.rs`. The only changes made were to the path of the metadata file for the node_runtime and in the function `try_run_batch_pool_members`. The `member` field expected an Address32 according to the compiler and in the other implementations it was a `MultiAddress`. + +## Issues with M1 Macs + +If you have issues building the docker image for this repo and you are on an M1 mac try upgrading your version of macOS. YMMV + +## crunch · ![latest release](https://github.com/turboflakes/crunch/actions/workflows/create_release.yml/badge.svg)

- +

`crunch` is a command-line interface (CLI) to easily automate payouts of staking rewards on Substrate-based chains. @@ -54,10 +84,10 @@ Configuration file example: [`.env.example`](https://github.com/turboflakes/crun ```bash # ---------------------------------------------------------------- -# crunch CLI configuration variables +# crunch CLI configuration variables # ---------------------------------------------------------------- # [CRUNCH_STASHES] Validator stash addresses for which 'crunch flakes', 'crunch rewards' -# or 'crunch view' will be applied. +# or 'crunch view' will be applied. # If needed specify more than one (e.g. stash_1,stash_2,stash_3). CRUNCH_STASHES=5GTD7ZeD823BjpmZBCSzBQp7cvHR1Gunq7oDkurZr9zUev2n # @@ -68,7 +98,7 @@ CRUNCH_STASHES_URL=https://raw.githubusercontent.com/turboflakes/crunch/main/.re # # [CRUNCH_SUBSTRATE_WS_URL] Substrate websocket endpoint for which 'crunch' will try to # connect. (e.g. wss://kusama-rpc.polkadot.io) (NOTE: substrate_ws_url takes precedence -# than argument) +# than argument) #CRUNCH_SUBSTRATE_WS_URL=wss://westend-rpc.polkadot.io:443 # # [CRUNCH_MAXIMUM_PAYOUTS] Maximum number of unclaimed eras for which an extrinsic payout @@ -76,16 +106,16 @@ CRUNCH_STASHES_URL=https://raw.githubusercontent.com/turboflakes/crunch/main/.re # 84 the maximum unclaimed payout calls for each stash address will be 4). [default: 4] CRUNCH_MAXIMUM_PAYOUTS=4 # -# [CRUNCH_MAXIMUM_HISTORY_ERAS] Maximum number of history eras for which crunch will look for +# [CRUNCH_MAXIMUM_HISTORY_ERAS] Maximum number of history eras for which crunch will look for # unclaimed rewards. The maximum value supported is the one defined by constant history_depth -# (e.g. a value of 4 means that crunch will only check in the latest 4 eras if there are any +# (e.g. a value of 4 means that crunch will only check in the latest 4 eras if there are any # unclaimed rewards for each stash address). 
[default: 4] CRUNCH_MAXIMUM_HISTORY_ERAS=4 # # [CRUNCH_MAXIMUM_CALLS] Maximum number of calls in a single batch. [default: 8] CRUNCH_MAXIMUM_CALLS=8 # -# [CRUNCH_SEED_PATH] File path containing the private seed phrase to Sign the extrinsic +# [CRUNCH_SEED_PATH] File path containing the private seed phrase to Sign the extrinsic # payout call. [default: .private.seed] #CRUNCH_SEED_PATH=.private.seed.example # ---------------------------------------------------------------- @@ -109,34 +139,38 @@ CRUNCH_ONET_NUMBER_LAST_SESSIONS=6 # `crunch` will try to fetch the nominees of the respective pool id predefined here before triggering the respective payouts CRUNCH_POOL_IDS=10,15 # -# [CRUNCH_POOL_COMPOUND_THRESHOLD] Define minimum pending rewards threshold in PLANCKS. +# [CRUNCH_POOL_COMPOUND_THRESHOLD] Define minimum pending rewards threshold in PLANCKS. # Note: only pending rewards above the threshold are included in the auto-compound batch. CRUNCH_POOL_COMPOUND_THRESHOLD=100000000000 # -# [CRUNCH_POOL_MEMBERS_COMPOUND_ENABLED] Enable auto-compound rewards for every member that belongs to the pools -# previously selected by CRUNCH_POOL_IDS. Note that members have to have their permissions +# [CRUNCH_POOL_MEMBERS_COMPOUND_ENABLED] Enable auto-compound rewards for every member that belongs to the pools +# previously selected by CRUNCH_POOL_IDS. Note that members have to have their permissions # set as PermissionlessCompound or PermissionlessAll. #CRUNCH_POOL_MEMBERS_COMPOUND_ENABLED=true # -# [CRUNCH_POOL_ONLY_OPERATOR_COMPOUND_ENABLED] Enable auto-compound rewards for the pool operator member that belongs to the pools -# previously selected by CRUNCH_POOL_IDS. Note that operator member account have to have their permissions +# [CRUNCH_POOL_ONLY_OPERATOR_COMPOUND_ENABLED] Enable auto-compound rewards for the pool operator member that belongs to the pools +# previously selected by CRUNCH_POOL_IDS. 
Note that operator member account have to have their permissions # set as PermissionlessCompound or PermissionlessAll. CRUNCH_POOL_ONLY_OPERATOR_COMPOUND_ENABLED=true # -# [CRUNCH_POOL_ACTIVE_NOMINEES_PAYOUT_ENABLED] Enable payouts only for ACTIVE nominees assigned to the pools +# [CRUNCH_POOL_ACTIVE_NOMINEES_PAYOUT_ENABLED] Enable payouts only for ACTIVE nominees assigned to the pools # previously selected by CRUNCH_POOL_IDS. #CRUNCH_POOL_ACTIVE_NOMINEES_PAYOUT_ENABLED=true # -# [CRUNCH_POOL_ALL_NOMINEES_PAYOUT_ENABLED] Enable payouts for ALL nominees assigned to the pools +# [CRUNCH_POOL_ALL_NOMINEES_PAYOUT_ENABLED] Enable payouts for ALL nominees assigned to the pools # previously selected by CRUNCH_POOL_IDS. #CRUNCH_POOL_ALL_NOMINEES_PAYOUT_ENABLED=true ``` -Create a seed private file `.private.seed` inside `crunch-bot` folder and write the private seed phrase of the account responsible to sign the extrinsic payout call as in [`.private.seed.example`](https://github.com/turboflakes/crunch/blob/main/.private.seed.example) (Note: `.private.seed` is the default name and a hidden file, if you want something different you can adjust it later with the option `crunch flakes --seed-path ~/crunch-bot/.kusama.private.seed` ) +Create a seed private file `.private.seed` inside `crunch-bot` folder and write the private seed +phrase of the account responsible to sign the extrinsic payout call as in +[`.private.seed.example`](https://github.com/turboflakes/crunch/blob/main/.private.seed.example) +(Note: `.private.seed` is the default name and a hidden file, if you want something different you +can adjust it later with the option `crunch flakes --seed-path ~/crunch-bot/.kusama.private.seed` ) ```bash #!/bin/bash -# create a file with a file editor (Vim in this case) and write the private seed phrase +# create a file with a file editor (Vim in this case) and write the private seed phrase # of the account responsible to sign the extrinsic payout call vi /crunch-bot/.private.seed 
# when ready write and quit (:wq!) @@ -162,9 +196,16 @@ WantedBy=multi-user.target ### Crunch Bot ([Matrix](https://matrix.org/)) -If you set up `crunch` on your server with a matrix user 👉 you get your own **Crunch Bot**. +If you set up `crunch` on your server with a matrix user 👉 you get your own **Crunch Bot**. -To enable **Crunch Bot** you will need to create a specific account on Element or similar and copy the values to the respective environment variables `CRUNCH_MATRIX_BOT_USER` and `CRUNCH_MATRIX_BOT_PASSWORD` like in the configuration example file [`.env.example`](https://github.com/turboflakes/crunch/blob/main/.env.example). You may also want to set your regular matrix user to the environment variable `CRUNCH_MATRIX_USER`. So that **Crunch Bot** could create a private room and send in messages. By default **Crunch Bot** will automatically invite your regular matrix user to a private room. Also by default **Crunch Bot** will send a copy of the messages to the respective network public room for which is connected to. +To enable **Crunch Bot** you will need to create a specific account on Element or similar and copy +the values to the respective environment variables `CRUNCH_MATRIX_BOT_USER` and +`CRUNCH_MATRIX_BOT_PASSWORD` like in the configuration example file +[`.env.example`](https://github.com/turboflakes/crunch/blob/main/.env.example). You may also want +to set your regular matrix user to the environment variable `CRUNCH_MATRIX_USER`. So that +**Crunch Bot** could create a private room and send in messages. By default **Crunch Bot** will +automatically invite your regular matrix user to a private room. Also by default **Crunch Bot** +will send a copy of the messages to the respective network public room for which is connected to. 
### Public Rooms available @@ -173,19 +214,19 @@ Join and read the messages history of all the Public Rooms for which **Crunch Bo @@ -364,15 +405,25 @@ ARGS: hours) [default: era] [possible values: era, daily, turbo] ``` -Note: By default `crunch` collects the outstanding payouts from previous eras and group all the extrinsic payout calls in group of 4 or whatever value defined in the flag `maximum-calls` so that a single batch call per group can be made. The collection of all outstanding payouts from previous eras is also limited by 2 other flags. The first being `maximum-payouts` which default value is 4, this flag limits the number of payouts **per stash**. The other one is the `maximum-history-eras` which default is also 4, this flag limits the number of past eras `crunch` will look for unclaimed rewards - but this flag only applies if `short` flag is also used in the configuration. This is done so that `crunch` can run efficiently every era. +Note: By default `crunch` collects the outstanding payouts from previous eras and group all the +extrinsic payout calls in group of 4 or whatever value defined in the flag `maximum-calls` so that +a single batch call per group can be made. The collection of all outstanding payouts from previous +eras is also limited by 2 other flags. The first being `maximum-payouts` which default value is 4, +this flag limits the number of payouts **per stash**. The other one is the `maximum-history-eras` +which default is also 4, this flag limits the number of past eras `crunch` will look for unclaimed +rewards - but this flag only applies if `short` flag is also used in the configuration. This is done +so that `crunch` can run efficiently every era. 
 -With that said, if it's the **first time** you are running `crunch` and you are not sure if you have any unclaimed rewards or if you just want to know for the stash accounts defined in the confguration file (`.env`), which eras from the last 84 have already been claimed or unclaimed, you can simply run `crunch view`. +With that said, if it's the **first time** you are running `crunch` and you are not sure if you have +any unclaimed rewards or if you just want to know for the stash accounts defined in the configuration +file (`.env`), which eras from the last 84 have already been claimed or unclaimed, you can simply run +`crunch view`. Note: The `crunch view` mode only logs information into the terminal. ```bash #!/bin/bash -# log unclaimed rewards for Westend network +# log unclaimed rewards for Westend network crunch westend view # or for Kusama network crunch kusama view @@ -385,7 +436,8 @@ Note: You can run `crunch` inside a tmux session and leave it, or using somethin ## Common issue on Ubuntu 22.04 when using the crunch binary Install previous openssl version from: -``` + +```bash wget http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2.20_amd64.deb dpkg -i libssl1.1_1.1.1f-1ubuntu2.20_amd64.deb ``` @@ -446,17 +498,22 @@ cargo watch -x 'run --bin crunch' ### Downloading metadata from a Substrate node -Use the [`subxt-cli`](./cli) tool to download the metadata for your target runtime from a node. +Use the [`subxt-cli`](https://docs.substrate.io/reference/command-line-tools/subxt/) tool to download the metadata for your target runtime from a node. 
Install + ```bash cargo install subxt-cli ``` + Save the encoded metadata to a file + ```bash subxt metadata --url https://westend-rpc.polkadot.io -f bytes > westend_metadata.scale ``` + (Optional) Generate runtime API client code from metadata + ```bash subxt codegen --url https://westend-rpc.polkadot.io | rustfmt --edition=2018 --emit=stdout > westend_runtime.rs ``` @@ -508,9 +565,10 @@ Any feedback is welcome. `crunch` was made by **TurboFlakes**. Visit us at turboflakes.io to know more about our work. If you like this project - - 🚀 Share our work - - ✌️ Visit us at turboflakes.io - - ✨ Or you could also star the Github project :) + +- 🚀 Share our work +- ✌️ Visit us at turboflakes.io +- ✨ Or you could also star the Github project :) Tips are welcome @@ -524,8 +582,8 @@ Tips are welcome ### Quote > "Study hard what interests you the most in the most undisciplined, irreverent and original manner possible." -― Richard Feynmann +> ― Richard Feynmann -__ +\_\_ Enjoy `crunch` diff --git a/assets/crunchbot-avatar-128.png b/assets/crunchbot-avatar-128.png deleted file mode 100644 index a022564..0000000 Binary files a/assets/crunchbot-avatar-128.png and /dev/null differ diff --git a/assets/crunchbot-github-header.png b/assets/crunchbot-github-header.png deleted file mode 100644 index ebac827..0000000 Binary files a/assets/crunchbot-github-header.png and /dev/null differ diff --git a/assets/crunchbot-kusama-room-128.png b/assets/crunchbot-kusama-room-128.png deleted file mode 100644 index b97b16f..0000000 Binary files a/assets/crunchbot-kusama-room-128.png and /dev/null differ diff --git a/assets/crunchbot-kusama-room.png b/assets/crunchbot-kusama-room.png deleted file mode 100644 index 47e1339..0000000 Binary files a/assets/crunchbot-kusama-room.png and /dev/null differ diff --git a/assets/crunchbot-polkadot-room-128.png b/assets/crunchbot-polkadot-room-128.png deleted file mode 100644 index 8f994d1..0000000 Binary files a/assets/crunchbot-polkadot-room-128.png and 
/dev/null differ diff --git a/assets/crunchbot-polkadot-room.png b/assets/crunchbot-polkadot-room.png deleted file mode 100644 index 281fec4..0000000 Binary files a/assets/crunchbot-polkadot-room.png and /dev/null differ diff --git a/assets/crunchbot-westend-room-128.png b/assets/crunchbot-westend-room-128.png deleted file mode 100644 index beb934a..0000000 Binary files a/assets/crunchbot-westend-room-128.png and /dev/null differ diff --git a/assets/crunchbot-westend-room.png b/assets/crunchbot-westend-room.png deleted file mode 100644 index 37146ca..0000000 Binary files a/assets/crunchbot-westend-room.png and /dev/null differ diff --git a/assets/matrix-example-512.png b/assets/matrix-example-512.png deleted file mode 100644 index 357f594..0000000 Binary files a/assets/matrix-example-512.png and /dev/null differ diff --git a/crunch b/crunch new file mode 100755 index 0000000..e503edf Binary files /dev/null and b/crunch differ diff --git a/crunch-update.sh b/crunch-update.sh deleted file mode 100644 index cc3928f..0000000 --- a/crunch-update.sh +++ /dev/null @@ -1,56 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2021 Aukbit Ltd. -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -#!/bin/bash -# -# > make a file executable -# chmod +x ./crunch-update.sh - -DIRNAME="~/crunch-bot" -FILENAME="$DIRNAME/crunch" - -read -p "Enter the Crunch version that you would like to download (e.g.: 0.10.1): " INPUT_VERSION -if [ "$INPUT_VERSION" = "" ]; then - INPUT_VERSION="0.10.1" -fi - -URI="https://github.com/turboflakes/crunch/releases/download/v$INPUT_VERSION/crunch" -URI_SHA256="https://github.com/turboflakes/crunch/releases/download/v$INPUT_VERSION/crunch.sha256" -wget $URI && wget $URI_SHA256 - -if sha256sum -c crunch.sha256 2>&1 | grep -q 'OK' -then - if [ ! -d "$DIRNAME" ] - then - mkdir $DIRNAME - fi - if [[ -f "$FILENAME" ]] - then - mv "$FILENAME" "$FILENAME.backup" - fi - rm crunch.sha256 - chmod +x crunch - mv crunch "$FILENAME" - echo "** crunch v$INPUT_VERSION successfully downloaded and verified $FILENAME **" -else - echo "Error: SHA256 doesn't match!" - rm "$FILENAME*" -fi \ No newline at end of file diff --git a/docker/devnet.dockerfile b/docker/devnet.dockerfile new file mode 100644 index 0000000..781fd86 --- /dev/null +++ b/docker/devnet.dockerfile @@ -0,0 +1,38 @@ +# hadolint global ignore=DL3008,DL4006 +FROM ubuntu:jammy AS builder + +ARG PROFILE=release + +RUN apt-get update \ + && apt-get -y --no-install-recommends install build-essential curl libssl-dev pkg-config \ + && rm -rf /var/lib/apt/lists/* +RUN curl https://sh.rustup.rs -sSf | sh -s -- -y +RUN /root/.cargo/bin/rustup update + +COPY . 
/app +WORKDIR /app +RUN /root/.cargo/bin/cargo build --$PROFILE --package crunch + +# ===== SECOND STAGE ====== +FROM ubuntu:jammy + +RUN apt-get update \ + && apt-get -y --no-install-recommends install ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +ARG PROFILE=release +COPY --from=builder /app/target/$PROFILE/crunch /usr/local/bin + +# Add the credentials needed to run crunch for this environment +COPY --from=builder /app/environments/cc3/devnet/* . + +RUN useradd -u 1000 -U -s /bin/sh crunch +USER crunch + +ENV RUST_BACKTRACE 1 +ENV RUST_LOG="info" + +RUN /usr/local/bin/crunch --version + +ENTRYPOINT [ "/usr/local/bin/crunch" ] +HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 CMD curl --fail http://127.0.0.1:9999 || exit 1 diff --git a/docker/mainnet.dockerfile b/docker/mainnet.dockerfile new file mode 100644 index 0000000..23f8ed0 --- /dev/null +++ b/docker/mainnet.dockerfile @@ -0,0 +1,38 @@ +# hadolint global ignore=DL3008,DL4006 +FROM ubuntu:jammy AS builder + +ARG PROFILE=release + +RUN apt-get update \ + && apt-get -y --no-install-recommends install build-essential curl libssl-dev pkg-config \ + && rm -rf /var/lib/apt/lists/* +RUN curl https://sh.rustup.rs -sSf | sh -s -- -y +RUN /root/.cargo/bin/rustup update + +COPY . /app +WORKDIR /app +RUN /root/.cargo/bin/cargo build --$PROFILE --package crunch + +# ===== SECOND STAGE ====== +FROM ubuntu:jammy + +RUN apt-get update \ + && apt-get -y --no-install-recommends install ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +ARG PROFILE=release +COPY --from=builder /app/target/$PROFILE/crunch /usr/local/bin + +# Add the credentials needed to run crunch for this environment +COPY --from=builder app/environments/cc3/mainnet/* . 
+ +RUN useradd -u 1000 -U -s /bin/sh crunch +USER crunch + +ENV RUST_BACKTRACE 1 +ENV RUST_LOG="info" + +RUN /usr/local/bin/crunch --version + +ENTRYPOINT [ "/usr/local/bin/crunch" ] +HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 CMD curl --fail http://127.0.0.1:9999 || exit 1 diff --git a/docker/testnet.dockerfile b/docker/testnet.dockerfile new file mode 100644 index 0000000..9bcb924 --- /dev/null +++ b/docker/testnet.dockerfile @@ -0,0 +1,38 @@ +# hadolint global ignore=DL3008,DL4006 +FROM ubuntu:jammy AS builder + +ARG PROFILE=release + +RUN apt-get update \ + && apt-get -y --no-install-recommends install build-essential curl libssl-dev pkg-config \ + && rm -rf /var/lib/apt/lists/* +RUN curl https://sh.rustup.rs -sSf | sh -s -- -y +RUN /root/.cargo/bin/rustup update + +COPY . /app +WORKDIR /app +RUN /root/.cargo/bin/cargo build --$PROFILE --package crunch + +# ===== SECOND STAGE ====== +FROM ubuntu:jammy + +RUN apt-get update \ + && apt-get -y --no-install-recommends install ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +ARG PROFILE=release +COPY --from=builder /app/target/$PROFILE/crunch /usr/local/bin + +# Add the credentials needed to run crunch for this environment +COPY --from=builder app/environments/cc3/testnet/* . 
+ +RUN useradd -u 1000 -U -s /bin/sh crunch +USER crunch + +ENV RUST_BACKTRACE 1 +ENV RUST_LOG="info" + +RUN /usr/local/bin/crunch --version + +ENTRYPOINT [ "/usr/local/bin/crunch" ] +HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 CMD curl --fail http://127.0.0.1:9999 || exit 1 diff --git a/environments/cc3/devnet/.env b/environments/cc3/devnet/.env new file mode 100644 index 0000000..3ab2f64 --- /dev/null +++ b/environments/cc3/devnet/.env @@ -0,0 +1,35 @@ +# ---------------------------------------------------------------- +# crunch CLI configuration variables +# ---------------------------------------------------------------- +# [CRUNCH_STASHES] Validator stash addresses for which 'crunch flakes', 'crunch rewards' +# or 'crunch view' will be applied. +# If needed specify more than one (e.g. stash_1,stash_2,stash_3). +CRUNCH_STASHES=5GNJqTPyNqANBkUVMN1LPPrxXnFouWXoe2wNSmmEoLctxiZY +# +# [CRUNCH_STASHES_URL] Additionally the list of stashes could be defined and available in a remote file. +# `crunch` will try to fetch the stashes from the endpoint predefined here before triggering the respective payouts +# Please have a look at the file '.remote.stashes.example' as an example +CRUNCH_STASHES_URL=https://raw.githubusercontent.com/gluwa/crunch/devnet/environments/cc3/devnet/stashes +# +# [CRUNCH_SUBSTRATE_WS_URL] Substrate websocket endpoint for which 'crunch' will try to +# connect. (e.g. wss://kusama-rpc.polkadot.io) (NOTE: substrate_ws_url takes precedence +# than argument) +CRUNCH_SUBSTRATE_WS_URL=wss://rpc.cc3-devnet.creditcoin.network:443 +# +# [CRUNCH_MAXIMUM_PAYOUTS] Maximum number of unclaimed eras for which an extrinsic payout +# will be submitted. (e.g. a value of 4 means that if there are unclaimed eras in the last +# 84 the maximum unclaimed payout calls for each stash address will be 4). 
[default: 4] +CRUNCH_MAXIMUM_PAYOUTS=4 +# +# [CRUNCH_MAXIMUM_HISTORY_ERAS] Maximum number of history eras for which crunch will look for +# unclaimed rewards. The maximum value supported is the one defined by constant history_depth +# (e.g. a value of 4 means that crunch will only check in the latest 4 eras if there are any +# unclaimed rewards for each stash address). [default: 4] +CRUNCH_MAXIMUM_HISTORY_ERAS=84 +# +# [CRUNCH_MAXIMUM_CALLS] Maximum number of calls in a single batch. [default: 4] +CRUNCH_MAXIMUM_CALLS=4 +# +# [CRUNCH_SEED_PATH] File path containing the private seed phrase to Sign the extrinsic +# payout call. [default: .private.seed] +#CRUNCH_SEED_PATH=.private.seed.example diff --git a/environments/cc3/devnet/stashes b/environments/cc3/devnet/stashes new file mode 100644 index 0000000..c76c3e8 --- /dev/null +++ b/environments/cc3/devnet/stashes @@ -0,0 +1,8 @@ +5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY +5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty +5DtK81vYffQKQT9Q4ecbtGyRZwvz4zSy6BUCzMLyefzr3ZKT +5EPZwQgZTB2Bik8PFrXt6kZe1VXwXvmZRyMTtE38nWZZPfXJ +5DwCyy7mSiujskM9R6DbbZaKUWbUJYa3fvZeNv8jVfenvUDU +5EAQimgnL18bp8DFLmQjiUN7bVJn2wzvn3qLCgMoP5U37Vjg +5FC15brkxZjbYvxfg7DEgZYe53YWvUkPQUb8BEnNo8CRsmHs +5GzeJiL9HFWSCue14yS3FxN8YwHK1Wdd7ckhN5YcEB4KZPa6 diff --git a/environments/cc3/mainnet/.gitkeep b/environments/cc3/mainnet/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/environments/cc3/testnet/.env b/environments/cc3/testnet/.env new file mode 100644 index 0000000..120fd81 --- /dev/null +++ b/environments/cc3/testnet/.env @@ -0,0 +1,35 @@ +# ---------------------------------------------------------------- +# crunch CLI configuration variables +# ---------------------------------------------------------------- +# [CRUNCH_STASHES] Validator stash addresses for which 'crunch flakes', 'crunch rewards' +# or 'crunch view' will be applied. +# If needed specify more than one (e.g. stash_1,stash_2,stash_3). 
+CRUNCH_STASHES=5GNJqTPyNqANBkUVMN1LPPrxXnFouWXoe2wNSmmEoLctxiZY +# +# [CRUNCH_STASHES_URL] Additionally the list of stashes could be defined and available in a remote file. +# `crunch` will try to fetch the stashes from the endpoint predefined here before triggering the respective payouts +# Please have a look at the file '.remote.stashes.example' as an example +CRUNCH_STASHES_URL=https://raw.githubusercontent.com/gluwa/crunch/testnet/environments/cc3/testnet/stashes +# +# [CRUNCH_SUBSTRATE_WS_URL] Substrate websocket endpoint for which 'crunch' will try to +# connect. (e.g. wss://kusama-rpc.polkadot.io) (NOTE: substrate_ws_url takes precedence +# than argument) +CRUNCH_SUBSTRATE_WS_URL=wss://rpc.cc3-testnet.creditcoin.network:443 +# +# [CRUNCH_MAXIMUM_PAYOUTS] Maximum number of unclaimed eras for which an extrinsic payout +# will be submitted. (e.g. a value of 4 means that if there are unclaimed eras in the last +# 84 the maximum unclaimed payout calls for each stash address will be 4). [default: 4] +CRUNCH_MAXIMUM_PAYOUTS=4 +# +# [CRUNCH_MAXIMUM_HISTORY_ERAS] Maximum number of history eras for which crunch will look for +# unclaimed rewards. The maximum value supported is the one defined by constant history_depth +# (e.g. a value of 4 means that crunch will only check in the latest 4 eras if there are any +# unclaimed rewards for each stash address). [default: 4] +CRUNCH_MAXIMUM_HISTORY_ERAS=84 +# +# [CRUNCH_MAXIMUM_CALLS] Maximum number of calls in a single batch. [default: 4] +CRUNCH_MAXIMUM_CALLS=4 +# +# [CRUNCH_SEED_PATH] File path containing the private seed phrase to Sign the extrinsic +# payout call. 
[default: .private.seed] +#CRUNCH_SEED_PATH=.private.seed.example diff --git a/environments/cc3/testnet/stashes b/environments/cc3/testnet/stashes new file mode 100644 index 0000000..5526364 --- /dev/null +++ b/environments/cc3/testnet/stashes @@ -0,0 +1,6 @@ +5DcvAgRBWuQfQCGK2zZE5nVw36WgdYrTZhvxD68hZCPW26bZ +5HGskSoTDbRAfiGxDVj4K4BDJBGxLjDaNvb7QLpURDNTrDjA +5HeEMRybEWWWrNc1DSAMXuqwyGwpuzcRVv7BvmfF3XyFqvfq +5FZLEzcPppgLT1mRDWSYKQE2RWwp66NSuxKZpnVmCgB7UVmn +5EpmD1q1P3qY38YqkM6UXt56Gfrz8vXTu1cVgG2WSibrxphQ +5CLvDXfB4vpz7knm3Me6C42aFKzz5H6UDg92aRFCuyABdJpZ diff --git a/gen_metadata.sh b/gen_metadata.sh new file mode 100755 index 0000000..fdea530 --- /dev/null +++ b/gen_metadata.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Two step script +# 1. check for a creditcoin node at TARGET_URL +# 2. extract the metadata from the node + +# Note: both WS and HTTP are served via the same port +TARGET_URL=${1:-http://127.0.0.1:9944} +TARGET_DEST=${2:-metadata/creditcoin_metadata.scale} +TARGET_VERSION=14 # needed for some reason after I upgraded subxt +CURL_PARAMS="-H 'Content-Type: application/json' -d '{\"id\":\"1\", \"jsonrpc\":\"2.0\", \"method\": \"state_getMetadata\", \"params\":[]}' $TARGET_URL" + +COUNTER=0 +# make sure there is a node running at TARGET_URL +while [[ "$(eval curl -s -o /dev/null -w '%{http_code}' "$CURL_PARAMS")" != "200" && $COUNTER -lt 10 ]]; do + echo "ATTEMPT: $COUNTER - Not ready yet ....." 
+ ((COUNTER = COUNTER + 1)) + sleep 2 +done + +# fail if we still can't connect after 10 attempts +set -e + +# Note: using eval b/c params are specified as string above +eval curl "$CURL_PARAMS" >/dev/null + +subxt metadata --url "$TARGET_URL" --version "$TARGET_VERSION" -f bytes >"$TARGET_DEST" + +# Check for the target file and sound the alarm if its not found +if [ -e "$TARGET_DEST" ]; then + echo "$TARGET_DEST generated successfully" +else + echo "$TARGET_DEST not found" >&2 + exit 1 +fi diff --git a/metadata/creditcoin_metadata.scale b/metadata/creditcoin_metadata.scale new file mode 100644 index 0000000..69ce5db Binary files /dev/null and b/metadata/creditcoin_metadata.scale differ diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 0000000..f4883c8 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,4 @@ +[toolchain] +channel = "1.77.0" # rustc 1.77.0 (aedd173a2 2024-03-17) +components = ["cargo", "clippy", "rustc", "rustfmt", "rust-src"] +profile = "minimal" diff --git a/src/config.rs b/src/config.rs index 4ab662b..63d5c06 100644 --- a/src/config.rs +++ b/src/config.rs @@ -31,7 +31,6 @@ // Set Config struct into a CONFIG lazy_static to avoid multiple processing. 
// use clap::{App, Arg, SubCommand}; -use dotenv; use lazy_static::lazy_static; use log::{info, warn}; use serde::Deserialize; @@ -482,12 +481,12 @@ fn get_config() -> Config { // Try to load configuration from file first let config_path = matches.value_of("config-path").unwrap_or(".env"); - match dotenv::from_filename(&config_path).ok() { + match dotenv::from_filename(config_path).ok() { Some(_) => info!("Loading configuration from {} file", &config_path), None => { let config_path = env::var("CRUNCH_CONFIG_FILENAME").unwrap_or(".env".to_string()); - if let Some(_) = dotenv::from_filename(&config_path).ok() { + if dotenv::from_filename(&config_path).is_ok() { info!("Loading configuration from {} file", &config_path); } } diff --git a/src/crunch.rs b/src/crunch.rs index 4c5a62f..35176ca 100644 --- a/src/crunch.rs +++ b/src/crunch.rs @@ -18,20 +18,27 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
-use crate::config::{Config, CONFIG}; -use crate::errors::CrunchError; -use crate::matrix::Matrix; -use crate::runtimes::{ - kusama, polkadot, - support::{ChainPrefix, ChainTokenSymbol, SupportedRuntime}, - westend, +use crate::{ + config::{Config, CONFIG}, + errors::CrunchError, + runtimes::{ + creditcoin, kusama, polkadot, + support::{ChainPrefix, ChainTokenSymbol, SupportedRuntime}, + westend, + }, }; use async_std::task; use log::{debug, error, info, warn}; use rand::Rng; use regex::Regex; use serde::Deserialize; -use std::{convert::TryInto, result::Result, thread, time}; +use std::{ + convert::TryInto, + io::{prelude::*, BufReader}, + net::TcpListener, + result::Result, + thread, time, +}; use subxt::{ ext::sp_core::{crypto, sr25519, Pair as PairT}, @@ -132,7 +139,7 @@ pub async fn create_or_await_substrate_node_client( pub fn get_from_seed(seed: &str, pass: Option<&str>) -> sr25519::Pair { // Use regex to remove control characters let re = Regex::new(r"[\x00-\x1F]").unwrap(); - let clean_seed = re.replace_all(&seed.trim(), ""); + let clean_seed = re.replace_all(seed.trim(), ""); sr25519::Pair::from_string(&clean_seed, pass) .expect("constructed from known-good static value; qed") } @@ -140,7 +147,6 @@ pub fn get_from_seed(seed: &str, pass: Option<&str>) -> sr25519::Pair { pub struct Crunch { runtime: SupportedRuntime, client: OnlineClient, - matrix: Matrix, } impl Crunch { @@ -148,40 +154,13 @@ impl Crunch { let (client, runtime) = create_or_await_substrate_node_client(CONFIG.clone()).await; - // Initialize matrix client - let mut matrix: Matrix = Matrix::new(); - matrix.authenticate(runtime).await.unwrap_or_else(|e| { - error!("{}", e); - Default::default() - }); - - Crunch { - runtime, - client, - matrix, - } + Crunch { runtime, client } } pub fn client(&self) -> &OnlineClient { &self.client } - /// Returns the matrix configuration - pub fn matrix(&self) -> &Matrix { - &self.matrix - } - - pub async fn send_message( - &self, - message: &str, - 
formatted_message: &str, - ) -> Result<(), CrunchError> { - self.matrix() - .send_message(message, formatted_message) - .await?; - Ok(()) - } - /// Spawn and restart crunch flakes task on error pub fn flakes() { spawn_and_restart_crunch_flakes_on_error(); @@ -202,6 +181,7 @@ impl Crunch { SupportedRuntime::Polkadot => polkadot::inspect(self).await, SupportedRuntime::Kusama => kusama::inspect(self).await, SupportedRuntime::Westend => westend::inspect(self).await, + SupportedRuntime::Creditcoin => creditcoin::inspect(self).await, // _ => unreachable!(), } } @@ -211,6 +191,7 @@ impl Crunch { SupportedRuntime::Polkadot => polkadot::try_crunch(self).await, SupportedRuntime::Kusama => kusama::try_crunch(self).await, SupportedRuntime::Westend => westend::try_crunch(self).await, + SupportedRuntime::Creditcoin => creditcoin::try_crunch(self).await, // _ => unreachable!(), } } @@ -225,7 +206,10 @@ impl Crunch { } SupportedRuntime::Westend => { westend::run_and_subscribe_era_paid_events(self).await - } // _ => unreachable!(), + } + SupportedRuntime::Creditcoin => { + creditcoin::run_and_subscribe_era_paid_events(self).await + } } } } @@ -243,9 +227,6 @@ fn spawn_and_restart_subscription_on_error() { _ => { error!("{}", e); let sleep_min = u32::pow(config.error_interval, n); - let message = format!("On hold for {} min!", sleep_min); - let formatted_message = format!("
🚨 An error was raised -> crunch on hold for {} min while rescue is on the way 🚁 🚒 🚑 🚓

", sleep_min); - c.send_message(&message, &formatted_message).await.unwrap(); thread::sleep(time::Duration::from_secs((60 * sleep_min).into())); n += 1; continue; @@ -255,6 +236,9 @@ fn spawn_and_restart_subscription_on_error() { }; } }); + + healthcheck(); + task::block_on(t); } @@ -266,14 +250,8 @@ fn spawn_and_restart_crunch_flakes_on_error() { let c: Crunch = Crunch::new().await; if let Err(e) = c.try_run_batch().await { let sleep_min = u32::pow(config.error_interval, n); - match e { - CrunchError::MatrixError(_) => warn!("Matrix message skipped!"), - _ => { - error!("{}", e); - let message = format!("On hold for {} min!", sleep_min); - let formatted_message = format!("
🚨 An error was raised -> crunch on hold for {} min while rescue is on the way 🚁 🚒 🚑 🚓

", sleep_min); - c.send_message(&message, &formatted_message).await.unwrap(); - } + { + error!("{}", e); } thread::sleep(time::Duration::from_secs((60 * sleep_min).into())); n += 1; @@ -282,9 +260,34 @@ fn spawn_and_restart_crunch_flakes_on_error() { thread::sleep(time::Duration::from_secs(config.interval)); } }); + + healthcheck(); + task::block_on(t); } +fn healthcheck() -> async_std::task::JoinHandle<()> { + task::spawn(async { + let listener = TcpListener::bind("127.0.0.1:9999").unwrap(); + let response = "HTTP/1.1 200 OK\r\n\r\n".as_bytes(); + + for stream in listener.incoming() { + // unwrap and panic on error to interrupt the main task + let mut stream = stream.unwrap(); + + // we need to read the full request before we respond or we get a 'connection reset by peer error' + let buf_reader = BufReader::new(&mut stream); + let _http_request: Vec<_> = buf_reader + .lines() + .map(|result| result.unwrap()) + .take_while(|line| !line.is_empty()) + .collect(); + + stream.write_all(response).unwrap(); + } + }) +} + fn spawn_crunch_view() { let crunch_task = task::spawn(async { let c: Crunch = Crunch::new().await; @@ -303,7 +306,7 @@ pub fn random_wait(max: u64) -> u64 { pub async fn try_fetch_stashes_from_remote_url( ) -> Result>, CrunchError> { let config = CONFIG.clone(); - if config.stashes_url.len() == 0 { + if config.stashes_url.is_empty() { return Ok(None); } let response = reqwest::get(&config.stashes_url).await?.text().await?; @@ -333,7 +336,7 @@ pub async fn try_fetch_onet_data( return Ok(None); } - let endpoint = if config.onet_api_url != "" { + let endpoint = if !config.onet_api_url.is_empty() { config.onet_api_url } else { format!("https://{}-onet-api-beta.turboflakes.io", chain_name) @@ -357,10 +360,12 @@ pub async fn try_fetch_onet_data( reqwest::StatusCode::OK => { match response.json::().await { Ok(parsed) => return Ok(Some(parsed)), - Err(e) => error!( - "Unable to parse ONE-T response for stash {} error: {:?}", - stash, e - ), + Err(e) => { + 
error!( + "Unable to parse ONE-T response for stash {} error: {:?}", + stash, e + ) + } }; } other => { diff --git a/src/errors.rs b/src/errors.rs index 180a6a8..d9c41ab 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -19,8 +19,6 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -use codec; -use reqwest; use std::{str::Utf8Error, string::String}; use subxt::error::{DispatchError, MetadataError}; use thiserror::Error; diff --git a/src/main.rs b/src/main.rs index 7ccfe5f..404671d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -22,14 +22,12 @@ mod config; mod crunch; mod errors; -mod matrix; mod pools; mod report; mod runtimes; mod stats; -use crate::config::CONFIG; -use crate::crunch::Crunch; +use crate::{config::CONFIG, crunch::Crunch}; use log::info; use std::env; @@ -55,5 +53,6 @@ fn main() { if config.is_mode_era { return Crunch::subscribe(); } + info!("flaking"); Crunch::flakes() } diff --git a/src/matrix.rs b/src/matrix.rs deleted file mode 100644 index 66c3ab4..0000000 --- a/src/matrix.rs +++ /dev/null @@ -1,570 +0,0 @@ -// The MIT License (MIT) -// Copyright © 2021 Aukbit Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. -#![allow(dead_code)] -use crate::config::CONFIG; -use crate::errors::MatrixError; -use crate::runtimes::support::SupportedRuntime; -use async_recursion::async_recursion; -use base64::encode; -use log::{debug, info, warn}; -use serde::{Deserialize, Serialize}; -use std::{collections::HashMap, result::Result, thread, time}; -use url::form_urlencoded::byte_serialize; - -const MATRIX_URL: &str = "https://matrix.org/_matrix/client/r0"; - -type AccessToken = String; -type RoomID = String; -type EventID = String; - -impl SupportedRuntime { - fn public_room_alias(&self) -> String { - format!("#{}-crunch-bot:matrix.org", self.to_string().to_lowercase()) - } -} - -#[derive(Deserialize, Debug, Default)] -struct Room { - #[serde(default)] - room_id: RoomID, - #[serde(default)] - servers: Vec, - #[serde(default)] - room_alias: String, - #[serde(default)] - room_alias_name: String, -} - -fn define_private_room_alias_name( - pkg_name: &str, - chain_name: &str, - matrix_user: &str, - matrix_bot_user: &str, -) -> String { - encode( - format!( - "{}/{}/{}/{}", - pkg_name, chain_name, matrix_user, matrix_bot_user - ) - .as_bytes(), - ) -} - -impl Room { - fn new_private(chain: SupportedRuntime) -> Room { - let config = CONFIG.clone(); - let room_alias_name = define_private_room_alias_name( - env!("CARGO_PKG_NAME"), - &chain.to_string(), - &config.matrix_user, - &config.matrix_bot_user, - ); - let v: Vec<&str> = config.matrix_bot_user.split(":").collect(); - Room { - room_alias_name: room_alias_name.to_string(), - room_alias: format!("#{}:{}", room_alias_name.to_string(), v.last().unwrap()), - ..Default::default() - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -struct LoginRequest { - r#type: String, - 
user: String, - password: String, -} - -#[derive(Deserialize, Debug)] -struct LoginResponse { - user_id: String, - access_token: AccessToken, - home_server: String, - device_id: String, - // "well_known": { - // "m.homeserver": { - // "base_url": "https://matrix-client.matrix.org/" - // } - // } -} - -#[derive(Debug, Serialize, Deserialize)] -struct CreateRoomRequest { - name: String, - room_alias_name: String, - topic: String, - preset: String, - invite: Vec, - is_direct: bool, -} - -#[derive(Debug, Serialize, Deserialize)] -struct SendRoomMessageRequest { - msgtype: String, - body: String, - format: String, - formatted_body: String, -} - -#[derive(Deserialize, Debug)] -struct SendRoomMessageResponse { - event_id: EventID, -} - -#[derive(Deserialize, Debug)] -struct JoinedRoomsResponse { - joined_rooms: Vec, -} - -#[derive(Deserialize, Debug)] -struct ErrorResponse { - errcode: String, - error: String, -} - -#[derive(Clone, Debug)] -pub struct Matrix { - pub client: reqwest::Client, - access_token: Option, - chain: SupportedRuntime, - private_room_id: String, - public_room_id: String, - disabled: bool, -} - -impl Default for Matrix { - fn default() -> Matrix { - Matrix { - client: reqwest::Client::new(), - access_token: None, - chain: SupportedRuntime::Westend, - private_room_id: String::from(""), - public_room_id: String::from(""), - disabled: false, - } - } -} - -impl Matrix { - pub fn new() -> Matrix { - let config = CONFIG.clone(); - Matrix { - disabled: config.matrix_disabled, - ..Default::default() - } - } - - pub async fn login(&mut self) -> Result<(), MatrixError> { - if self.disabled { - return Ok(()); - } - let config = CONFIG.clone(); - if let None = config.matrix_bot_user.find(":") { - return Err(MatrixError::Other(format!("matrix bot user '{}' does not specify the matrix server e.g. 
'@your-own-crunch-bot-account:matrix.org'", config.matrix_bot_user))); - } - let client = self.client.clone(); - let req = LoginRequest { - r#type: "m.login.password".to_string(), - user: config.matrix_bot_user.to_string(), - password: config.matrix_bot_password.to_string(), - }; - - let res = client - .post(format!("{}/login", MATRIX_URL)) - .json(&req) - .send() - .await?; - - debug!("response {:?}", res); - match res.status() { - reqwest::StatusCode::OK => { - let response = res.json::().await?; - self.access_token = Some(response.access_token); - info!( - "The 'Crunch Bot' user {} has been authenticated at {}", - response.user_id, response.home_server - ); - Ok(()) - } - _ => { - let response = res.json::().await?; - Err(MatrixError::Other(response.error)) - } - } - } - - #[allow(dead_code)] - pub async fn logout(&mut self) -> Result<(), MatrixError> { - if self.disabled { - return Ok(()); - } - match &self.access_token { - Some(access_token) => { - let client = self.client.clone(); - let res = client - .post(format!( - "{}/logout?access_token={}", - MATRIX_URL, access_token - )) - .send() - .await?; - debug!("response {:?}", res); - match res.status() { - reqwest::StatusCode::OK => { - self.access_token = None; - Ok(()) - } - _ => { - let response = res.json::().await?; - Err(MatrixError::Other(response.error)) - } - } - } - None => Err(MatrixError::Other("access_token not defined".to_string())), - } - } - - // Login user, get or create private room and join public room - pub async fn authenticate( - &mut self, - chain: SupportedRuntime, - ) -> Result<(), MatrixError> { - if self.disabled { - return Ok(()); - } - let config = CONFIG.clone(); - // Set chain - self.chain = chain; - // Login - self.login().await?; - // Get or create user private room - if let Some(private_room) = self.get_or_create_private_room().await? 
{ - self.private_room_id = private_room.room_id; - info!( - "Messages will be sent to room {} (Private)", - private_room.room_alias - ); - } - // Verify if user did not disabled public room in config - if !config.matrix_public_room_disabled { - // Join public room if not a member - match self - .get_room_id_by_room_alias(&self.chain.public_room_alias()) - .await? - { - Some(public_room_id) => { - // Join room if not already a member - let joined_rooms = self.get_joined_rooms().await?; - debug!("joined_rooms {:?}", joined_rooms); - if !joined_rooms.contains(&public_room_id) { - self.join_room(&public_room_id).await?; - } - self.public_room_id = public_room_id; - } - None => { - return Err(MatrixError::Other(format!( - "Public room {} not found.", - self.chain.public_room_alias() - ))) - } - } - info!( - "Messages will be sent to room {} (Public)", - self.chain.public_room_alias() - ); - } - // Change Crunch Bot display name - if !config.matrix_bot_display_name_disabled { - self.change_bot_display_name().await?; - } - Ok(()) - } - - async fn change_bot_display_name(&self) -> Result<(), MatrixError> { - match &self.access_token { - Some(access_token) => { - let config = CONFIG.clone(); - let client = self.client.clone(); - let v: Vec<&str> = config.matrix_user.split(":").collect(); - let username = v.first().unwrap(); - let display_name = format!("Crunch Bot ({})", &username[1..]); - let mut data = HashMap::new(); - data.insert("displayname", &display_name); - let user_id_encoded: String = - byte_serialize(config.matrix_bot_user.as_bytes()).collect(); - let res = client - .put(format!( - "{}/profile/{}/displayname?access_token={}", - MATRIX_URL, user_id_encoded, access_token - )) - .json(&data) - .send() - .await?; - - debug!("response {:?}", res); - match res.status() { - reqwest::StatusCode::OK => { - info!("{} * Matrix bot display name changed", &display_name); - Ok(()) - } - _ => { - let response = res.json::().await?; - Err(MatrixError::Other(response.error)) - } 
- } - } - None => Err(MatrixError::Other("access_token not defined".to_string())), - } - } - - async fn get_room_id_by_room_alias( - &self, - room_alias: &str, - ) -> Result, MatrixError> { - let client = self.client.clone(); - let room_alias_encoded: String = byte_serialize(room_alias.as_bytes()).collect(); - let res = client - .get(format!( - "{}/directory/room/{}", - MATRIX_URL, room_alias_encoded - )) - .send() - .await?; - debug!("response {:?}", res); - match res.status() { - reqwest::StatusCode::OK => { - let room = res.json::().await?; - debug!("{} * Matrix room alias", room_alias); - Ok(Some(room.room_id)) - } - reqwest::StatusCode::NOT_FOUND => Ok(None), - _ => { - let response = res.json::().await?; - Err(MatrixError::Other(response.error)) - } - } - } - - async fn create_private_room(&self) -> Result, MatrixError> { - match &self.access_token { - Some(access_token) => { - let config = CONFIG.clone(); - let client = self.client.clone(); - let room: Room = Room::new_private(self.chain); - let req = CreateRoomRequest { - name: format!("{} Crunch Bot (Private)", self.chain), - room_alias_name: room.room_alias_name.to_string(), - topic: - "Crunch Bot <> Automate staking rewards (flakes) every X hours" - .to_string(), - preset: "trusted_private_chat".to_string(), - invite: vec![config.matrix_user], - is_direct: true, - }; - let res = client - .post(format!( - "{}/createRoom?access_token={}", - MATRIX_URL, access_token - )) - .json(&req) - .send() - .await?; - - debug!("response {:?}", res); - match res.status() { - reqwest::StatusCode::OK => { - let mut r = res.json::().await?; - r.room_alias = room.room_alias; - r.room_alias_name = room.room_alias_name; - info!("{} * Matrix private room alias created", r.room_alias); - Ok(Some(r)) - } - _ => { - let response = res.json::().await?; - Err(MatrixError::Other(response.error)) - } - } - } - None => Err(MatrixError::Other("access_token not defined".to_string())), - } - } - - async fn 
get_or_create_private_room(&self) -> Result, MatrixError> { - match &self.access_token { - Some(_) => { - let mut room: Room = Room::new_private(self.chain); - match self.get_room_id_by_room_alias(&room.room_alias).await? { - Some(room_id) => { - room.room_id = room_id; - Ok(Some(room)) - } - None => Ok(self.create_private_room().await?), - } - } - None => Err(MatrixError::Other("access_token not defined".to_string())), - } - } - - async fn get_joined_rooms(&self) -> Result, MatrixError> { - match &self.access_token { - Some(access_token) => { - let client = self.client.clone(); - let res = client - .get(format!( - "{}/joined_rooms?access_token={}", - MATRIX_URL, access_token - )) - .send() - .await?; - debug!("response {:?}", res); - match res.status() { - reqwest::StatusCode::OK => { - let response = res.json::().await?; - Ok(response.joined_rooms) - } - _ => { - let response = res.json::().await?; - Err(MatrixError::Other(response.error)) - } - } - } - None => Err(MatrixError::Other("access_token not defined".to_string())), - } - } - - #[async_recursion] - async fn join_room(&self, room_id: &str) -> Result, MatrixError> { - match &self.access_token { - Some(access_token) => { - let client = self.client.clone(); - let room_id_encoded: String = - byte_serialize(room_id.as_bytes()).collect(); - let res = client - .post(format!( - "{}/join/{}?access_token={}", - MATRIX_URL, room_id_encoded, access_token - )) - .send() - .await?; - debug!("response {:?}", res); - match res.status() { - reqwest::StatusCode::OK => { - let room = res.json::().await?; - info!("The room {} has been joined.", room.room_id); - Ok(Some(room.room_id)) - } - reqwest::StatusCode::TOO_MANY_REQUESTS => { - let response = res.json::().await?; - warn!( - "Matrix {} -> Wait 5 seconds and try again", - response.error - ); - thread::sleep(time::Duration::from_secs(5)); - return self.join_room(room_id).await; - } - _ => { - let response = res.json::().await?; - Err(MatrixError::Other(response.error)) - 
} - } - } - None => Err(MatrixError::Other("access_token not defined".to_string())), - } - } - - pub async fn send_message( - &self, - message: &str, - formatted_message: &str, - ) -> Result<(), MatrixError> { - if self.disabled { - return Ok(()); - } - let config = CONFIG.clone(); - // Send message to private room (private assigned to the matrix_username in config) - self.dispatch_message(&self.private_room_id, &message, &formatted_message) - .await?; - // Send message to public room (public room available for the connected chain) - if !config.matrix_public_room_disabled { - self.dispatch_message(&self.public_room_id, &message, &formatted_message) - .await?; - } - - Ok(()) - } - - #[async_recursion] - async fn dispatch_message( - &self, - room_id: &str, - message: &str, - formatted_message: &str, - ) -> Result, MatrixError> { - if self.disabled { - return Ok(None); - } - match &self.access_token { - Some(access_token) => { - let client = self.client.clone(); - let req = SendRoomMessageRequest { - msgtype: "m.text".to_string(), - body: message.to_string(), - format: "org.matrix.custom.html".to_string(), - formatted_body: formatted_message.to_string(), - }; - - let res = client - .post(format!( - "{}/rooms/{}/send/m.room.message?access_token={}", - MATRIX_URL, room_id, access_token - )) - .json(&req) - .send() - .await?; - - debug!("response {:?}", res); - match res.status() { - reqwest::StatusCode::OK => { - let response = res.json::().await?; - debug!("{:?} * Matrix messsage dispatched", response); - Ok(Some(response.event_id)) - } - reqwest::StatusCode::TOO_MANY_REQUESTS => { - let response = res.json::().await?; - warn!( - "Matrix {} -> Wait 5 seconds and try again", - response.error - ); - thread::sleep(time::Duration::from_secs(5)); - return self - .dispatch_message(room_id, message, formatted_message) - .await; - } - _ => { - let response = res.json::().await?; - Err(MatrixError::Other(response.error)) - } - } - } - None => 
Err(MatrixError::Other("access_token not defined".to_string())), - } - } -} diff --git a/src/pools.rs b/src/pools.rs index 2dbcf67..f51b8f2 100644 --- a/src/pools.rs +++ b/src/pools.rs @@ -61,7 +61,7 @@ pub fn nomination_pool_account(account_type: AccountType, pool_id: u32) -> Accou let buffer_hex = buffer.encode_hex::(); // NOTE: subxt::utils::AccountId32 currently doesn't support from hex conversion let acc = subxt::ext::sp_runtime::AccountId32::from_str(&buffer_hex).unwrap(); - return AccountId32::from_str(&acc.to_string()).unwrap(); + AccountId32::from_str(&acc.to_string()).unwrap() } #[test] diff --git a/src/report.rs b/src/report.rs index 9a424d1..b89284d 100644 --- a/src/report.rs +++ b/src/report.rs @@ -155,10 +155,12 @@ impl Report { self.add_raw_text("".into()); } + #[allow(dead_code)] pub fn message(&self) -> String { self.body.join("\n") } + #[allow(dead_code)] pub fn formatted_message(&self) -> String { self.body.join("
") } @@ -187,7 +189,7 @@ impl From for Report { * 100.0, ) } else { - format!("") + String::new() }; let summary_already_desc = if data @@ -202,7 +204,7 @@ impl From for Report { .total_validators_previous_era_already_claimed, ) } else { - format!("") + String::new() }; let summary_next_desc = if data.payout_summary.next_minimum_expected > 0 { @@ -258,12 +260,12 @@ impl From for Report { report.add_raw_text(format!( "{} {}", is_active_desc, - data.network.name.to_lowercase().trim().replace(" ", ""), + data.network.name.to_lowercase().trim().replace(' ', ""), validator.stash, validator.name, )); // Show validator warnings - if validator.warnings.len() > 0 { + if !validator.warnings.is_empty() { for warning in validator.warnings { report.add_raw_text(format!("⚠️ {} ⚠️", warning.clone())); warn!("{}", warning); @@ -277,7 +279,7 @@ impl From for Report { )); // Check if there are no payouts - if validator.payouts.len() == 0 { + if validator.payouts.is_empty() { if validator.is_active { report.add_text(format!( "🥣 Looking forward for next crunch {} {}", @@ -301,7 +303,7 @@ impl From for Report { / 10f64.powi(data.network.token_decimals.into()), data.network.token_symbol, good_performance( - payout.points.validator.into(), + payout.points.validator, payout.points.ci99_9_interval.1, payout.points.outlier_limits.1 ) @@ -350,18 +352,18 @@ impl From for Report { // Block number report.add_raw_text(format!( - "💯 Payout for era {} finalized at block #{} + "💯 Payout for era {} finalized at block #{} ({}) ✨", payout.era_index, payout.block_number, - data.network.name.to_lowercase().trim().replace(" ", ""), + data.network.name.to_lowercase().trim().replace(' ', ""), payout.extrinsic, - payout.extrinsic.to_string() + payout.extrinsic )); } // Check if there are still eras left to claim - if validator.unclaimed.len() > 0 { + if !validator.unclaimed.is_empty() { let symbols = number_to_symbols(validator.unclaimed.len(), "⚡", 84); report.add_text(format!( "{} There are still {} eras 
left with {} to crunch {}", @@ -393,7 +395,7 @@ impl From for Report { )); // Claimed - if validator.claimed.len() > 0 { + if !validator.claimed.is_empty() { let claimed_percentage = (validator.claimed.len() as f32 / (validator.claimed.len() + validator.unclaimed.len()) as f32) * 100.0; @@ -432,12 +434,12 @@ impl From for Report { )); for batch in data.pools_summary.batches { report.add_raw_text(format!( - "💯 Batch finalized at block #{} + "💯 Batch finalized at block #{} ({}) ✨", batch.block_number, - data.network.name.to_lowercase().trim().replace(" ", ""), + data.network.name.to_lowercase().trim().replace(' ', ""), batch.extrinsic, - batch.extrinsic.to_string() + batch.extrinsic )); } } else { @@ -474,9 +476,9 @@ impl From for Report { fn number_to_symbols(n: usize, symbol: &str, max: usize) -> String { let cap: usize = match n { - n if n < (max / 4) as usize => 1, - n if n < (max / 2) as usize => 2, - n if n < max - (max / 4) as usize => 3, + n if n < (max / 4) => 1, + n if n < (max / 2) => 2, + n if n < max - (max / 4) => 3, _ => 4, }; let v = vec![""; cap + 1]; diff --git a/src/runtimes/README.md b/src/runtimes/README.md index 31cdaf0..ee7461f 100644 --- a/src/runtimes/README.md +++ b/src/runtimes/README.md @@ -1,9 +1,9 @@ -## Supported Runtimes - - Polkadot - - Kusama - - Westend - - Aleph Zero testnet - - Aleph Zero mainnet +# Supported Runtimes +- Polkadot +- Kusama +- Westend +- Aleph Zero testnet +- Aleph Zero mainnet TODO: Improve the runtimes implementation without the need of replicating the same functions for each runtime. Note that *RuntimeApi* is runtime specific. It gives access to api functions specific for each runtime. 
diff --git a/src/runtimes/creditcoin.rs b/src/runtimes/creditcoin.rs new file mode 100644 index 0000000..0bc659f --- /dev/null +++ b/src/runtimes/creditcoin.rs @@ -0,0 +1,1287 @@ +use crate::{ + config::CONFIG, + crunch::{ + get_account_id_from_storage_key, get_from_seed, random_wait, try_fetch_onet_data, + try_fetch_stashes_from_remote_url, Crunch, NominatorsAmount, ValidatorAmount, + ValidatorIndex, + }, + errors::CrunchError, + pools::{nomination_pool_account, AccountType}, + report::{ + Batch, EraIndex, Network, NominationPoolsSummary, Payout, PayoutSummary, Points, + RawData, Report, Signer, Validator, Validators, + }, + stats, +}; +use async_recursion::async_recursion; +use futures::StreamExt; +use log::{debug, info, warn}; +use std::{ + cmp, + convert::{TryFrom, TryInto}, + fs, + result::Result, + str::FromStr, + thread, time, +}; +use subxt::{ + error::DispatchError, + ext::{ + codec::Encode, + sp_core::{sr25519, Pair as PairT}, + }, + tx::PairSigner, + utils::AccountId32, + PolkadotConfig, +}; + +#[subxt::subxt( + runtime_metadata_path = "metadata/creditcoin_metadata.scale", + derive_for_all_types = "Clone, PartialEq" +)] +mod node_runtime {} + +use node_runtime::{ + runtime_types::{ + bounded_collections::bounded_vec::BoundedVec, + pallet_nomination_pools::{BondExtra, ClaimPermission}, + }, + staking::events::{EraPaid, PayoutStarted, Rewarded}, + system::events::ExtrinsicFailed, + utility::events::{ + BatchCompleted, BatchCompletedWithErrors, BatchInterrupted, ItemCompleted, + ItemFailed, + }, +}; + +type Call = node_runtime::runtime_types::creditcoin3_runtime::RuntimeCall; +type StakingCall = node_runtime::runtime_types::pallet_staking::pallet::pallet::Call; +type NominationPoolsCall = + node_runtime::runtime_types::pallet_nomination_pools::pallet::Call; + +pub async fn run_and_subscribe_era_paid_events( + crunch: &Crunch, +) -> Result<(), CrunchError> { + info!("Inspect and `crunch` unclaimed payout rewards"); + // Run once before start subscription + 
try_crunch(&crunch).await?; + info!("Subscribe 'EraPaid' on-chain finalized event"); + let api = crunch.client().clone(); + let mut block_sub = api.blocks().subscribe_finalized().await?; + while let Some(block) = block_sub.next().await { + let block = block?; + + let events = block.events().await?; + + // Event --> staking::EraPaid + if let Some(_event) = events.find_first::()? { + let wait: u64 = random_wait(240); + info!("Waiting {} seconds before run batch", wait); + thread::sleep(time::Duration::from_secs(wait)); + try_crunch(&crunch).await?; + } + } + // If subscription has closed for some reason await and subscribe again + Err(CrunchError::SubscriptionFinished) +} + +pub async fn try_crunch(crunch: &Crunch) -> Result<(), CrunchError> { + let config = CONFIG.clone(); + let api = crunch.client().clone(); + + // Load seed account + let seed = fs::read_to_string(config.seed_path) + .expect("Something went wrong reading the seed file"); + let seed_account: sr25519::Pair = get_from_seed(&seed, None); + let seed_account_signer = + PairSigner::::new(seed_account.clone()); + let seed_account_id: AccountId32 = seed_account.public().into(); + + // Get signer account identity + let signer_name = get_display_name(&crunch, &seed_account_id, None).await?; + let mut signer = Signer { + account: seed_account_id.clone(), + name: signer_name, + warnings: Vec::new(), + }; + debug!("signer {:?}", signer); + + // Warn if signer account is running low on funds (if lower than 2x Existential Deposit) + let ed_addr = node_runtime::constants().balances().existential_deposit(); + let ed = api.constants().at(&ed_addr)?; + + let seed_account_info_addr = + node_runtime::storage().system().account(&seed_account_id); + if let Some(seed_account_info) = api + .storage() + .at_latest() + .await? + .fetch(&seed_account_info_addr) + .await? 
+ { + if seed_account_info.data.free + <= (config.existential_deposit_factor_warning as u128 * ed) + { + signer + .warnings + .push("⚡ Signer account is running low on funds ⚡".to_string()); + } + } + + // Try run payouts in batches + let (mut validators, payout_summary) = + try_run_batch_payouts(&crunch, &seed_account_signer).await?; + + // Try run members in batches + let pools_summary = try_run_batch_pool_members(&crunch, &seed_account_signer).await?; + + // Get Network name + let chain_name = api.rpc().system_chain().await?; + + // Try fetch ONE-T grade data + for v in &mut validators { + v.onet = try_fetch_onet_data(chain_name.to_lowercase(), v.stash.clone()).await?; + } + + // Get Era index + let active_era_addr = node_runtime::storage().staking().active_era(); + let active_era_index = match api + .storage() + .at_latest() + .await? + .fetch(&active_era_addr) + .await? + { + Some(info) => info.index, + None => return Err(CrunchError::Other("Active era not available".into())), + }; + + let properties = api.rpc().system_properties().await?; + + // Get Token symbol + let token_symbol: String = if let Some(token_symbol) = properties.get("tokenSymbol") { + token_symbol.as_str().unwrap_or_default().to_string() + } else { + "ND".to_string() + }; + + // Get Token decimals + let token_decimals: u8 = if let Some(token_decimals) = properties.get("tokenDecimals") + { + token_decimals + .as_u64() + .unwrap_or_default() + .try_into() + .unwrap() + } else { + 12 + }; + + // Set network info + let network = Network { + name: chain_name, + active_era: active_era_index, + token_symbol, + token_decimals, + }; + debug!("network {:?}", network); + + let data = RawData { + network, + signer, + validators, + payout_summary, + pools_summary, + }; + + let _report = Report::from(data); + + Ok(()) +} + +pub async fn try_run_batch_pool_members( + crunch: &Crunch, + signer: &PairSigner, +) -> Result { + let config = CONFIG.clone(); + let api = crunch.client().clone(); + + let mut 
calls_for_batch: Vec = vec![]; + let mut summary: NominationPoolsSummary = Default::default(); + + if let Some(members) = try_fetch_pool_members_for_compound(&crunch).await? { + // + for member in &members { + // + let call = Call::NominationPools(NominationPoolsCall::bond_extra_other { + member: subxt::utils::MultiAddress::Id(member.clone()), + extra: BondExtra::Rewards, + }); + calls_for_batch.push(call); + summary.calls += 1; + } + summary.total_members = members.len() as u32; + } + + if calls_for_batch.len() > 0 { + // TODO check batch call weight or maximum_calls [default: 8] + // + // Calculate the number of extrinsics (iteractions) based on the maximum number of calls per batch + // and the number of calls to be sent + // + let maximum_batch_calls = (calls_for_batch.len() as f32 + / config.maximum_pool_members_calls as f32) + .ceil() as u32; + let mut iteration = Some(0); + while let Some(x) = iteration { + if x == maximum_batch_calls { + iteration = None; + } else { + let call_start_index: usize = + (x * config.maximum_pool_members_calls).try_into().unwrap(); + let call_end_index: usize = if config.maximum_pool_members_calls + > calls_for_batch[call_start_index..].len() as u32 + { + ((x * config.maximum_pool_members_calls) + + calls_for_batch[call_start_index..].len() as u32) + .try_into() + .unwrap() + } else { + ((x * config.maximum_pool_members_calls) + + config.maximum_pool_members_calls) + .try_into() + .unwrap() + }; + + debug!( + "batch pool_members_calls indexes [{:?} : {:?}]", + call_start_index, call_end_index + ); + + let calls_for_batch_clipped = + calls_for_batch[call_start_index..call_end_index].to_vec(); + + // Note: Unvalidated extrinsic. If it fails a static metadata file will need to be updated! + let tx = node_runtime::tx() + .utility() + .force_batch(calls_for_batch_clipped.clone()) + .unvalidated(); + + let batch_response = api + .tx() + .sign_and_submit_then_watch_default(&tx, signer) + .await? 
+ .wait_for_finalized() + .await?; + + let tx_events = batch_response.fetch_events().await?; + + // Get block number + let block_number = if let Some(header) = + api.rpc().header(Some(tx_events.block_hash())).await? + { + header.number + } else { + 0 + }; + + // Iterate over events to calculate respective reward amounts + for event in tx_events.iter() { + let event = event?; + if let Some(_ev) = event.as_event::()? { + // https://polkadot.js.org/docs/substrate/events#itemcompleted + // summary: A single item within a Batch of dispatches has completed with no error. + // + summary.calls_succeeded += 1; + } else if let Some(_ev) = event.as_event::()? { + // https://polkadot.js.org/docs/substrate/events/#itemfailedspruntimedispatcherror + // summary: A single item within a Batch of dispatches has completed with error. + // + summary.calls_failed += 1; + } else if let Some(_ev) = event.as_event::()? { + // https://polkadot.js.org/docs/substrate/events#batchcompleted + // summary: Batch of dispatches completed fully with no error. + info!( + "Nomination Pools Compound Batch Completed ({} calls)", + calls_for_batch_clipped.len() + ); + let b = Batch { + block_number, + extrinsic: tx_events.extrinsic_hash(), + }; + summary.batches.push(b); + } else if let Some(_ev) = + event.as_event::()? + { + // https://polkadot.js.org/docs/substrate/events/#batchcompletedwitherrors + // summary: Batch of dispatches completed but has errors. 
+ info!( + "Nomination Pools Compound Batch Completed with errors ({} calls)", + calls_for_batch_clipped.len() + ); + let b = Batch { + block_number, + extrinsic: tx_events.extrinsic_hash(), + }; + summary.batches.push(b); + } + } + iteration = Some(x + 1); + } + } + } + + Ok(summary) +} + +pub async fn try_run_batch_payouts( + crunch: &Crunch, + signer: &PairSigner, +) -> Result<(Validators, PayoutSummary), CrunchError> { + let config = CONFIG.clone(); + let api = crunch.client().clone(); + // Warn if static metadata is no longer the same as the latest runtime version + + if node_runtime::validate_codegen(&api).is_err() { + warn!("Crunch upgrade might be required soon. Local static metadata differs from current chain runtime version."); + } + + // Get Era index + let active_era_addr = node_runtime::storage().staking().active_era(); + let active_era_index = match api + .storage() + .at_latest() + .await? + .fetch(&active_era_addr) + .await? + { + Some(info) => info.index, + None => return Err(CrunchError::Other("Active era not available".into())), + }; + + // Add unclaimed eras into payout staker calls + let mut calls_for_batch: Vec = vec![]; + let mut validators = collect_validators_data(&crunch, active_era_index).await?; + let mut summary: PayoutSummary = Default::default(); + + for v in &mut validators { + // + if v.unclaimed.len() > 0 { + let mut maximum_payouts = Some(config.maximum_payouts); + // define extrinsic payout stakers calls as many as unclaimed eras or maximum_payouts reached + while let Some(i) = maximum_payouts { + if i == 0 { + maximum_payouts = None; + } else { + if let Some(claim_era) = v.unclaimed.pop() { + let call = Call::Staking(StakingCall::payout_stakers { + validator_stash: v.stash.clone(), + era: claim_era, + }); + calls_for_batch.push(call); + summary.calls += 1; + } + maximum_payouts = Some(i - 1); + } + } + } + if v.is_active { + summary.next_minimum_expected += 1; + } + } + + if calls_for_batch.len() > 0 { + // TODO check batch call 
weight or maximum_calls [default: 8] + // + // Calculate the number of extrinsics (iteractions) based on the maximum number of calls per batch + // and the number of calls to be sent + // + let maximum_batch_calls = + (calls_for_batch.len() as f32 / config.maximum_calls as f32).ceil() as u32; + let mut iteration = Some(0); + while let Some(x) = iteration { + if x == maximum_batch_calls { + iteration = None; + } else { + let mut validator_index: ValidatorIndex = None; + let mut era_index: EraIndex = 0; + let mut validator_amount_value: ValidatorAmount = 0; + let mut nominators_amount_value: NominatorsAmount = 0; + let mut nominators_quantity = 0; + + let call_start_index: usize = + (x * config.maximum_calls).try_into().unwrap(); + let call_end_index: usize = if config.maximum_calls + > calls_for_batch[call_start_index..].len() as u32 + { + ((x * config.maximum_calls) + + calls_for_batch[call_start_index..].len() as u32) + .try_into() + .unwrap() + } else { + ((x * config.maximum_calls) + config.maximum_calls) + .try_into() + .unwrap() + }; + + debug!( + "batch call indexes [{:?} : {:?}]", + call_start_index, call_end_index + ); + + let calls_for_batch_clipped = + calls_for_batch[call_start_index..call_end_index].to_vec(); + + // Note: Unvalidated extrinsic. If it fails a static metadata file will need to be updated! + let tx = node_runtime::tx() + .utility() + .force_batch(calls_for_batch_clipped.clone()) + .unvalidated(); + + let batch_response = api + .tx() + .sign_and_submit_then_watch_default(&tx, signer) + .await? + .wait_for_finalized() + .await?; + + // Alternately, we could just `fetch_events`, which grabs all of the events like + // the above, but does not check for success, and leaves it up to you: + let tx_events = batch_response.fetch_events().await?; + + // Get block number + let block_number = if let Some(header) = + api.rpc().header(Some(tx_events.block_hash())).await? 
+ { + header.number + } else { + 0 + }; + + // Iterate over events to calculate respective reward amounts + for event in tx_events.iter() { + let event = event?; + if let Some(_ev) = event.as_event::()? { + let dispatch_error = DispatchError::decode_from( + event.field_bytes(), + api.metadata(), + )?; + return Err(dispatch_error.into()); + } else if let Some(ev) = event.as_event::()? { + // https://polkadot.js.org/docs/substrate/events#payoutstartedu32-accountid32 + // PayoutStarted(u32, AccountId32) + // summary: The stakers' rewards are getting paid. [era_index, validator_stash] + // + debug!("{:?}", ev); + let validator_index_ref = &mut validators + .iter() + .position(|v| v.stash == ev.validator_stash); + era_index = ev.era_index; + validator_index = *validator_index_ref; + validator_amount_value = 0; + nominators_amount_value = 0; + nominators_quantity = 0; + } else if let Some(ev) = event.as_event::()? { + // https://polkadot.js.org/docs/substrate/events#rewardedaccountid32-u128 + // Rewarded(AccountId32, u128) + // summary: An account has been rewarded for their signed submission being finalized + // + debug!("{:?}", ev); + if let Some(i) = validator_index { + let validator = &mut validators[i]; + if ev.stash == validator.stash { + validator_amount_value = ev.amount; + } else { + nominators_amount_value += ev.amount; + nominators_quantity += 1; + } + } + } else if let Some(_ev) = event.as_event::()? { + // https://polkadot.js.org/docs/substrate/events#itemcompleted + // summary: A single item within a Batch of dispatches has completed with no error. 
+ // + if let Some(i) = validator_index { + let validator = &mut validators[i]; + // Add era to claimed vec + validator.claimed.push(era_index); + // Fetch stash points + let points = get_validator_points_info( + &crunch, + era_index, + &validator.stash, + ) + .await?; + + let p = Payout { + block_number, + extrinsic: tx_events.extrinsic_hash(), + era_index, + validator_amount_value, + nominators_amount_value, + nominators_quantity, + points, + }; + validator.payouts.push(p); + summary.calls_succeeded += 1; + } + } else if let Some(_ev) = event.as_event::()? { + // https://polkadot.js.org/docs/substrate/events/#itemfailedspruntimedispatcherror + // summary: A single item within a Batch of dispatches has completed with error. + // + summary.calls_failed += 1; + } else if let Some(_ev) = event.as_event::()? { + // https://polkadot.js.org/docs/substrate/events#batchcompleted + // summary: Batch of dispatches completed fully with no error. + info!( + "Batch Completed ({} calls)", + calls_for_batch_clipped.len() + ); + } else if let Some(_ev) = + event.as_event::()? + { + // https://polkadot.js.org/docs/substrate/events/#batchcompletedwitherrors + // summary: Batch of dispatches completed but has errors. + info!( + "Batch Completed with errors ({} calls)", + calls_for_batch_clipped.len() + ); + } else if let Some(ev) = event.as_event::()? { + // NOTE: Deprecate with force_batch + // + // https://polkadot.js.org/docs/substrate/events#batchinterruptedu32-spruntimedispatcherror + // summary: Batch of dispatches did not complete fully. Index of first failing dispatch given, as well as the error. 
+ // + // Fix: https://github.com/turboflakes/crunch/issues/4 + // Most likely the batch was interrupted because of an AlreadyClaimed era + // BatchInterrupted { index: 0, error: Module { index: 6, error: 14 } } + warn!("{:?}", ev); + if let Call::Staking(call) = + &calls_for_batch_clipped[usize::try_from(ev.index).unwrap()] + { + match &call { + StakingCall::payout_stakers { + validator_stash, .. + } => { + warn!( + "Batch interrupted at stash: {:?}", + validator_stash + ); + let validator_index = &mut validators + .iter() + .position(|v| v.stash == *validator_stash); + + if let Some(i) = *validator_index { + let validator = &mut validators[i]; + // TODO: decode DispatchError to a readable format + validator + .warnings + .push("⚡ Batch interrupted ⚡".to_string()); + } + } + _ => unreachable!(), + }; + } + } + } + + iteration = Some(x + 1); + } + } + } + + debug!("validators {:?}", validators); + + // Prepare summary report + summary.total_validators = validators.len() as u32; + summary.total_validators_previous_era_already_claimed = validators + .iter() + .map(|v| v.claimed.contains(&(active_era_index - 1)) as u32) + .reduce(|a, b| a + b) + .unwrap_or_default(); + Ok((validators, summary)) +} + +async fn collect_validators_data( + crunch: &Crunch, + era_index: EraIndex, +) -> Result { + let api = crunch.client().clone(); + + // Get unclaimed eras for the stash addresses + let active_validators_addr = node_runtime::storage().session().validators(); + let active_validators = api + .storage() + .at_latest() + .await? 
+ .fetch(&active_validators_addr) + .await?; + debug!("active_validators {:?}", active_validators); + let mut validators: Validators = Vec::new(); + + let stashes = get_stashes(&crunch).await?; + + for (_i, stash_str) in stashes.iter().enumerate() { + let stash = AccountId32::from_str(stash_str).map_err(|e| { + CrunchError::Other(format!("Invalid account: {stash_str} error: {e:?}")) + })?; + + // Check if stash has bonded controller + let controller_addr = node_runtime::storage().staking().bonded(&stash); + let controller = match api + .storage() + .at_latest() + .await? + .fetch(&controller_addr) + .await? + { + Some(controller) => controller, + None => { + let mut v = Validator::new(stash.clone()); + v.warnings = vec![format!( + "Stash {} does not have a bonded Controller account!", + stash + )]; + validators.push(v); + continue; + } + }; + debug!("controller {:?}", controller); + // Instantiates a new validator struct + let mut v = Validator::new(stash.clone()); + + // Set controller + v.controller = Some(controller.clone()); + + // Get validator name + v.name = get_display_name(&crunch, &stash, None).await?; + + // Check if validator is in active set + v.is_active = if let Some(ref av) = active_validators { + av.contains(&stash) + } else { + false + }; + + // Look for unclaimed eras, starting on current_era - maximum_eras + let start_index = get_era_index_start(era_index, crunch).await?; + + // Get staking info from ledger + let ledger_addr = node_runtime::storage().staking().ledger(&controller); + if let Some(staking_ledger) = + api.storage().at_latest().await?.fetch(&ledger_addr).await? 
+ { + debug!( + "{} * claimed_rewards: {:?}", + stash, staking_ledger.claimed_rewards + ); + + // deconstruct claimed rewards + let BoundedVec(claimed_rewards) = staking_ledger.claimed_rewards; + // Find unclaimed eras in previous 84 eras (reverse order) + for e in (start_index..era_index).rev() { + // If reward was already claimed skip it + if claimed_rewards.contains(&e) { + if e == era_index - 1 { + v.is_previous_era_already_claimed = true; + } + v.claimed.push(e); + continue; + } + // Verify if stash was active in set + let eras_stakers_addr = + node_runtime::storage().staking().eras_stakers(&e, &stash); + if let Some(exposure) = api + .storage() + .at_latest() + .await? + .fetch(&eras_stakers_addr) + .await? + { + if exposure.total > 0 { + v.unclaimed.push(e) + } + } + } + } + validators.push(v); + } + debug!("validators {:?}", validators); + Ok(validators) +} + +async fn get_era_index_start( + era_index: EraIndex, + crunch: &Crunch, +) -> Result { + let api = crunch.client().clone(); + let config = CONFIG.clone(); + + let history_depth_addr = node_runtime::constants().staking().history_depth(); + let history_depth: u32 = api.constants().at(&history_depth_addr)?; + + if era_index < cmp::min(config.maximum_history_eras, history_depth) { + return Ok(0); + } else if config.is_short { + return Ok(era_index - cmp::min(config.maximum_history_eras, history_depth)); + } else { + // Note: If crunch is running in verbose mode, ignore MAXIMUM_ERAS + // since we still want to show information about inclusion and eras crunched for all history_depth + return Ok(era_index - history_depth); + } +} + +async fn get_validator_points_info( + crunch: &Crunch, + era_index: EraIndex, + stash: &AccountId32, +) -> Result { + let api = crunch.client().clone(); + // Get era reward points + let era_reward_points_addr = node_runtime::storage() + .staking() + .eras_reward_points(&era_index); + + if let Some(era_reward_points) = api + .storage() + .at_latest() + .await? 
+ .fetch(&era_reward_points_addr) + .await? + { + let stash_points = match era_reward_points + .individual + .iter() + .find(|(s, _)| *s == *stash) + { + Some((_, p)) => *p, + None => 0, + }; + + // Calculate average points + let mut points: Vec = era_reward_points + .individual + .into_iter() + .map(|(_, points)| points) + .collect(); + + let points_f64: Vec = points.iter().map(|points| *points as f64).collect(); + + let points = Points { + validator: stash_points, + era_avg: stats::mean(&points_f64), + ci99_9_interval: stats::confidence_interval_99_9(&points_f64), + outlier_limits: stats::iqr_interval(&mut points), + }; + + Ok(points) + } else { + Ok(Points::default()) + } +} + +#[async_recursion] +async fn get_display_name( + crunch: &Crunch, + stash: &AccountId32, + sub_account_name: Option, +) -> Result { + let api = crunch.client().clone(); + + let identity_of_addr = node_runtime::storage().identity().identity_of(stash); + match api + .storage() + .at_latest() + .await? + .fetch(&identity_of_addr) + .await? + { + Some(identity) => { + debug!("identity {:?}", identity); + let parent = parse_identity_data(identity.info.display); + let name = match sub_account_name { + Some(child) => format!("{}/{}", parent, child), + None => parent, + }; + Ok(name) + } + None => { + let super_of_addr = node_runtime::storage().identity().super_of(stash); + if let Some((parent_account, data)) = api + .storage() + .at_latest() + .await? + .fetch(&super_of_addr) + .await? 
+ { + let sub_account_name = parse_identity_data(data); + return get_display_name( + &crunch, + &parent_account, + Some(sub_account_name.to_string()), + ) + .await; + } else { + let s = &stash.to_string(); + Ok(format!("{}...{}", &s[..6], &s[s.len() - 6..])) + } + } + } +} + +// +fn parse_identity_data( + data: node_runtime::runtime_types::pallet_identity::types::Data, +) -> String { + match data { + node_runtime::runtime_types::pallet_identity::types::Data::Raw0(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw1(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw2(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw3(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw4(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw5(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw6(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw7(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw8(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw9(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw10(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw11(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw12(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw13(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw14(bytes) => { + str(bytes.to_vec()) + } + 
node_runtime::runtime_types::pallet_identity::types::Data::Raw15(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw16(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw17(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw18(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw19(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw20(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw21(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw22(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw23(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw24(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw25(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw26(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw27(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw28(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw29(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw30(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw31(bytes) => { + str(bytes.to_vec()) + } + node_runtime::runtime_types::pallet_identity::types::Data::Raw32(bytes) => { + str(bytes.to_vec()) + } + _ => format!("???"), + } +} + +fn str(bytes: Vec) -> String { + format!("{}", 
String::from_utf8(bytes).expect("Identity not utf-8")) +} + +pub async fn inspect(crunch: &Crunch) -> Result<(), CrunchError> { + let api = crunch.client().clone(); + + let stashes = get_stashes(&crunch).await?; + info!("Inspect {} stashes -> {}", stashes.len(), stashes.join(",")); + + let history_depth_addr = node_runtime::constants().staking().history_depth(); + let mut history_depth: u32 = api.constants().at(&history_depth_addr)?; + + let active_era_addr = node_runtime::storage().staking().active_era(); + let active_era_index = match api + .storage() + .at_latest() + .await? + .fetch(&active_era_addr) + .await? + { + Some(info) => info.index, + None => return Err(CrunchError::Other("Active era not available".into())), + }; + + // if the network is newer than the max history depth + if history_depth > active_era_index { + history_depth = active_era_index; + } + + for stash_str in stashes.iter() { + let stash = AccountId32::from_str(stash_str).map_err(|e| { + CrunchError::Other(format!("Invalid account: {stash_str} error: {e:?}")) + })?; + info!("{} * Stash account", stash); + + let start_index = active_era_index - history_depth; + let mut unclaimed: Vec = Vec::new(); + let mut claimed: Vec = Vec::new(); + + let bonded_addr = node_runtime::storage().staking().bonded(&stash); + if let Some(controller) = + api.storage().at_latest().await?.fetch(&bonded_addr).await? + { + let ledger_addr = node_runtime::storage().staking().ledger(&controller); + if let Some(ledger_response) = + api.storage().at_latest().await?.fetch(&ledger_addr).await? 
+ { + // deconstruct claimed rewards + let BoundedVec(claimed_rewards) = ledger_response.claimed_rewards; + // Find unclaimed eras in previous 84 eras + for era_index in start_index..active_era_index { + // If reward was already claimed skip it + if claimed_rewards.contains(&era_index) { + claimed.push(era_index); + continue; + } + // Verify if stash was active in set + let eras_stakers_addr = node_runtime::storage() + .staking() + .eras_stakers(&era_index, &stash); + if let Some(exposure) = api + .storage() + .at_latest() + .await? + .fetch(&eras_stakers_addr) + .await? + { + if exposure.total > 0 { + unclaimed.push(era_index) + } + } + } + } + } + info!( + "{} claimed eras in the last {} -> {:?}", + claimed.len(), + history_depth, + claimed + ); + info!( + "{} unclaimed eras in the last {} -> {:?}", + unclaimed.len(), + history_depth, + unclaimed + ); + } + info!("Job done!"); + Ok(()) +} + +pub async fn get_stashes(crunch: &Crunch) -> Result, CrunchError> { + let config = CONFIG.clone(); + + let mut stashes: Vec = config.stashes; + info!("{} stashes loaded from 'config.stashes'", stashes.len()); + + if let Some(remotes) = try_fetch_stashes_from_remote_url().await? { + stashes.extend(remotes); + }; + + if let Some(nominees) = try_fetch_stashes_from_pool_ids(&crunch).await? { + stashes.extend(nominees); + } + + if config.unique_stashes_enabled { + // sort and remove duplicates + stashes.sort(); + stashes.dedup(); + } + + Ok(stashes) +} + +pub async fn try_fetch_pool_operators_for_compound( + crunch: &Crunch, +) -> Result>, CrunchError> { + let config = CONFIG.clone(); + + if config.pool_ids.len() == 0 && !config.pool_only_operator_compound_enabled { + return Ok(None); + } + + let api = crunch.client().clone(); + + let mut members: Vec = Vec::new(); + + for pool_id in &config.pool_ids { + let bonded_pool_addr = node_runtime::storage() + .nomination_pools() + .bonded_pools(pool_id); + if let Some(pool) = api + .storage() + .at_latest() + .await? 
+ .fetch(&bonded_pool_addr) + .await? + { + let permissions_addr = node_runtime::storage() + .nomination_pools() + .claim_permissions(pool.roles.depositor.clone()); + + if let Some(permissions) = api + .storage() + .at_latest() + .await? + .fetch(&permissions_addr) + .await? + { + if [ + ClaimPermission::PermissionlessCompound, + ClaimPermission::PermissionlessAll, + ] + .contains(&permissions) + { + // fetch pending rewards + let call_name = format!("NominationPoolsApi_pending_rewards"); + let claimable: u128 = api + .rpc() + .state_call( + &call_name, + Some(&pool.roles.depositor.clone().encode()), + None, + ) + .await?; + if claimable > config.pool_compound_threshold.into() { + members.push(pool.roles.depositor.clone()); + } + } + } + } + } + Ok(Some(members)) +} + +pub async fn try_fetch_pool_members_for_compound( + crunch: &Crunch, +) -> Result>, CrunchError> { + let config = CONFIG.clone(); + if config.pool_ids.len() == 0 + && !config.pool_only_operator_compound_enabled + && !config.pool_members_compound_enabled + { + return Ok(None); + } + + if config.pool_only_operator_compound_enabled { + return try_fetch_pool_operators_for_compound(&crunch).await; + } + + let api = crunch.client().clone(); + + let mut members: Vec = Vec::new(); + + // 1. get all members with permissions set as [PermissionlessCompound, PermissionlessAll] + let permissions_addr = node_runtime::storage() + .nomination_pools() + .claim_permissions_root(); + + let mut results = api + .storage() + .at_latest() + .await? + .iter(permissions_addr, 10) + .await?; + + while let Some((key, value)) = results.next().await? 
{ + if [ + ClaimPermission::PermissionlessCompound, + ClaimPermission::PermissionlessAll, + ] + .contains(&value) + { + let member = get_account_id_from_storage_key(key); + debug!("member: {}", member); + + // 2 .Verify if member belongs to the pools configured + let pool_member_addr = node_runtime::storage() + .nomination_pools() + .pool_members(&member); + if let Some(pool_member) = api + .storage() + .at_latest() + .await? + .fetch(&pool_member_addr) + .await? + { + if config.pool_ids.contains(&pool_member.pool_id) { + // fetch pending rewards + let call_name = format!("NominationPoolsApi_pending_rewards"); + let claimable: u128 = api + .rpc() + .state_call(&call_name, Some(&member.encode()), None) + .await?; + if claimable > config.pool_compound_threshold.into() { + members.push(member); + } + } + } + } + } + + Ok(Some(members)) +} + +pub async fn try_fetch_stashes_from_pool_ids( + crunch: &Crunch, +) -> Result>, CrunchError> { + let api = crunch.client().clone(); + let config = CONFIG.clone(); + if config.pool_ids.len() == 0 + || (!config.pool_active_nominees_payout_enabled + && !config.pool_all_nominees_payout_enabled) + { + return Ok(None); + } + + let active_era_addr = node_runtime::storage().staking().active_era(); + let era_index = match api + .storage() + .at_latest() + .await? + .fetch(&active_era_addr) + .await? + { + Some(info) => info.index, + None => return Err("Active era not defined".into()), + }; + + let mut all: Vec = Vec::new(); + let mut active: Vec = Vec::new(); + + for pool_id in config.pool_ids.iter() { + let pool_stash_account = nomination_pool_account(AccountType::Bonded, *pool_id); + let nominators_addr = node_runtime::storage() + .staking() + .nominators(&pool_stash_account); + if let Some(nominations) = api + .storage() + .at_latest() + .await? + .fetch(&nominators_addr) + .await? 
+ { + // deconstruct targets + let BoundedVec(targets) = nominations.targets; + all.extend( + targets + .iter() + .map(|s| s.to_string()) + .collect::>(), + ); + + // NOTE_1: Only check active nominees from previous era + // By the end of current era crunch will trigger any payout left from previous eras if that is the case. + // NOTE_2: Ideally nominees shouldn't have any pending payouts, but is in the best interest of the pool members + // that pool operators trigger payouts as a backup at least for the active nominees. + for stash in targets { + let eras_stakers_addr = node_runtime::storage() + .staking() + .eras_stakers(era_index - 1, &stash); + if let Some(exposure) = api + .storage() + .at_latest() + .await? + .fetch(&eras_stakers_addr) + .await? + { + if exposure.others.iter().any(|x| x.who == pool_stash_account) { + active.push(stash.to_string()); + } + } + } + } + } + if all.is_empty() && active.is_empty() { + return Ok(None); + } + + if config.pool_all_nominees_payout_enabled { + info!( + "{} stashes loaded from 'pool-ids': [{}]", + all.len(), + config + .pool_ids + .iter() + .map(|p| p.to_string()) + .collect::>() + .join(",") + ); + + return Ok(Some(all)); + } + + // Note: by default only active nominees (stashes) are triggered + info!( + "{} active stashes loaded from 'pool-ids': [{}]", + active.len(), + config + .pool_ids + .iter() + .map(|p| p.to_string()) + .collect::>() + .join(",") + ); + + Ok(Some(active)) +} diff --git a/src/runtimes/kusama.rs b/src/runtimes/kusama.rs index 60be8b1..a5fe94f 100644 --- a/src/runtimes/kusama.rs +++ b/src/runtimes/kusama.rs @@ -19,25 +19,31 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
-use crate::config::CONFIG; -use crate::crunch::{ - get_account_id_from_storage_key, get_from_seed, random_wait, try_fetch_onet_data, - try_fetch_stashes_from_remote_url, Crunch, NominatorsAmount, ValidatorAmount, - ValidatorIndex, -}; -use crate::errors::CrunchError; -use crate::pools::{nomination_pool_account, AccountType}; -use crate::report::{ - Batch, EraIndex, Network, NominationPoolsSummary, Payout, PayoutSummary, Points, - RawData, Report, Signer, Validator, Validators, +use crate::{ + config::CONFIG, + crunch::{ + get_account_id_from_storage_key, get_from_seed, random_wait, try_fetch_onet_data, + try_fetch_stashes_from_remote_url, Crunch, NominatorsAmount, ValidatorAmount, + ValidatorIndex, + }, + errors::CrunchError, + pools::{nomination_pool_account, AccountType}, + report::{ + Batch, EraIndex, Network, NominationPoolsSummary, Payout, PayoutSummary, Points, + RawData, Signer, Validator, Validators, + }, + stats, }; -use crate::stats; use async_recursion::async_recursion; use futures::StreamExt; use log::{debug, info, warn}; use std::{ - cmp, convert::TryFrom, convert::TryInto, fs, result::Result, str::FromStr, thread, - time, + cmp, + convert::{TryFrom, TryInto}, + fs, + result::Result, + str::FromStr, + thread, time, }; use subxt::{ error::DispatchError, @@ -57,17 +63,16 @@ use subxt::{ mod node_runtime {} use node_runtime::{ - runtime_types::bounded_collections::bounded_vec::BoundedVec, - runtime_types::pallet_nomination_pools::{BondExtra, ClaimPermission}, - staking::events::EraPaid, - staking::events::PayoutStarted, - staking::events::Rewarded, + runtime_types::{ + bounded_collections::bounded_vec::BoundedVec, + pallet_nomination_pools::{BondExtra, ClaimPermission}, + }, + staking::events::{EraPaid, PayoutStarted, Rewarded}, system::events::ExtrinsicFailed, - utility::events::BatchCompleted, - utility::events::BatchCompletedWithErrors, - utility::events::BatchInterrupted, - utility::events::ItemCompleted, - utility::events::ItemFailed, + 
utility::events::{ + BatchCompleted, BatchCompletedWithErrors, BatchInterrupted, ItemCompleted, + ItemFailed, + }, }; type Call = node_runtime::runtime_types::kusama_runtime::RuntimeCall; @@ -202,7 +207,7 @@ pub async fn try_crunch(crunch: &Crunch) -> Result<(), CrunchError> { }; debug!("network {:?}", network); - let data = RawData { + let _data = RawData { network, signer, validators, @@ -210,11 +215,6 @@ pub async fn try_crunch(crunch: &Crunch) -> Result<(), CrunchError> { pools_summary, }; - let report = Report::from(data); - crunch - .send_message(&report.message(), &report.formatted_message()) - .await?; - Ok(()) } diff --git a/src/runtimes/mod.rs b/src/runtimes/mod.rs index 9b50ff3..3410ac8 100644 --- a/src/runtimes/mod.rs +++ b/src/runtimes/mod.rs @@ -21,6 +21,7 @@ #![allow(clippy::all)] +pub mod creditcoin; pub mod kusama; pub mod polkadot; pub mod support; diff --git a/src/runtimes/polkadot.rs b/src/runtimes/polkadot.rs index fd12607..410d5b1 100644 --- a/src/runtimes/polkadot.rs +++ b/src/runtimes/polkadot.rs @@ -19,25 +19,31 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
-use crate::config::CONFIG; -use crate::crunch::{ - get_account_id_from_storage_key, get_from_seed, random_wait, try_fetch_onet_data, - try_fetch_stashes_from_remote_url, Crunch, NominatorsAmount, ValidatorAmount, - ValidatorIndex, -}; -use crate::errors::CrunchError; -use crate::pools::{nomination_pool_account, AccountType}; -use crate::report::{ - Batch, EraIndex, Network, NominationPoolsSummary, Payout, PayoutSummary, Points, - RawData, Report, Signer, Validator, Validators, +use crate::{ + config::CONFIG, + crunch::{ + get_account_id_from_storage_key, get_from_seed, random_wait, try_fetch_onet_data, + try_fetch_stashes_from_remote_url, Crunch, NominatorsAmount, ValidatorAmount, + ValidatorIndex, + }, + errors::CrunchError, + pools::{nomination_pool_account, AccountType}, + report::{ + Batch, EraIndex, Network, NominationPoolsSummary, Payout, PayoutSummary, Points, + Signer, Validator, Validators, + }, + stats, }; -use crate::stats; use async_recursion::async_recursion; use futures::StreamExt; use log::{debug, info, warn}; use std::{ - cmp, convert::TryFrom, convert::TryInto, fs, result::Result, str::FromStr, thread, - time, + cmp, + convert::{TryFrom, TryInto}, + fs, + result::Result, + str::FromStr, + thread, time, }; use subxt::{ error::DispatchError, @@ -57,17 +63,16 @@ use subxt::{ mod node_runtime {} use node_runtime::{ - runtime_types::bounded_collections::bounded_vec::BoundedVec, - runtime_types::pallet_nomination_pools::{BondExtra, ClaimPermission}, - staking::events::EraPaid, - staking::events::PayoutStarted, - staking::events::Rewarded, + runtime_types::{ + bounded_collections::bounded_vec::BoundedVec, + pallet_nomination_pools::{BondExtra, ClaimPermission}, + }, + staking::events::{EraPaid, PayoutStarted, Rewarded}, system::events::ExtrinsicFailed, - utility::events::BatchCompleted, - utility::events::BatchCompletedWithErrors, - utility::events::BatchInterrupted, - utility::events::ItemCompleted, - utility::events::ItemFailed, + utility::events::{ + 
BatchCompleted, BatchCompletedWithErrors, BatchInterrupted, ItemCompleted, + ItemFailed, + }, }; type Call = node_runtime::runtime_types::polkadot_runtime::RuntimeCall; @@ -145,11 +150,12 @@ pub async fn try_crunch(crunch: &Crunch) -> Result<(), CrunchError> { } // Try run payouts in batches - let (mut validators, payout_summary) = + let (mut validators, _payout_summary) = try_run_batch_payouts(&crunch, &seed_account_signer).await?; // Try run members in batches - let pools_summary = try_run_batch_pool_members(&crunch, &seed_account_signer).await?; + let _pools_summary = + try_run_batch_pool_members(&crunch, &seed_account_signer).await?; // Get Network name let chain_name = api.rpc().system_chain().await?; @@ -201,20 +207,6 @@ pub async fn try_crunch(crunch: &Crunch) -> Result<(), CrunchError> { token_decimals, }; debug!("network {:?}", network); - - let data = RawData { - network, - signer, - validators, - payout_summary, - pools_summary, - }; - - let report = Report::from(data); - crunch - .send_message(&report.message(), &report.formatted_message()) - .await?; - Ok(()) } diff --git a/src/runtimes/support.rs b/src/runtimes/support.rs index add8327..39191a3 100644 --- a/src/runtimes/support.rs +++ b/src/runtimes/support.rs @@ -27,6 +27,7 @@ pub enum SupportedRuntime { Polkadot, Kusama, Westend, + Creditcoin, } impl From<ChainPrefix> for SupportedRuntime { @@ -34,7 +35,7 @@ impl From<ChainPrefix> for SupportedRuntime { match v { 0 => Self::Polkadot, 2 => Self::Kusama, - 42 => Self::Westend, + 42 => Self::Creditcoin, _ => unimplemented!("Chain prefix not supported"), } } @@ -42,11 +43,13 @@ impl From<ChainPrefix> for SupportedRuntime { impl From<ChainTokenSymbol> for SupportedRuntime { fn from(v: ChainTokenSymbol) -> Self { + println!("{:?}", v); match v.as_str() { "DOT" => Self::Polkadot, "KSM" => Self::Kusama, "WND" => Self::Westend, - _ => unimplemented!("Chain unit not supported"), + "CTC" => Self::Creditcoin, + _ => Self::Creditcoin, } } } @@ -57,6 +60,7 @@ impl std::fmt::Display for SupportedRuntime { match self { Self::Polkadot
=> write!(f, "Polkadot"), Self::Kusama => write!(f, "Kusama"), Self::Westend => write!(f, "Westend"), + Self::Creditcoin => write!(f, "Creditcoin"), } } } diff --git a/src/runtimes/westend.rs b/src/runtimes/westend.rs index a25eb73..569f6ae 100644 --- a/src/runtimes/westend.rs +++ b/src/runtimes/westend.rs @@ -19,25 +19,31 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -use crate::config::CONFIG; -use crate::crunch::{ - get_account_id_from_storage_key, get_from_seed, random_wait, try_fetch_onet_data, - try_fetch_stashes_from_remote_url, Crunch, NominatorsAmount, ValidatorAmount, - ValidatorIndex, -}; -use crate::errors::CrunchError; -use crate::pools::{nomination_pool_account, AccountType}; -use crate::report::{ - Batch, EraIndex, Network, NominationPoolsSummary, Payout, PayoutSummary, Points, - RawData, Report, Signer, Validator, Validators, +use crate::{ + config::CONFIG, + crunch::{ + get_account_id_from_storage_key, get_from_seed, random_wait, try_fetch_onet_data, + try_fetch_stashes_from_remote_url, Crunch, NominatorsAmount, ValidatorAmount, + ValidatorIndex, + }, + errors::CrunchError, + pools::{nomination_pool_account, AccountType}, + report::{ + Batch, EraIndex, Network, NominationPoolsSummary, Payout, PayoutSummary, Points, + RawData, Signer, Validator, Validators, + }, + stats, }; -use crate::stats; use async_recursion::async_recursion; use futures::StreamExt; use log::{debug, info, warn}; use std::{ - cmp, convert::TryFrom, convert::TryInto, fs, result::Result, str::FromStr, thread, - time, + cmp, + convert::{TryFrom, TryInto}, + fs, + result::Result, + str::FromStr, + thread, time, }; use subxt::{ error::DispatchError, @@ -57,17 +63,16 @@ use subxt::{ mod node_runtime {} use node_runtime::{ - runtime_types::bounded_collections::bounded_vec::BoundedVec, - runtime_types::pallet_nomination_pools::{BondExtra, ClaimPermission}, - staking::events::EraPaid, - staking::events::PayoutStarted, - 
staking::events::Rewarded, + runtime_types::{ + bounded_collections::bounded_vec::BoundedVec, + pallet_nomination_pools::{BondExtra, ClaimPermission}, + }, + staking::events::{EraPaid, PayoutStarted, Rewarded}, system::events::ExtrinsicFailed, - utility::events::BatchCompleted, - utility::events::BatchCompletedWithErrors, - utility::events::BatchInterrupted, - utility::events::ItemCompleted, - utility::events::ItemFailed, + utility::events::{ + BatchCompleted, BatchCompletedWithErrors, BatchInterrupted, ItemCompleted, + ItemFailed, + }, }; type Call = node_runtime::runtime_types::westend_runtime::RuntimeCall; @@ -202,7 +207,7 @@ pub async fn try_crunch(crunch: &Crunch) -> Result<(), CrunchError> { }; debug!("network {:?}", network); - let data = RawData { + let _data = RawData { network, signer, validators, @@ -210,11 +215,6 @@ pub async fn try_crunch(crunch: &Crunch) -> Result<(), CrunchError> { pools_summary, }; - let report = Report::from(data); - crunch - .send_message(&report.message(), &report.formatted_message()) - .await?; - Ok(()) } diff --git a/src/stats.rs b/src/stats.rs index a14e6c7..b806229 100644 --- a/src/stats.rs +++ b/src/stats.rs @@ -19,8 +19,9 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
+#[allow(clippy::ptr_arg)] pub fn mean(list: &Vec<f64>) -> f64 { - if list.len() == 0 { + if list.is_empty() { return 0.0; } let sum: f64 = list.iter().sum(); @@ -29,13 +30,13 @@ pub fn standard_deviation(list: &Vec<f64>) -> f64 { let m = mean(list); - let mut variance: Vec<f64> = - list.iter().map(|&score| (score - m).powf(2.0)).collect(); - mean(&mut variance).sqrt() + let variance: Vec<f64> = list.iter().map(|&score| (score - m).powf(2.0)).collect(); + mean(&variance).sqrt() } +#[allow(clippy::ptr_arg)] pub fn median(list: &mut Vec<u32>) -> u32 { - if list.len() == 0 { + if list.is_empty() { return 0; } list.sort(); @@ -67,13 +68,14 @@ pub fn confidence_interval(list: &Vec<u32>, z: f64) -> (f64, f64) { } // Find outliers by Interquartile Range(IQR) // https://www.statisticshowto.com/statistics-basics/find-outliers/ +#[allow(clippy::ptr_arg)] pub fn iqr_interval(list: &mut Vec<u32>) -> (f64, f64) { - if list.len() == 0 { + if list.is_empty() { return (0.0, 0.0); } list.sort(); let q1 = median(&mut (&list[..&list.len() / 2]).into()); - let q3 = median(&mut (&list[&list.len() - (&list.len() / 2)..]).into()); + let q3 = median(&mut (&list[list.len() - (&list.len() / 2)..]).into()); let iqr = q3 - q1; ( (q1 as f64) - (iqr as f64 * 1.5),
- + Westend Crunch Bot (Public)
- + Kusama Crunch Bot (Public)
- + Polkadot Crunch Bot (Public)